diff --git a/Dockerfile.multiplatform b/Dockerfile.multiplatform
new file mode 100644
index 000000000..40550644b
--- /dev/null
+++ b/Dockerfile.multiplatform
@@ -0,0 +1,76 @@
+FROM alpine:3.19
+
+# Install runtime dependencies
+RUN apk add --no-cache \
+    curl \
+    git \
+    nodejs \
+    npm \
+    bash \
+    ca-certificates \
+    libc6-compat
+
+# Install Go 1.22.2 (required by go.mod)
+ARG TARGETARCH
+RUN if [ "$TARGETARCH" = "arm64" ]; then \
+        GO_ARCH="arm64"; \
+    else \
+        GO_ARCH="amd64"; \
+    fi && \
+    curl -L "https://go.dev/dl/go1.22.2.linux-${GO_ARCH}.tar.gz" -o go.tar.gz && \
+    tar -C /usr/local -xzf go.tar.gz && \
+    rm go.tar.gz
+
+# Set Go environment variables
+ENV GOPATH="/go"
+ENV PATH="/usr/local/go/bin:${PATH}"
+ENV PATH="${GOPATH}/bin:${PATH}"
+
+# Download Hugo binary directly (much more space efficient than compiling)
+ARG TARGETARCH
+RUN if [ "$TARGETARCH" = "arm64" ]; then \
+        HUGO_ARCH="arm64"; \
+    else \
+        HUGO_ARCH="amd64"; \
+    fi && \
+    echo "Downloading Hugo for architecture: ${HUGO_ARCH}" && \
+    curl -L "https://github.com/gohugoio/hugo/releases/download/v0.123.7/hugo_extended_0.123.7_linux-${HUGO_ARCH}.tar.gz" -o hugo.tar.gz && \
+    echo "Extracting Hugo..." && \
+    tar -xzf hugo.tar.gz && \
+    echo "Contents after extraction:" && \
+    ls -la && \
+    echo "Hugo binary details:" && \
+    ls -la hugo && \
+    echo "Moving Hugo binary..." && \
+    cp hugo /usr/local/bin/hugo && \
+    chmod +x /usr/local/bin/hugo && \
+    echo "Hugo binary location and permissions:" && \
+    ls -la /usr/local/bin/hugo && \
+    echo "Testing Hugo binary:" && \
+    ldd /usr/local/bin/hugo && \
+    /usr/local/bin/hugo version && \
+    rm hugo.tar.gz hugo
+
+# Install global dependencies
+RUN npm install -g postcss postcss-cli autoprefixer
+
+# Copy entrypoint script
+COPY scripts/entrypoint.sh /usr/local/bin/
+RUN chmod +x /usr/local/bin/entrypoint.sh
+
+# Create working directory
+WORKDIR /src
+
+# Configure Git to trust the working directory
+RUN git config --global --add safe.directory /src
+
+# Verify installations
+RUN node --version && \
+    npm --version && \
+    npx --version && \
+    hugo version && \
+    go version
+
+EXPOSE 1313
+
+ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 577fb7a40..226040c47 100644
--- a/Makefile
+++ b/Makefile
@@ -1,20 +1,31 @@
 # Hugo configuration
 OUTPUT_DIR := output
-DOCKER_IMAGE := hvishwanath/hugo:v0.123.7-ext
+HUGO_BASE_IMAGE := hvishwanath/hugo:v0.123.7-ext-multiplatform
+DOCKER_IMAGE := $(HUGO_BASE_IMAGE)
 #PROD_IMAGE := hvishwanath/kafka-site-md:1.2.0
 PROD_IMAGE := us-west1-docker.pkg.dev/play-394201/kafka-site-md/kafka-site-md:1.6.0
 
-.PHONY: build serve clean docker-image prod-image prod-run buildx-setup
+.PHONY: build serve clean docker-image hugo-base-multi-platform prod-image prod-run buildx-setup ghcr-prod-image
 
 # Setup buildx for multi-arch builds
 buildx-setup:
 	docker buildx create --name multiarch --driver docker-container --use || true
 	docker buildx inspect multiarch --bootstrap
 
-# Build the Docker image
+# Build the Docker image (single platform)
 docker-image:
 	docker build -t $(DOCKER_IMAGE) . --push
 
+# Build and push multi-platform Hugo base image
+hugo-base-multi-platform: buildx-setup
+	docker buildx build \
+		--platform linux/amd64,linux/arm64 \
+		--tag $(HUGO_BASE_IMAGE) \
+		--file Dockerfile.multiplatform \
+		--build-arg BUILDKIT_INLINE_CACHE=1 \
+		--push \
+		.
+
 # Build the static site using Docker
 build:
 	docker pull $(DOCKER_IMAGE)
@@ -48,8 +59,19 @@ prod-run: prod-image
 	docker pull $(PROD_IMAGE)
 	docker run --rm -p 8080:80 $(PROD_IMAGE)
 
+# Build and push production image to GHCR
+ghcr-prod-image: build buildx-setup
+	docker buildx build \
+		--platform linux/amd64,linux/arm64 \
+		--tag ghcr.io/$(shell basename $(shell git rev-parse --show-toplevel))/kafka-site-md:prod-$(shell git rev-parse --abbrev-ref HEAD) \
+		--tag ghcr.io/$(shell basename $(shell git rev-parse --show-toplevel))/kafka-site-md:prod-$(shell git rev-parse --short HEAD) \
+		--tag ghcr.io/$(shell basename $(shell git rev-parse --show-toplevel))/kafka-site-md:prod-$(shell date +%Y%m%d-%H%M%S) \
+		--file Dockerfile.prod \
+		--push \
+		.
+
 # Clean the output directory and remove Docker images
 clean:
 	rm -rf $(OUTPUT_DIR)
-	docker rmi $(DOCKER_IMAGE) $(PROD_IMAGE)
+	docker rmi $(DOCKER_IMAGE) $(HUGO_BASE_IMAGE) $(PROD_IMAGE)
 	docker buildx rm multiarch || true
diff --git a/README.md b/README.md
index 1613c113f..f5d2c19f7 100644
--- a/README.md
+++ b/README.md
@@ -167,4 +167,4 @@ make clean
 4. Test locally using `make serve`
 5. Submit a pull request
 
-For more details about the migration to Markdown and the overall architecture, see [KIP-1133](https://cwiki.apache.org/confluence/display/KAFKA/KIP-1133%3A+AK+Documentation+and+Website+in+Markdown).
\ No newline at end of file
+For more details about the migration to Markdown and the overall architecture, see [KIP-1133](https://cwiki.apache.org/confluence/display/KAFKA/KIP-1133%3A+AK+Documentation+and+Website+in+Markdown).
diff --git a/content/en/0110/streams/core-concepts.md b/content/en/0110/streams/core-concepts.md
index 2a30d9cf7..9774e4175 100644
--- a/content/en/0110/streams/core-concepts.md
+++ b/content/en/0110/streams/core-concepts.md
@@ -80,7 +80,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th
 
 # Processing Guarantees
 
-In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations.
Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/0110/documentation#streamsconfigs) section. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/0110/documentation#streamsconfigs) section. 
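As a minimal sketch of what that single configuration change looks like in application code (the application id, broker address, and topic names below are placeholders, and `StreamsBuilder` assumes a 1.0-or-newer client; the 0.11.0 API used `KStreamBuilder` instead), an exactly-once pass-through application might look like this:

```java
import java.util.Properties;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

public class ExactlyOnceDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "exactly-once-demo");      // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");      // placeholder
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        // Opt into exactly-once processing; the default is at_least_once.
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);

        // A pass-through topology: with EOS enabled, the input offset commit and the
        // write to the output topic are committed atomically in one transaction.
        StreamsBuilder builder = new StreamsBuilder();
        builder.<String, String>stream("input-topic").to("output-topic");

        KafkaStreams streams = new KafkaStreams(builder.build(), props);
        streams.start();
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
}
```

The same switch can be made in a properties file with `processing.guarantee=exactly_once`; nothing else about the topology needs to change.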
[Previous](/0110/streams/developer-guide) [Next](/0110/streams/architecture) diff --git a/content/en/10/streams/core-concepts.md b/content/en/10/streams/core-concepts.md index b55f4c90c..5f9bb3ee6 100644 --- a/content/en/10/streams/core-concepts.md +++ b/content/en/10/streams/core-concepts.md @@ -82,7 +82,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/10/documentation#streamsconfigs) section. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). 
Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/10/documentation#streamsconfigs) section. [Previous](/10/streams/tutorial) [Next](/10/streams/architecture) diff --git a/content/en/11/streams/core-concepts.md b/content/en/11/streams/core-concepts.md index 6037d9a66..884f54306 100644 --- a/content/en/11/streams/core-concepts.md +++ b/content/en/11/streams/core-concepts.md @@ -82,7 +82,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. 
Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/11/documentation#streamsconfigs) section. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. 
To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/11/documentation#streamsconfigs) section. [Previous](/11/streams/tutorial) [Next](/11/streams/architecture) diff --git a/content/en/20/streams/core-concepts.md b/content/en/20/streams/core-concepts.md index 7e6b505ab..db682d74b 100644 --- a/content/en/20/streams/core-concepts.md +++ b/content/en/20/streams/core-concepts.md @@ -106,7 +106,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/20/documentation#streamsconfigs) section. 
+In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/20/documentation#streamsconfigs) section. [Previous](/20/streams/tutorial) [Next](/20/streams/architecture) diff --git a/content/en/21/streams/core-concepts.md b/content/en/21/streams/core-concepts.md index 02d07f772..ff0fef824 100644 --- a/content/en/21/streams/core-concepts.md +++ b/content/en/21/streams/core-concepts.md @@ -106,7 +106,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). 
Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/21/documentation#streamsconfigs) section. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. 
Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/21/documentation#streamsconfigs) section. # Out-of-Order Handling diff --git a/content/en/22/streams/core-concepts.md b/content/en/22/streams/core-concepts.md index e1f2b384c..585b73ac0 100644 --- a/content/en/22/streams/core-concepts.md +++ b/content/en/22/streams/core-concepts.md @@ -106,7 +106,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). 
In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/22/documentation#streamsconfigs) section. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/22/documentation#streamsconfigs) section. # Out-of-Order Handling diff --git a/content/en/23/streams/core-concepts.md b/content/en/23/streams/core-concepts.md index 9e7e368db..b3ce4b9df 100644 --- a/content/en/23/streams/core-concepts.md +++ b/content/en/23/streams/core-concepts.md @@ -106,7 +106,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" 
Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/23/documentation#streamsconfigs) section. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. 
Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/23/documentation#streamsconfigs) section. # Out-of-Order Handling diff --git a/content/en/24/streams/core-concepts.md b/content/en/24/streams/core-concepts.md index d80ff38cd..f3b856c76 100644 --- a/content/en/24/streams/core-concepts.md +++ b/content/en/24/streams/core-concepts.md @@ -106,7 +106,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. 
Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/24/documentation#streamsconfigs) section. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/24/documentation#streamsconfigs) section. 
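The "transactional and idempotent manner" mentioned above refers to the plain producer API that Kafka Streams builds on; a bare-bones sketch of that lower-level usage (the broker address, transactional id, and topic name are placeholders) might look like this:

```java
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class TransactionalProducerDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // placeholder
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "demo-txn-id");        // placeholder

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.initTransactions();
            producer.beginTransaction();
            try {
                // Everything sent between beginTransaction() and commitTransaction()
                // becomes visible to read_committed consumers atomically, or not at all.
                producer.send(new ProducerRecord<>("output-topic", "key", "value"));
                producer.commitTransaction();
            } catch (Exception e) {
                producer.abortTransaction();
                throw new RuntimeException(e);
            }
        }
    }
}
```

Downstream consumers only observe the record once the transaction commits, provided they read with `isolation.level=read_committed`.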
# Out-of-Order Handling diff --git a/content/en/25/streams/core-concepts.md b/content/en/25/streams/core-concepts.md index 51375444e..b313344de 100644 --- a/content/en/25/streams/core-concepts.md +++ b/content/en/25/streams/core-concepts.md @@ -106,7 +106,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/25/documentation#streamsconfigs) section. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). 
Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. To read more details on how this is done inside Kafka Streams, readers are recommended to read [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). In order to achieve exactly-once semantics when running Kafka Streams applications, users can simply set the `processing.guarantee` config value to **exactly_once** (default value is **at_least_once**). More details can be found in the [**Kafka Streams Configs**](/25/documentation#streamsconfigs) section. # Out-of-Order Handling diff --git a/content/en/26/streams/core-concepts.md b/content/en/26/streams/core-concepts.md index a2a372170..7c1a7da7a 100644 --- a/content/en/26/streams/core-concepts.md +++ b/content/en/26/streams/core-concepts.md @@ -115,7 +115,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. 
+In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once beta", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics). To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **exactly_once** for EOS version 1 (requires brokers version 0.11.0 or newer) or **exactly_once_beta** for EOS version 2 (requires brokers version 2.5 or newer). For more information, see the [Kafka Streams Configs](/26/streams/developer-guide/config-streams.html) section. 
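As a rough illustration of the 2.6-era configuration this paragraph describes (constant names from the 2.6 Java client; the application id and broker address below are placeholders), the relevant Streams properties might be set like this:

```java
import java.util.Properties;

import org.apache.kafka.streams.StreamsConfig;

public class EosV2Config {
    // Streams properties enabling the "exactly-once beta" mode added in Kafka 2.6;
    // it uses one producer per stream thread instead of one per task and requires
    // brokers on 2.5 or newer.
    public static Properties streamsProperties() {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "eos-beta-demo");     // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        // StreamsConfig.EXACTLY_ONCE_BETA == "exactly_once_beta" in the 2.6 client.
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_BETA);
        return props;
    }
}
```

In newer clients the same behavior is exposed as `exactly_once_v2`, so only the configuration value differs between EOS versions.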
diff --git a/content/en/27/streams/core-concepts.md b/content/en/27/streams/core-concepts.md index b777a5df3..505df170e 100644 --- a/content/en/27/streams/core-concepts.md +++ b/content/en/27/streams/core-concepts.md @@ -134,7 +134,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. 
For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once beta", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics). To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **exactly_once** (requires brokers version 0.11.0 or newer) or **exactly_once_beta** (requires brokers version 2.5 or newer). For more information, see the [Kafka Streams Configs](/27/streams/developer-guide/config-streams.html) section. diff --git a/content/en/28/streams/core-concepts.md b/content/en/28/streams/core-concepts.md index 42f6dab3f..c0f8cfc47 100644 --- a/content/en/28/streams/core-concepts.md +++ b/content/en/28/streams/core-concepts.md @@ -134,7 +134,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. 
In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once beta", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics). To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **exactly_once** for EOS version 1 (requires brokers version 0.11.0 or newer) or **exactly_once_beta** for EOS version 2 (requires brokers version 2.5 or newer). For more information, see the [Kafka Streams Configs](/28/streams/developer-guide/config-streams.html) section. diff --git a/content/en/30/streams/core-concepts.md b/content/en/30/streams/core-concepts.md index 4f1f52885..fcf8cf55b 100644 --- a/content/en/30/streams/core-concepts.md +++ b/content/en/30/streams/core-concepts.md @@ -134,7 +134,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. 
In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once v2", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. As of the 3.0.0 release, the first version of exactly-once has been deprecated. Users are encouraged to use exactly-once v2 for exactly-once processing from now on, and prepare by upgrading their brokers if necessary. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics). To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **StreamsConfig.EXACTLY_ONCE_V2** (requires brokers version 2.5 or newer). 
For more information, see the [Kafka Streams Configs](/30/streams/developer-guide/config-streams.html) section. diff --git a/content/en/30/streams/developer-guide/datatypes.md b/content/en/30/streams/developer-guide/datatypes.md index 737227480..6916481a8 100644 --- a/content/en/30/streams/developer-guide/datatypes.md +++ b/content/en/30/streams/developer-guide/datatypes.md @@ -85,7 +85,7 @@ Apache Kafka includes several built-in serde implementations for Java primitives org.apache.kafka kafka-clients - 2.8.0 + 3.0.0 This artifact provides the following serde implementations under the package [org.apache.kafka.common.serialization](https://github.com/apache/kafka/blob/3.0/clients/src/main/java/org/apache/kafka/common/serialization), which you can leverage when e.g., defining default serializers in your Streams configuration. diff --git a/content/en/31/streams/core-concepts.md b/content/en/31/streams/core-concepts.md index 56eecad61..3bf5827aa 100644 --- a/content/en/31/streams/core-concepts.md +++ b/content/en/31/streams/core-concepts.md @@ -134,7 +134,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. 
Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once v2", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. As of the 3.0.0 release, the first version of exactly-once has been deprecated. Users are encouraged to use exactly-once v2 for exactly-once processing from now on, and prepare by upgrading their brokers if necessary. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics). To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **StreamsConfig.EXACTLY_ONCE_V2** (requires brokers version 2.5 or newer). For more information, see the [Kafka Streams Configs](/31/streams/developer-guide/config-streams.html) section. diff --git a/content/en/31/streams/developer-guide/datatypes.md b/content/en/31/streams/developer-guide/datatypes.md index be584f318..ae64d7310 100644 --- a/content/en/31/streams/developer-guide/datatypes.md +++ b/content/en/31/streams/developer-guide/datatypes.md @@ -85,7 +85,7 @@ Apache Kafka includes several built-in serde implementations for Java primitives org.apache.kafka kafka-clients - 2.8.0 + 3.1.1-SNAPSHOT This artifact provides the following serde implementations under the package [org.apache.kafka.common.serialization](https://github.com/apache/kafka/blob/3.1/clients/src/main/java/org/apache/kafka/common/serialization), which you can leverage when e.g., defining default serializers in your Streams configuration. 
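Several of the hunks above touch the paragraph describing the serdes that ship with `kafka-clients`. As an illustrative sketch only (the class name and the choice of `String`/`Long` serdes are arbitrary, not taken from the patched pages), wiring those built-in serdes in as application-wide defaults typically looks like this:

```java
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsConfig;

public class DefaultSerdeConfig {
    public static Properties props() {
        Properties props = new Properties();
        // Built-in serdes from kafka-clients, used as the application-wide defaults
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Long().getClass());
        return props;
    }
}
```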
diff --git a/content/en/32/streams/core-concepts.md b/content/en/32/streams/core-concepts.md index cd05bb11d..edc59c86b 100644 --- a/content/en/32/streams/core-concepts.md +++ b/content/en/32/streams/core-concepts.md @@ -134,7 +134,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. 
For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once v2", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. As of the 3.0.0 release, the first version of exactly-once has been deprecated. Users are encouraged to use exactly-once v2 for exactly-once processing from now on, and prepare by upgrading their brokers if necessary. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics). To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **StreamsConfig.EXACTLY_ONCE_V2** (requires brokers version 2.5 or newer). For more information, see the [Kafka Streams Configs](/32/streams/developer-guide/config-streams.html) section. diff --git a/content/en/32/streams/developer-guide/datatypes.md b/content/en/32/streams/developer-guide/datatypes.md index 8d90b8ef6..baea46a18 100644 --- a/content/en/32/streams/developer-guide/datatypes.md +++ b/content/en/32/streams/developer-guide/datatypes.md @@ -85,7 +85,7 @@ Apache Kafka includes several built-in serde implementations for Java primitives org.apache.kafka kafka-clients - 2.8.0 + 3.2.1 This artifact provides the following serde implementations under the package [org.apache.kafka.common.serialization](https://github.com/apache/kafka/blob/3.2/clients/src/main/java/org/apache/kafka/common/serialization), which you can leverage when e.g., defining default serializers in your Streams configuration. diff --git a/content/en/33/streams/core-concepts.md b/content/en/33/streams/core-concepts.md index 020e51a5b..ccffc224d 100644 --- a/content/en/33/streams/core-concepts.md +++ b/content/en/33/streams/core-concepts.md @@ -134,7 +134,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. 
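The defaults can also be overridden per operation. Again as a hedged sketch (the topic names are hypothetical), explicit serdes are commonly passed via `Consumed` and `Produced`:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Produced;

public class ExplicitSerdes {
    public static void addTo(final StreamsBuilder builder) {
        // Override the configured default serdes for a single source and sink
        final KStream<String, Long> counts = builder.stream(
            "counts-input",                               // hypothetical topic name
            Consumed.with(Serdes.String(), Serdes.Long()));
        counts.to(
            "counts-output",                              // hypothetical topic name
            Produced.with(Serdes.String(), Serdes.Long()));
    }
}
```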
+In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once v2", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. As of the 3.0.0 release, the first version of exactly-once has been deprecated. Users are encouraged to use exactly-once v2 for exactly-once processing from now on, and prepare by upgrading their brokers if necessary. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics). To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **StreamsConfig.EXACTLY_ONCE_V2** (requires brokers version 2.5 or newer). For more information, see the [Kafka Streams Configs](/33/streams/developer-guide/config-streams.html) section. 
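Since the hunk above covers the 3.0.0 deprecation of the first exactly-once implementation, a short sketch of the recommended v2 configuration may help; this is an illustration rather than text from the patched page, and the application id and bootstrap server are placeholders.

```java
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class EosV2StreamsConfig {
    public static Properties props() {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "orders-processor"); // hypothetical application id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "broker:9092");   // hypothetical bootstrap server
        // EXACTLY_ONCE_V2 is the non-deprecated option from 3.0 onwards; requires brokers 2.5+.
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);
        return props;
    }
}
```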
diff --git a/content/en/33/streams/developer-guide/datatypes.md index f1eea51d1..37e3fb622 100644 --- a/content/en/33/streams/developer-guide/datatypes.md +++ b/content/en/33/streams/developer-guide/datatypes.md @@ -85,7 +85,7 @@ Apache Kafka includes several built-in serde implementations for Java primitives org.apache.kafka kafka-clients - 2.8.0 + 3.3.1 This artifact provides the following serde implementations under the package [org.apache.kafka.common.serialization](https://github.com/apache/kafka/blob/3.3/clients/src/main/java/org/apache/kafka/common/serialization), which you can leverage when e.g., defining default serializers in your Streams configuration. diff --git a/content/en/33/streams/developer-guide/dsl-api.md index 7adc749d2..d595d2e52 100644 --- a/content/en/33/streams/developer-guide/dsl-api.md +++ b/content/en/33/streams/developer-guide/dsl-api.md @@ -2007,6 +2007,8 @@ Transformation | Description | Applies a `ValueTransformer` to each record, while retaining the key of the original record. `transformValues()` allows you to leverage the [Processor API](processor-api.html#streams-developer-guide-processor-api) from the DSL. ([details](/33/javadoc/org/apache/kafka/streams/kstream/KStream.html#transformValues-org.apache.kafka.streams.kstream.ValueTransformerSupplier-java.lang.String...-)) Each input record is transformed into exactly one output record (zero output records or multiple output records are not possible). The `ValueTransformer` may return `null` as the new value for a record. `transformValues` is preferable to `transform` because it will not cause data re-partitioning. `transformValues` is essentially equivalent to adding the `ValueTransformer` via `Topology#addProcessor()` to your [processor topology](../core-concepts.html#streams_topology). An example is available in the [javadocs](/33/javadoc/org/apache/kafka/streams/kstream/KStream.html#transformValues-org.apache.kafka.streams.kstream.ValueTransformerSupplier-java.lang.String...-). +**CAUTION:** If you use the "merge repartition topics" optimization, avoid `KStream#processValues`; otherwise you may run into compatibility issues when upgrading to newer versions of Kafka Streams. For more details, see the [migration guide](/40/streams/developer-guide/dsl-api.html#transformers-removal-and-migration-to-processors) in the Kafka Streams 4.0 docs. + The following example shows how to leverage, via the `KStream#process()` method, a custom `Processor` that sends an email notification whenever a page view count reaches a predefined threshold. First, we need to implement a custom stream processor, `PopularPageEmailAlert`, that implements the `Processor` interface: diff --git a/content/en/34/streams/core-concepts.md index f59c9a6d6..fb84b3467 100644 --- a/content/en/34/streams/core-concepts.md +++ b/content/en/34/streams/core-concepts.md @@ -134,7 +134,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?"
Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once v2", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. As of the 3.0.0 release, the first version of exactly-once has been deprecated. 
Users are encouraged to use exactly-once v2 for exactly-once processing from now on, and prepare by upgrading their brokers if necessary. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics). To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **StreamsConfig.EXACTLY_ONCE_V2** (requires brokers version 2.5 or newer). For more information, see the [Kafka Streams Configs](/34/streams/developer-guide/config-streams.html) section. diff --git a/content/en/34/streams/developer-guide/datatypes.md index e550b6c32..a52d4636a 100644 --- a/content/en/34/streams/developer-guide/datatypes.md +++ b/content/en/34/streams/developer-guide/datatypes.md @@ -85,7 +85,7 @@ Apache Kafka includes several built-in serde implementations for Java primitives org.apache.kafka kafka-clients - 2.8.0 + 3.4.0 This artifact provides the following serde implementations under the package [org.apache.kafka.common.serialization](https://github.com/apache/kafka/blob/3.4/clients/src/main/java/org/apache/kafka/common/serialization), which you can leverage when e.g., defining default serializers in your Streams configuration. diff --git a/content/en/34/streams/developer-guide/dsl-api.md index 00447e7dc..885a1095b 100644 --- a/content/en/34/streams/developer-guide/dsl-api.md +++ b/content/en/34/streams/developer-guide/dsl-api.md @@ -2007,6 +2007,8 @@ Transformation | Description | Applies a `ValueTransformer` to each record, while retaining the key of the original record. `transformValues()` allows you to leverage the [Processor API](processor-api.html#streams-developer-guide-processor-api) from the DSL. ([details](/34/javadoc/org/apache/kafka/streams/kstream/KStream.html#transformValues-org.apache.kafka.streams.kstream.ValueTransformerSupplier-java.lang.String...-)) Each input record is transformed into exactly one output record (zero output records or multiple output records are not possible). The `ValueTransformer` may return `null` as the new value for a record. `transformValues` is preferable to `transform` because it will not cause data re-partitioning. `transformValues` is essentially equivalent to adding the `ValueTransformer` via `Topology#addProcessor()` to your [processor topology](../core-concepts.html#streams_topology). An example is available in the [javadocs](/34/javadoc/org/apache/kafka/streams/kstream/KStream.html#transformValues-org.apache.kafka.streams.kstream.ValueTransformerSupplier-java.lang.String...-). +**CAUTION:** If you use the "merge repartition topics" optimization, avoid `KStream#processValues`; otherwise you may run into compatibility issues when upgrading to newer versions of Kafka Streams. For more details, see the [migration guide](/40/streams/developer-guide/dsl-api.html#transformers-removal-and-migration-to-processors) in the Kafka Streams 4.0 docs. + The following example shows how to leverage, via the `KStream#process()` method, a custom `Processor` that sends an email notification whenever a page view count reaches a predefined threshold.
First, we need to implement a custom stream processor, `PopularPageEmailAlert`, that implements the `Processor` interface: diff --git a/content/en/35/streams/core-concepts.md b/content/en/35/streams/core-concepts.md index 75b6b79c2..6e884366c 100644 --- a/content/en/35/streams/core-concepts.md +++ b/content/en/35/streams/core-concepts.md @@ -134,7 +134,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. 
For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once v2", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. As of the 3.0.0 release, the first version of exactly-once has been deprecated. Users are encouraged to use exactly-once v2 for exactly-once processing from now on, and prepare by upgrading their brokers if necessary. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics). To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **StreamsConfig.EXACTLY_ONCE_V2** (requires brokers version 2.5 or newer). For more information, see the [Kafka Streams Configs](/35/streams/developer-guide/config-streams.html) section. diff --git a/content/en/35/streams/developer-guide/datatypes.md b/content/en/35/streams/developer-guide/datatypes.md index 9d7db62a9..2a426d884 100644 --- a/content/en/35/streams/developer-guide/datatypes.md +++ b/content/en/35/streams/developer-guide/datatypes.md @@ -85,7 +85,7 @@ Apache Kafka includes several built-in serde implementations for Java primitives org.apache.kafka kafka-clients - 2.8.0 + 3.5.2 This artifact provides the following serde implementations under the package [org.apache.kafka.common.serialization](https://github.com/apache/kafka/blob/3.5/clients/src/main/java/org/apache/kafka/common/serialization), which you can leverage when e.g., defining default serializers in your Streams configuration. diff --git a/content/en/35/streams/developer-guide/dsl-api.md b/content/en/35/streams/developer-guide/dsl-api.md index b176b2bd5..29c5a9a7a 100644 --- a/content/en/35/streams/developer-guide/dsl-api.md +++ b/content/en/35/streams/developer-guide/dsl-api.md @@ -2015,6 +2015,8 @@ Transformation | Description | Applies a `ValueTransformer` to each record, while retaining the key of the original record. `transformValues()` allows you to leverage the [Processor API](processor-api.html#streams-developer-guide-processor-api) from the DSL. ([details](/35/javadoc/org/apache/kafka/streams/kstream/KStream.html#transformValues-org.apache.kafka.streams.kstream.ValueTransformerSupplier-java.lang.String...-)) Each input record is transformed into exactly one output record (zero output records or multiple output records are not possible). The `ValueTransformer` may return `null` as the new value for a record. `transformValues` is preferable to `transform` because it will not cause data re-partitioning. `transformValues` is essentially equivalent to adding the `ValueTransformer` via `Topology#addProcessor()` to your [processor topology](../core-concepts.html#streams_topology). An example is available in the [javadocs](/35/javadoc/org/apache/kafka/streams/kstream/KStream.html#transformValues-org.apache.kafka.streams.kstream.ValueTransformerSupplier-java.lang.String...-). 
+**CAUTION:** If you use the "merge repartition topics" optimization, avoid `KStream#processValues`; otherwise you may run into compatibility issues when upgrading to newer versions of Kafka Streams. For more details, see the [migration guide](/40/streams/developer-guide/dsl-api.html#transformers-removal-and-migration-to-processors) in the Kafka Streams 4.0 docs. + The following example shows how to leverage, via the `KStream#process()` method, a custom `Processor` that sends an email notification whenever a page view count reaches a predefined threshold. First, we need to implement a custom stream processor, `PopularPageEmailAlert`, that implements the `Processor` interface: diff --git a/content/en/36/streams/core-concepts.md index c525c1f14..7536f3491 100644 --- a/content/en/36/streams/core-concepts.md +++ b/content/en/36/streams/core-concepts.md @@ -134,7 +134,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features.
More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once v2", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. As of the 3.0.0 release, the first version of exactly-once has been deprecated. Users are encouraged to use exactly-once v2 for exactly-once processing from now on, and prepare by upgrading their brokers if necessary. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics). To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **StreamsConfig.EXACTLY_ONCE_V2** (requires brokers version 2.5 or newer). For more information, see the [Kafka Streams Configs](/36/streams/developer-guide/config-streams.html) section. diff --git a/content/en/36/streams/developer-guide/datatypes.md b/content/en/36/streams/developer-guide/datatypes.md index dea361ea1..407a12c19 100644 --- a/content/en/36/streams/developer-guide/datatypes.md +++ b/content/en/36/streams/developer-guide/datatypes.md @@ -85,7 +85,7 @@ Apache Kafka includes several built-in serde implementations for Java primitives org.apache.kafka kafka-clients - 2.8.0 + 3.6.2 This artifact provides the following serde implementations under the package [org.apache.kafka.common.serialization](https://github.com/apache/kafka/blob/3.6/clients/src/main/java/org/apache/kafka/common/serialization), which you can leverage when e.g., defining default serializers in your Streams configuration. diff --git a/content/en/36/streams/developer-guide/dsl-api.md b/content/en/36/streams/developer-guide/dsl-api.md index 3bc97e32c..ab041bded 100644 --- a/content/en/36/streams/developer-guide/dsl-api.md +++ b/content/en/36/streams/developer-guide/dsl-api.md @@ -2021,6 +2021,8 @@ Transformation | Description | Applies a `ValueTransformer` to each record, while retaining the key of the original record. `transformValues()` allows you to leverage the [Processor API](processor-api.html#streams-developer-guide-processor-api) from the DSL. ([details](/36/javadoc/org/apache/kafka/streams/kstream/KStream.html#transformValues-org.apache.kafka.streams.kstream.ValueTransformerSupplier-java.lang.String...-)) Each input record is transformed into exactly one output record (zero output records or multiple output records are not possible). 
The `ValueTransformer` may return `null` as the new value for a record. `transformValues` is preferable to `transform` because it will not cause data re-partitioning. `transformValues` is essentially equivalent to adding the `ValueTransformer` via `Topology#addProcessor()` to your [processor topology](../core-concepts.html#streams_topology). An example is available in the [javadocs](/36/javadoc/org/apache/kafka/streams/kstream/KStream.html#transformValues-org.apache.kafka.streams.kstream.ValueTransformerSupplier-java.lang.String...-). +**CAUTION:** If you use the "merge repartition topics" optimization, avoid `KStream#processValues`; otherwise you may run into compatibility issues when upgrading to newer versions of Kafka Streams. For more details, see the [migration guide](/40/streams/developer-guide/dsl-api.html#transformers-removal-and-migration-to-processors) in the Kafka Streams 4.0 docs. + The following example shows how to leverage, via the `KStream#process()` method, a custom `Processor` that sends an email notification whenever a page view count reaches a predefined threshold. First, we need to implement a custom stream processor, `PopularPageEmailAlert`, that implements the `Processor` interface: diff --git a/content/en/37/streams/core-concepts.md index c731841d7..82527711d 100644 --- a/content/en/37/streams/core-concepts.md +++ b/content/en/37/streams/core-concepts.md @@ -134,7 +134,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics.
In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once v2", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. As of the 3.0.0 release, the first version of exactly-once has been deprecated. Users are encouraged to use exactly-once v2 for exactly-once processing from now on, and prepare by upgrading their brokers if necessary. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics). To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **StreamsConfig.EXACTLY_ONCE_V2** (requires brokers version 2.5 or newer). For more information, see the [Kafka Streams Configs](/37/streams/developer-guide/config-streams.html) section. diff --git a/content/en/37/streams/developer-guide/datatypes.md b/content/en/37/streams/developer-guide/datatypes.md index 715dbbea2..d32c1c9c6 100644 --- a/content/en/37/streams/developer-guide/datatypes.md +++ b/content/en/37/streams/developer-guide/datatypes.md @@ -85,7 +85,7 @@ Apache Kafka includes several built-in serde implementations for Java primitives org.apache.kafka kafka-clients - 2.8.0 + 3.7.2 This artifact provides the following serde implementations under the package [org.apache.kafka.common.serialization](https://github.com/apache/kafka/blob/3.7/clients/src/main/java/org/apache/kafka/common/serialization), which you can leverage when e.g., defining default serializers in your Streams configuration. 
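As a minimal, hedged sketch of the sentence above about defining default serializers in a Streams configuration (the application id and bootstrap servers below are placeholder values, not part of this change), the built-in serdes can be registered as fallbacks roughly like this:

    import java.util.Properties;
    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.StreamsConfig;

    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "serde-example");        // placeholder
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");    // placeholder
    // Fallback serdes used whenever an operation does not specify its own
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Long().getClass());

Per-operation serdes, where provided, take precedence over these defaults.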
diff --git a/content/en/37/streams/developer-guide/dsl-api.md b/content/en/37/streams/developer-guide/dsl-api.md index c9e7fb00d..d8ae25cb8 100644 --- a/content/en/37/streams/developer-guide/dsl-api.md +++ b/content/en/37/streams/developer-guide/dsl-api.md @@ -2020,6 +2020,8 @@ Transformation | Description | Applies a `ValueTransformer` to each record, while retaining the key of the original record. `transformValues()` allows you to leverage the [Processor API](processor-api.html#streams-developer-guide-processor-api) from the DSL. ([details](/37/javadoc/org/apache/kafka/streams/kstream/KStream.html#transformValues-org.apache.kafka.streams.kstream.ValueTransformerSupplier-java.lang.String...-)) Each input record is transformed into exactly one output record (zero output records or multiple output records are not possible). The `ValueTransformer` may return `null` as the new value for a record. `transformValues` is preferable to `transform` because it will not cause data re-partitioning. `transformValues` is essentially equivalent to adding the `ValueTransformer` via `Topology#addProcessor()` to your [processor topology](../core-concepts.html#streams_topology). An example is available in the [javadocs](/37/javadoc/org/apache/kafka/streams/kstream/KStream.html#transformValues-org.apache.kafka.streams.kstream.ValueTransformerSupplier-java.lang.String...-). +**CAUTION:** If you are using the "merge repartition topics" optimization, using `KStream#processValues` is not recommended, as it can cause compatibility issues for future upgrades to newer versions of Kafka Streams. For more details, see the [migration guide](/40/streams/developer-guide/dsl-api.html#transformers-removal-and-migration-to-processors) in the Kafka Streams 4.0 docs. + The following example shows how to leverage, via the `KStream#process()` method, a custom `Processor` that sends an email notification whenever a page view count reaches a predefined threshold. First, we need to implement a custom stream processor, `PopularPageEmailAlert`, that implements the `Processor` interface: diff --git a/content/en/38/streams/core-concepts.md b/content/en/38/streams/core-concepts.md index c4eb3213e..ac03c1a14 100644 --- a/content/en/38/streams/core-concepts.md +++ b/content/en/38/streams/core-concepts.md @@ -134,7 +134,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline.
+In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once v2", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. As of the 3.0.0 release, the first version of exactly-once has been deprecated. Users are encouraged to use exactly-once v2 for exactly-once processing from now on, and prepare by upgrading their brokers if necessary. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics). To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **StreamsConfig.EXACTLY_ONCE_V2** (requires brokers version 2.5 or newer). For more information, see the [Kafka Streams Configs](/38/streams/developer-guide/config-streams.html) section. 
diff --git a/content/en/38/streams/developer-guide/datatypes.md b/content/en/38/streams/developer-guide/datatypes.md index 90b927c58..784ee5000 100644 --- a/content/en/38/streams/developer-guide/datatypes.md +++ b/content/en/38/streams/developer-guide/datatypes.md @@ -85,7 +85,7 @@ Apache Kafka includes several built-in serde implementations for Java primitives org.apache.kafka kafka-clients - 2.8.0 + 3.8.1 This artifact provides the following serde implementations under the package [org.apache.kafka.common.serialization](https://github.com/apache/kafka/blob/3.8/clients/src/main/java/org/apache/kafka/common/serialization), which you can leverage when e.g., defining default serializers in your Streams configuration. diff --git a/content/en/38/streams/developer-guide/dsl-api.md b/content/en/38/streams/developer-guide/dsl-api.md index bd7b508e2..f192d9bee 100644 --- a/content/en/38/streams/developer-guide/dsl-api.md +++ b/content/en/38/streams/developer-guide/dsl-api.md @@ -1660,6 +1660,8 @@ Transformation | Description | Applies a `ValueTransformer` to each record, while retaining the key of the original record. `transformValues()` allows you to leverage the [Processor API](processor-api.html#streams-developer-guide-processor-api) from the DSL. ([details](/38/javadoc/org/apache/kafka/streams/kstream/KStream.html#transformValues-org.apache.kafka.streams.kstream.ValueTransformerSupplier-java.lang.String...-)) Each input record is transformed into exactly one output record (zero output records or multiple output records are not possible). The `ValueTransformer` may return `null` as the new value for a record. `transformValues` is preferable to `transform` because it will not cause data re-partitioning. `transformValues` is essentially equivalent to adding the `ValueTransformer` via `Topology#addProcessor()` to your [processor topology](../core-concepts.html#streams_topology). An example is available in the [javadocs](/38/javadoc/org/apache/kafka/streams/kstream/KStream.html#transformValues-org.apache.kafka.streams.kstream.ValueTransformerSupplier-java.lang.String...-). +**CAUTION:** If you are using the "merge repartition topics" optimization, using `KStream#processValues` is not recommended, as it can cause compatibility issues for future upgrades to newer versions of Kafka Streams. For more details, see the [migration guide](/40/streams/developer-guide/dsl-api.html#transformers-removal-and-migration-to-processors) in the Kafka Streams 4.0 docs. + The following example shows how to leverage, via the `KStream#process()` method, a custom `Processor` that sends an email notification whenever a page view count reaches a predefined threshold. First, we need to implement a custom stream processor, `PopularPageEmailAlert`, that implements the `Processor` interface: diff --git a/content/en/39/streams/core-concepts.md b/content/en/39/streams/core-concepts.md index bebc022bb..956e67794 100644 --- a/content/en/39/streams/core-concepts.md +++ b/content/en/39/streams/core-concepts.md @@ -134,7 +134,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?"
Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once v2", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. As of the 3.0.0 release, the first version of exactly-once has been deprecated. 
Users are encouraged to use exactly-once v2 for exactly-once processing from now on, and prepare by upgrading their brokers if necessary. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics). To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **StreamsConfig.EXACTLY_ONCE_V2** (requires brokers version 2.5 or newer). For more information, see the [Kafka Streams Configs](/39/streams/developer-guide/config-streams.html) section. diff --git a/content/en/39/streams/developer-guide/datatypes.md b/content/en/39/streams/developer-guide/datatypes.md index 35be24935..3ceb24df5 100644 --- a/content/en/39/streams/developer-guide/datatypes.md +++ b/content/en/39/streams/developer-guide/datatypes.md @@ -85,7 +85,7 @@ Apache Kafka includes several built-in serde implementations for Java primitives org.apache.kafka kafka-clients - 2.8.0 + 3.9.1 This artifact provides the following serde implementations under the package [org.apache.kafka.common.serialization](https://github.com/apache/kafka/blob/3.9/clients/src/main/java/org/apache/kafka/common/serialization), which you can leverage when e.g., defining default serializers in your Streams configuration. diff --git a/content/en/39/streams/developer-guide/dsl-api.md b/content/en/39/streams/developer-guide/dsl-api.md index e5f231b28..040e65c5f 100644 --- a/content/en/39/streams/developer-guide/dsl-api.md +++ b/content/en/39/streams/developer-guide/dsl-api.md @@ -1660,6 +1660,8 @@ Transformation | Description | Applies a `ValueTransformer` to each record, while retaining the key of the original record. `transformValues()` allows you to leverage the [Processor API](processor-api.html#streams-developer-guide-processor-api) from the DSL. ([details](/39/javadoc/org/apache/kafka/streams/kstream/KStream.html#transformValues-org.apache.kafka.streams.kstream.ValueTransformerSupplier-java.lang.String...-)) Each input record is transformed into exactly one output record (zero output records or multiple output records are not possible). The `ValueTransformer` may return `null` as the new value for a record. `transformValues` is preferable to `transform` because it will not cause data re-partitioning. `transformValues` is essentially equivalent to adding the `ValueTransformer` via `Topology#addProcessor()` to your [processor topology](../core-concepts.html#streams_topology). An example is available in the [javadocs](/39/javadoc/org/apache/kafka/streams/kstream/KStream.html#transformValues-org.apache.kafka.streams.kstream.ValueTransformerSupplier-java.lang.String...-). +**CAUTION:** If you are using the "merge repartition topics" optimization, using `KStream#processValues` is not recommended, as it can cause compatibility issues for future upgrades to newer versions of Kafka Streams. For more details, see the [migration guide](/40/streams/developer-guide/dsl-api.html#transformers-removal-and-migration-to-processors) in the Kafka Streams 4.0 docs. + The following example shows how to leverage, via the `KStream#process()` method, a custom `Processor` that sends an email notification whenever a page view count reaches a predefined threshold.
First, we need to implement a custom stream processor, `PopularPageEmailAlert`, that implements the `Processor` interface: diff --git a/content/en/40/getting-started/upgrade.md b/content/en/40/getting-started/upgrade.md index 712e3deaa..49ee55337 100644 --- a/content/en/40/getting-started/upgrade.md +++ b/content/en/40/getting-started/upgrade.md @@ -116,6 +116,7 @@ Note: Apache Kafka 4.0 only supports KRaft mode - ZooKeeper mode has been remove * All public API, deprecated in Apache Kafka 3.6 or an earlier release, have been removed, with the exception of `JoinWindows.of()` and `JoinWindows#grace()`. See [KAFKA-17531](https://issues.apache.org/jira/browse/KAFKA-17531) for details. * The most important changes are highlighted in the [Kafka Streams upgrade guide](/40/streams/upgrade-guide.html#streams_api_changes_400). * For a full list of changes, see [KAFKA-12822](https://issues.apache.org/jira/browse/KAFKA-12822). + * If you are using `KStream#transformValues()`, which was removed in the Apache Kafka 4.0.0 release, and you need to rewrite your program to use `KStream#processValues()` instead, pay close attention to the [migration guide](/40/streams/developer-guide/dsl-api.html#transformers-removal-and-migration-to-processors). * Other changes: * The minimum Java version required by clients and Kafka Streams applications has been increased from Java 8 to Java 11 while brokers, connect and tools now require Java 17. See [KIP-750](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=181308223) and [KIP-1013](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=284789510) for more details. * Java 23 support has been added in Apache Kafka 4.0 diff --git a/content/en/40/streams/core-concepts.md b/content/en/40/streams/core-concepts.md index 68a373a02..581d72da6 100644 --- a/content/en/40/streams/core-concepts.md +++ b/content/en/40/streams/core-concepts.md @@ -134,7 +134,7 @@ Kafka Streams allows direct read-only queries of the state stores by methods, th # Processing Guarantees -In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](http://lambda-architecture.net/). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?"
Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. Note the key difference between Kafka Streams end-to-end exactly-once guarantee with other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensure that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/display/KAFKA/KIP-129%3A+Streams+Exactly-Once+Semantics). As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once v2", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. As of the 3.0.0 release, the first version of exactly-once has been deprecated. Users are encouraged to use exactly-once v2 for exactly-once processing from now on, and prepare by upgrading their brokers if necessary. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics). To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **StreamsConfig.EXACTLY_ONCE_V2** (requires brokers version 2.5 or newer). For more information, see the [Kafka Streams Configs](/40/streams/developer-guide/config-streams.html) section. 
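As a minimal, hedged sketch of the `processing.guarantee` setting described above (the application id and bootstrap servers below are placeholder values, not part of this change), enabling exactly-once v2 in a Streams application looks roughly like this:

    import java.util.Properties;
    import org.apache.kafka.streams.StreamsConfig;

    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "eos-example");          // placeholder
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");    // placeholder
    // Switch from the default at_least_once to exactly-once v2 (requires brokers 2.5 or newer)
    props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);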
diff --git a/content/en/40/streams/developer-guide/datatypes.md b/content/en/40/streams/developer-guide/datatypes.md index e58fe7254..7db195272 100644 --- a/content/en/40/streams/developer-guide/datatypes.md +++ b/content/en/40/streams/developer-guide/datatypes.md @@ -85,7 +85,7 @@ Apache Kafka includes several built-in serde implementations for Java primitives org.apache.kafka kafka-clients - 2.8.0 + 4.0.0 This artifact provides the following serde implementations under the package [org.apache.kafka.common.serialization](https://github.com/apache/kafka/blob/4.0/clients/src/main/java/org/apache/kafka/common/serialization), which you can leverage when e.g., defining default serializers in your Streams configuration. diff --git a/content/en/40/streams/developer-guide/dsl-api.md b/content/en/40/streams/developer-guide/dsl-api.md index 1cacb087a..06858cab6 100644 --- a/content/en/40/streams/developer-guide/dsl-api.md +++ b/content/en/40/streams/developer-guide/dsl-api.md @@ -1644,11 +1644,11 @@ Beyond the aforementioned stateless and stateful transformations, you may also ## Operations and concepts * `KStream#process`: Process all records in a stream, one record at a time, by applying a `Processor` (provided by a given `ProcessorSupplier`); - * `KStream#processValues`: Process all records in a stream, one record at a time, by applying a `FixedKeyProcessor` (provided by a given `FixedKeyProcessorSupplier`); + * `KStream#processValues`: Process all records in a stream, one record at a time, by applying a `FixedKeyProcessor` (provided by a given `FixedKeyProcessorSupplier`) [**CAUTION:** If you are deploying a new Kafka Streams application and are using the "merge repartition topics" optimization, you should enable the fix for [KAFKA-19668](https://issues.apache.org/jira/browse/KAFKA-19668) to avoid compatibility issues for future upgrades to newer versions of Kafka Streams; for more details, see the migration guide below]; * `Processor`: A processor of key-value pair records; - * `ContextualProcessor`: An abstract implementation of `Processor` that manages the `ProcessorContext` instance. + * `ContextualProcessor`: An abstract implementation of `Processor` that manages the `ProcessorContext` instance; * `FixedKeyProcessor`: A processor of key-value pair records where keys are immutable; - * `ContextualFixedKeyProcessor`: An abstract implementation of `FixedKeyProcessor` that manages the `FixedKeyProcessorContext` instance. + * `ContextualFixedKeyProcessor`: An abstract implementation of `FixedKeyProcessor` that manages the `FixedKeyProcessorContext` instance; * `ProcessorSupplier`: A processor supplier that can create one or more `Processor` instances; and * `FixedKeyProcessorSupplier`: A processor supplier that can create one or more `FixedKeyProcessor` instances. @@ -1920,6 +1920,18 @@ The following deprecated methods are no longer available in Kafka Streams: The Processor API now serves as a unified replacement for all these methods. It simplifies the API surface while maintaining support for both stateless and stateful operations. +**CAUTION:** If you are using `KStream.transformValues()` and you have the "merge repartition topics" optimization enabled, rewriting your program to `KStream.processValues()` might not be safe due to [KAFKA-19668](https://issues.apache.org/jira/browse/KAFKA-19668). In this case, you should not upgrade to Kafka Streams 4.0.0 or 4.1.0, but use Kafka Streams 4.0.1 instead, which contains a fix.
Note that the fix is not enabled by default for backward compatibility reasons; you need to enable it by setting the config `__enable.process.processValue.fix__ = true` and passing it into the `StreamsBuilder()` constructor. + + + final Properties properties = new Properties(); + properties.put(StreamsConfig.APPLICATION_ID_CONFIG, ...); + properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, ...); + properties.put(TopologyConfig.InternalConfig.ENABLE_PROCESS_PROCESSVALUE_FIX, true); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(properties))); + +It is recommended that you compare the output of `Topology.describe()` for the old and new topology to verify that the rewrite to `processValues()` is correct and does not introduce any incompatibilities. You should also test the upgrade in a non-production environment. + ## Migration Examples To migrate from the deprecated `transform`, `transformValues`, `flatTransform`, and `flatTransformValues` methods to the Processor API (PAPI) in Kafka Streams, let's revisit the previous examples. The new `process` and `processValues` methods enable a more flexible and reusable approach by requiring implementations of the `Processor` or `FixedKeyProcessor` interfaces. diff --git a/content/en/40/streams/upgrade-guide.md b/content/en/40/streams/upgrade-guide.md index 8ed5b4231..ee6dea2e3 100644 --- a/content/en/40/streams/upgrade-guide.md +++ b/content/en/40/streams/upgrade-guide.md @@ -58,6 +58,7 @@ In this release, eos-v1 (Exactly Once Semantics version 1) is no longer supporte * [Old processor APIs](https://issues.apache.org/jira/browse/KAFKA-12829) * [KStream#through() in both Java and Scala](https://issues.apache.org/jira/browse/KAFKA-12823) * ["transformer" methods and classes in both Java and Scala](https://issues.apache.org/jira/browse/KAFKA-16339) + * migrating from `KStream#transformValues()` to `KStream#processValues()` might not be safe due to [KAFKA-19668](https://issues.apache.org/jira/browse/KAFKA-19668). Please refer to the [migration guide](/40/streams/developer-guide/dsl-api.html#transformers-removal-and-migration-to-processors) for more details. * [kstream.KStream#branch in both Java and Scala](https://issues.apache.org/jira/browse/KAFKA-12824) * [builder methods for Time/Session/Join/SlidingWindows](https://issues.apache.org/jira/browse/KAFKA-16332) * [KafkaStreams#setUncaughtExceptionHandler()](https://issues.apache.org/jira/browse/KAFKA-12827) @@ -206,7 +207,7 @@ Kafka Streams does not send a "leave group" request when an instance is closed. * `KStream KStream.process(ProcessorSupplier, ...)` * `KStream KStream.processValues(FixedKeyProcessorSupplier, ...)` -Both new methods have multiple overloads and return a `KStream` instead of `void` as the deprecated `process()` methods did. In addition, `FixedKeyProcessor`, `FixedKeyRecord`, `FixedKeyProcessorContext`, and `ContextualFixedKeyProcessor` are introduced to guard against disallowed key modification inside `processValues()`. Furthermore, `ProcessingContext` is added for a better interface hierarchy. +Both new methods have multiple overloads and return a `KStream` instead of `void` as the deprecated `process()` methods did. In addition, `FixedKeyProcessor`, `FixedKeyRecord`, `FixedKeyProcessorContext`, and `ContextualFixedKeyProcessor` are introduced to guard against disallowed key modification inside `processValues()`. Furthermore, `ProcessingContext` is added for a better interface hierarchy.
**CAUTION:** The newly added `KStream.processValues()` method introduced a regression bug ([KAFKA-19668](https://issues.apache.org/jira/browse/KAFKA-19668)). If you have "merge repartition topics" optimization enabled, it is not safe to migrate from `transformValues()` to `processValues()` in 3.3.0 release. The bug is only fixed with Kafka Streams 4.0.1, 4.1.1, and 4.2.0. For more details, please refer to the [migration guide](/40/streams/developer-guide/dsl-api.html#transformers-removal-and-migration-to-processors). Emitting a windowed aggregation result only after a window is closed is currently supported via the `suppress()` operator. However, `suppress()` uses an in-memory implementation and does not support RocksDB. To close this gap, [KIP-825](https://cwiki.apache.org/confluence/display/KAFKA/KIP-825%3A+introduce+a+new+API+to+control+when+aggregated+results+are+produced) introduces "emit strategies", which are built into the aggregation operator directly to use the already existing RocksDB store. `TimeWindowedKStream.emitStrategy(EmitStrategy)` and `SessionWindowedKStream.emitStrategy(EmitStrategy)` allow picking between "emit on window update" (default) and "emit on window close" strategies. Additionally, a few new emit metrics are added, as well as a necessary new method, `SessionStore.findSessions(long, long)`. diff --git a/content/en/41/_index.md b/content/en/41/_index.md new file mode 100644 index 000000000..344218095 --- /dev/null +++ b/content/en/41/_index.md @@ -0,0 +1,10 @@ +--- +title: AK 4.1.X +description: Documentation for AK 4.1.X +weight: +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + diff --git a/content/en/41/apis/_index.md b/content/en/41/apis/_index.md new file mode 100644 index 000000000..3b0756a52 --- /dev/null +++ b/content/en/41/apis/_index.md @@ -0,0 +1,10 @@ +--- +title: APIs +description: +weight: 2 +tags: ['kafka', 'docs', 'apis'] +aliases: +keywords: +type: docs +--- + diff --git a/content/en/41/apis/api.md b/content/en/41/apis/api.md new file mode 100644 index 000000000..b6679647f --- /dev/null +++ b/content/en/41/apis/api.md @@ -0,0 +1,114 @@ +--- +title: API +description: +weight: 1 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +Kafka includes five core apis: + + 1. The Producer API allows applications to send streams of data to topics in the Kafka cluster. + 2. The Consumer API allows applications to read streams of data from topics in the Kafka cluster. + 3. The Streams API allows transforming streams of data from input topics to output topics. + 4. The Connect API allows implementing connectors that continually pull from some source system or application into Kafka or push from Kafka into some sink system or application. + 5. The Admin API allows managing and inspecting topics, brokers, and other Kafka objects. +Kafka exposes all its functionality over a language independent protocol which has clients available in many programming languages. However only the Java clients are maintained as part of the main Kafka project, the others are available as independent open source projects. A list of non-Java clients is available [here](https://cwiki.apache.org/confluence/x/3gDVAQ). + +# Producer API + +The Producer API allows applications to send streams of data to topics in the Kafka cluster. + +Examples of using the producer are shown in the [javadocs](/41/javadoc/index.html?org/apache/kafka/clients/producer/KafkaProducer.html "Kafka 4.1 Javadoc"). 
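As a brief, hedged sketch to complement the javadoc pointer above (the topic name, key, value, and broker address are placeholder values), sending a single record with the Java producer looks roughly like this:

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;

    final Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // placeholder
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

    try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
        // send() is asynchronous; use the returned Future or a callback to confirm delivery
        producer.send(new ProducerRecord<>("my-topic", "key", "value"));
    }

See the javadocs linked above for the full API, including callbacks and transactional usage.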
+ +To use the producer, add the following Maven dependency to your project: + + + + org.apache.kafka + kafka-clients + 4.1.0 + + +# Consumer API + +The Consumer API allows applications to read streams of data from topics in the Kafka cluster. + +Examples of using the consumer are shown in the [javadocs](/41/javadoc/index.html?org/apache/kafka/clients/consumer/KafkaConsumer.html "Kafka 4.1 Javadoc"). + +To use the consumer, add the following Maven dependency to your project: + + + + org.apache.kafka + kafka-clients + 4.1.0 + + +# Share Consumer API (Preview) + +The Share Consumer API (Preview) enables applications within a share group to cooperatively consume and process data from Kafka topics. + +Examples of using the share consumer are shown in the [javadocs](/41/javadoc/index.html?org/apache/kafka/clients/consumer/KafkaShareConsumer.html "Kafka 4.1 Javadoc"). + +To use the share consumer, add the following Maven dependency to your project: + + + + org.apache.kafka + kafka-clients + 4.1.0 + + +# Streams API + +The [Streams](/41/streams) API allows transforming streams of data from input topics to output topics. + +Examples of using this library are shown in the [javadocs](/41/javadoc/index.html?org/apache/kafka/streams/KafkaStreams.html "Kafka 4.1 Javadoc"). + +Additional documentation on using the Streams API is available [here](/41/streams). + +To use Kafka Streams, add the following Maven dependency to your project: + + + + org.apache.kafka + kafka-streams + 4.1.0 + + +When using Scala you may optionally include the `kafka-streams-scala` library. Additional documentation on using the Kafka Streams DSL for Scala is available [in the developer guide](/41/streams/developer-guide/dsl-api.html#scala-dsl). + +To use Kafka Streams DSL for Scala 2.13, add the following Maven dependency to your project: + + + + org.apache.kafka + kafka-streams-scala_2.13 + 4.1.0 + + +# Connect API + +The Connect API allows implementing connectors that continually pull from some source data system into Kafka or push from Kafka into some sink data system. + +Many users of Connect won't need to use this API directly, though, they can use pre-built connectors without needing to write any code. Additional information on using Connect is available [here](/documentation.html#connect). + +Those who want to implement custom connectors can see the [javadoc](/41/javadoc/index.html?org/apache/kafka/connect "Kafka 4.1 Javadoc"). + +# Admin API + +The Admin API supports managing and inspecting topics, brokers, acls, and other Kafka objects. + +To use the Admin API, add the following Maven dependency to your project: + + + + org.apache.kafka + kafka-clients + 4.1.0 + + +For more information about the Admin APIs, see the [javadoc](/41/javadoc/index.html?org/apache/kafka/clients/admin/Admin.html "Kafka 4.1 Javadoc"). 
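As a short, hedged sketch of the Admin API described above (the broker address, topic name, partition count, and replication factor are placeholder values), creating a topic programmatically looks roughly like this:

    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.NewTopic;

    final Properties props = new Properties();
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

    try (Admin admin = Admin.create(props)) {
        // Create a topic with 3 partitions and replication factor 1; block until the broker responds
        admin.createTopics(List.of(new NewTopic("my-new-topic", 3, (short) 1))).all().get();
    }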
diff --git a/content/en/41/configuration/_index.md b/content/en/41/configuration/_index.md new file mode 100644 index 000000000..70cf6016c --- /dev/null +++ b/content/en/41/configuration/_index.md @@ -0,0 +1,10 @@ +--- +title: Configuration +description: +weight: 3 +tags: ['kafka', 'docs', 'configuration'] +aliases: +keywords: +type: docs +--- + diff --git a/content/en/41/configuration/configuration.md b/content/en/41/configuration/configuration.md new file mode 100644 index 000000000..eef8ea503 --- /dev/null +++ b/content/en/41/configuration/configuration.md @@ -0,0 +1,448 @@ +--- +title: Configuration +description: +weight: 1 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +Kafka uses key-value pairs in the [property file format](https://en.wikipedia.org/wiki/.properties) for configuration. These values can be supplied either from a file or programmatically. + +# Broker Configs + +The essential configurations are the following: + + * `node.id` + * `log.dirs` + * `process.roles` + * `controller.quorum.bootstrap.servers` +Topic configurations and defaults are discussed in more detail below. {{< include-html file="/static/41/generated/kafka_config.html" >}} + +More details about broker configuration can be found in the scala class `kafka.server.KafkaConfig`. + +## Updating Broker Configs + +From Kafka version 1.1 onwards, some of the broker configs can be updated without restarting the broker. See the `Dynamic Update Mode` column in Broker Configs for the update mode of each broker config. + + * `read-only`: Requires a broker restart for update + * `per-broker`: May be updated dynamically for each broker + * `cluster-wide`: May be updated dynamically as a cluster-wide default. May also be updated as a per-broker value for testing. + +To alter the current broker configs for broker id 0 (for example, the number of log cleaner threads): + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --alter --add-config log.cleaner.threads=2 + +To describe the current dynamic broker configs for broker id 0: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe + +To delete a config override and revert to the statically configured or default value for broker id 0 (for example, the number of log cleaner threads): + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --alter --delete-config log.cleaner.threads + +Some configs may be configured as a cluster-wide default to maintain consistent values across the whole cluster. All brokers in the cluster will process the cluster default update. For example, to update log cleaner threads on all brokers: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-default --alter --add-config log.cleaner.threads=2 + +To describe the currently configured dynamic cluster-wide default configs: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-default --describe + +All configs that are configurable at cluster level may also be configured at per-broker level (e.g. for testing). 
If a config value is defined at different levels, the following order of precedence is used: + + * Dynamic per-broker config stored in the metadata log + * Dynamic cluster-wide default config stored in the metadata log + * Static broker config from `server.properties` + * Kafka default, see broker configs + + + +### Updating SSL Keystore of an Existing Listener + +Brokers may be configured with SSL keystores with short validity periods to reduce the risk of compromised certificates. Keystores may be updated dynamically without restarting the broker. The config name must be prefixed with the listener prefix `listener.name.{listenerName}.` so that only the keystore config of a specific listener is updated. The following configs may be updated in a single alter request at per-broker level: + + * `ssl.keystore.type` + * `ssl.keystore.location` + * `ssl.keystore.password` + * `ssl.key.password` + +If the listener is the inter-broker listener, the update is allowed only if the new keystore is trusted by the truststore configured for that listener. For other listeners, no trust validation is performed on the keystore by the broker. Certificates must be signed by the same certificate authority that signed the old certificate to avoid any client authentication failures. + +### Updating SSL Truststore of an Existing Listener + +Broker truststores may be updated dynamically without restarting the broker to add or remove certificates. Updated truststore will be used to authenticate new client connections. The config name must be prefixed with the listener prefix `listener.name.{listenerName}.` so that only the truststore config of a specific listener is updated. The following configs may be updated in a single alter request at per-broker level: + + * `ssl.truststore.type` + * `ssl.truststore.location` + * `ssl.truststore.password` + +If the listener is the inter-broker listener, the update is allowed only if the existing keystore for that listener is trusted by the new truststore. For other listeners, no trust validation is performed by the broker before the update. Removal of CA certificates used to sign client certificates from the new truststore can lead to client authentication failures. + +### Updating Default Topic Configuration + +Default topic configuration options used by brokers may be updated without broker restart. The configs are applied to topics without a topic config override for the equivalent per-topic config. One or more of these configs may be overridden at cluster-default level used by all brokers. + + * `log.segment.bytes` + * `log.roll.ms` + * `log.roll.hours` + * `log.roll.jitter.ms` + * `log.roll.jitter.hours` + * `log.index.size.max.bytes` + * `log.flush.interval.messages` + * `log.flush.interval.ms` + * `log.retention.bytes` + * `log.retention.ms` + * `log.retention.minutes` + * `log.retention.hours` + * `log.index.interval.bytes` + * `log.cleaner.delete.retention.ms` + * `log.cleaner.min.compaction.lag.ms` + * `log.cleaner.max.compaction.lag.ms` + * `log.cleaner.min.cleanable.ratio` + * `log.cleanup.policy` + * `log.segment.delete.delay.ms` + * `unclean.leader.election.enable` + * `min.insync.replicas` + * `max.message.bytes` + * `compression.type` + * `log.preallocate` + * `log.message.timestamp.type` + + + +### Updating Log Cleaner Configs + +Log cleaner configs may be updated dynamically at cluster-default level used by all brokers. The changes take effect on the next iteration of log cleaning. 
One or more of these configs may be updated: + + * `log.cleaner.threads` + * `log.cleaner.io.max.bytes.per.second` + * `log.cleaner.dedupe.buffer.size` + * `log.cleaner.io.buffer.size` + * `log.cleaner.io.buffer.load.factor` + * `log.cleaner.backoff.ms` + + + +### Updating Thread Configs + +The size of various thread pools used by the broker may be updated dynamically at cluster-default level used by all brokers. Updates are restricted to the range `currentSize / 2` to `currentSize * 2` to ensure that config updates are handled gracefully. + + * `num.network.threads` + * `num.io.threads` + * `num.replica.fetchers` + * `num.recovery.threads.per.data.dir` + * `log.cleaner.threads` + * `background.threads` + * `remote.log.reader.threads` + * `remote.log.manager.copier.thread.pool.size` + * `remote.log.manager.expiration.thread.pool.size` + + + +### Updating ConnectionQuota Configs + +The maximum number of connections allowed for a given IP/host by the broker may be updated dynamically at cluster-default level used by all brokers. The changes will apply for new connection creations and the existing connections count will be taken into account by the new limits. + + * `max.connections.per.ip` + * `max.connections.per.ip.overrides` + + + +### Adding and Removing Listeners + +Listeners may be added or removed dynamically. When a new listener is added, security configs of the listener must be provided as listener configs with the listener prefix `listener.name.{listenerName}.`. If the new listener uses SASL, the JAAS configuration of the listener must be provided using the JAAS configuration property `sasl.jaas.config` with the listener and mechanism prefix. See JAAS configuration for Kafka brokers for details. + +In Kafka version 1.1.x, the listener used by the inter-broker listener may not be updated dynamically. To update the inter-broker listener to a new listener, the new listener may be added on all brokers without restarting the broker. A rolling restart is then required to update `inter.broker.listener.name`. + +In addition to all the security configs of new listeners, the following configs may be updated dynamically at per-broker level: + + * `listeners` + * `advertised.listeners` + * `listener.security.protocol.map` + +Inter-broker listener must be configured using the static broker configuration `inter.broker.listener.name` or `security.inter.broker.protocol`. + +# Topic Configs + +Configurations pertinent to topics have both a server default as well an optional per-topic override. If no per-topic configuration is given the server default is used. The override can be set at topic creation time by giving one or more `--config` options. This example creates a topic named _my-topic_ with a custom max message size and flush rate: + + + $ bin/kafka-topics.sh --bootstrap-server localhost:9092 --create --topic my-topic --partitions 1 \ + --replication-factor 1 --config max.message.bytes=64000 --config flush.messages=1 + +Overrides can also be changed or set later using the alter configs command. 
This example updates the max message size for _my-topic_ : + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type topics --entity-name my-topic + --alter --add-config max.message.bytes=128000 + +To check overrides set on the topic you can do + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type topics --entity-name my-topic --describe + +To remove an override you can do + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type topics --entity-name my-topic + --alter --delete-config max.message.bytes + +Below is the topic configuration. The server's default configuration for this property is given under the Server Default Property heading. A given server default config value only applies to a topic if it does not have an explicit topic config override. {{< include-html file="/static/41/generated/topic_config.html" >}} + +# Group Configs + +Below is the group configuration: {{< include-html file="/static/41/generated/group_config.html" >}} + +# Producer Configs + +Below is the producer configuration: {{< include-html file="/static/41/generated/producer_config.html" >}} + +# Consumer Configs + +Below is the consumer and share consumer configuration: {{< include-html file="/static/41/generated/consumer_config.html" >}} + +# Kafka Connect Configs + +Below is the Kafka Connect framework configuration. {{< include-html file="/static/41/generated/connect_config.html" >}} + +## Source Connector Configs + +Below is the source connector configuration. {{< include-html file="/static/41/generated/source_connector_config.html" >}} + +## Sink Connector Configs + +Below is the sink connector configuration. {{< include-html file="/static/41/generated/sink_connector_config.html" >}} + +# Kafka Streams Configs + +Below is the Kafka Streams client library configuration. {{< include-html file="/static/41/generated/streams_config.html" >}} + +# Admin Configs + +Below is the Kafka Admin client library configuration. {{< include-html file="/static/41/generated/admin_client_config.html" >}} + +# MirrorMaker Configs + +Below is the configuration of the connectors that make up MirrorMaker 2. + +## MirrorMaker Common Configs + +Below is the common configuration that applies to all three connectors. {{< include-html file="/static/41/generated/mirror_connector_config.html" >}} + +## MirrorMaker Source Configs + +Below is the configuration of MirrorMaker 2 source connector for replicating topics. {{< include-html file="/static/41/generated/mirror_source_config.html" >}} + +## MirrorMaker Checkpoint Configs + +Below is the configuration of MirrorMaker 2 checkpoint connector for emitting consumer offset checkpoints. {{< include-html file="/static/41/generated/mirror_checkpoint_config.html" >}} + +## MirrorMaker HeartBeat Configs + +Below is the configuration of MirrorMaker 2 heartbeat connector for checking connectivity between connectors and clusters. {{< include-html file="/static/41/generated/mirror_heartbeat_config.html" >}} + +# System Properties + +Kafka supports some configuration that can be enabled through Java system properties. System properties are usually set by passing the -D flag to the Java virtual machine in which Kafka components are running. Below are the supported system properties. + + * #### org.apache.kafka.sasl.oauthbearer.allowed.files + +This system property is used to determine which files, if any, are allowed to be read by the SASL OAUTHBEARER plugin. This property accepts comma-separated list of files. 
By default the value is an empty list. + +If users want to enable some files, users need to explicitly set the system property like below. + + -Dorg.apache.kafka.sasl.oauthbearer.allowed.files=/tmp/token,/tmp/private_key.pem + +Since:| 4.1.0 +---|--- +Default Value:| + * #### org.apache.kafka.sasl.oauthbearer.allowed.urls + +This system property is used to set the allowed URLs as SASL OAUTHBEARER token or jwks endpoints. This property accepts comma-separated list of URLs. By default the value is an empty list. + +If users want to enable some URLs, users need to explicitly set the system property like below. + + -Dorg.apache.kafka.sasl.oauthbearer.allowed.urls=https://www.example.com,file:///tmp/token + +Since:| 4.0.0 +---|--- +Default Value:| + * #### org.apache.kafka.disallowed.login.modules + +This system property is used to disable the problematic login modules usage in SASL JAAS configuration. This property accepts comma-separated list of loginModule names. By default **com.sun.security.auth.module.JndiLoginModule** loginModule is disabled. + +If users want to enable JndiLoginModule, users need to explicitly reset the system property like below. We advise the users to validate configurations and only allow trusted JNDI configurations. For more details [CVE-2023-25194](/community/cve-list/#CVE-2023-25194). + + -Dorg.apache.kafka.disallowed.login.modules= + +To disable more loginModules, update the system property with comma-separated loginModule names. Make sure to explicitly add **JndiLoginModule** module name to the comma-separated list like below. + + -Dorg.apache.kafka.disallowed.login.modules=com.sun.security.auth.module.JndiLoginModule,com.ibm.security.auth.module.LdapLoginModule,com.ibm.security.auth.module.Krb5LoginModule + +Since:| 3.4.0 +---|--- +Default Value:| com.sun.security.auth.module.JndiLoginModule + * #### org.apache.kafka.automatic.config.providers + +This system property controls the automatic loading of ConfigProvider implementations in Apache Kafka. ConfigProviders are used to dynamically supply configuration values from sources such as files, directories, or environment variables. This property accepts a comma-separated list of ConfigProvider names. By default, all built-in ConfigProviders are enabled, including **FileConfigProvider** , **DirectoryConfigProvider** , and **EnvVarConfigProvider**. + +If users want to disable all automatic ConfigProviders, they need to explicitly set the system property as shown below. Disabling automatic ConfigProviders is recommended in environments where configuration data comes from untrusted sources or where increased security is required. For more details, see [CVE-2024-31141](/community/cve-list/#CVE-2024-31141). + + -Dorg.apache.kafka.automatic.config.providers=none + +To allow specific ConfigProviders, update the system property with a comma-separated list of fully qualified ConfigProvider class names. For example, to enable only the **EnvVarConfigProvider** , set the property as follows: + + -Dorg.apache.kafka.automatic.config.providers=org.apache.kafka.common.config.provider.EnvVarConfigProvider + +To use multiple ConfigProviders, include their names in a comma-separated list as shown below: + + -Dorg.apache.kafka.automatic.config.providers=org.apache.kafka.common.config.provider.FileConfigProvider,org.apache.kafka.common.config.provider.EnvVarConfigProvider + +Since:| 3.8.0 +---|--- +Default Value:| All built-in ConfigProviders are enabled + + + +# Tiered Storage Configs + +Below is the Tiered Storage configuration. 
{{< include-html file="/static/41/generated/remote_log_manager_config.html" >}} {{< include-html file="/static/41/generated/remote_log_metadata_manager_config.html" >}} + +# Configuration Providers + +Use configuration providers to load configuration data from external sources. This might include sensitive information, such as passwords, API keys, or other credentials. + +You have the following options: + + * Use a custom provider by creating a class implementing the [`ConfigProvider`](/41/javadoc/org/apache/kafka/common/config/provider/ConfigProvider.html) interface and packaging it into a JAR file. + * Use a built-in provider: + * [`DirectoryConfigProvider`](/41/javadoc/org/apache/kafka/common/config/provider/DirectoryConfigProvider.html) + * [`EnvVarConfigProvider`](/41/javadoc/org/apache/kafka/common/config/provider/EnvVarConfigProvider.html) + * [`FileConfigProvider`](/41/javadoc/org/apache/kafka/common/config/provider/FileConfigProvider.html) + + + +To use a configuration provider, specify it in your configuration using the `config.providers` property. + +## Using Configuration Providers + +Configuration providers allow you to pass parameters and retrieve configuration data from various sources. + +To specify configuration providers, you use a comma-separated list of aliases and the fully-qualified class names that implement the configuration providers: + + + config.providers=provider1,provider2 + config.providers.provider1.class=com.example.Provider1 + config.providers.provider2.class=com.example.Provider2 + +Each provider can have its own set of parameters, which are passed in a specific format: + + + config.providers..param.= + +The `ConfigProvider` interface serves as a base for all configuration providers. Custom implementations of this interface can be created to retrieve configuration data from various sources. You can package the implementation as a JAR file, add the JAR to your classpath, and reference the provider's class in your configuration. + +**Example custom provider configuration** + + + config.providers=customProvider + config.providers.customProvider.class=com.example.customProvider + config.providers.customProvider.param.param1=value1 + config.providers.customProvider.param.param2=value2 + +## DirectoryConfigProvider + +The `DirectoryConfigProvider` retrieves configuration data from files stored in a specified directory. + +Each file represents a key, and its content is the value. This provider is useful for loading multiple configuration files and for organizing configuration data into separate files. + +To restrict the files that the `DirectoryConfigProvider` can access, use the `allowed.paths` parameter. This parameter accepts a comma-separated list of paths that the provider is allowed to access. If not set, all paths are allowed. + +**Example`DirectoryConfigProvider` configuration** + + + config.providers=dirProvider + config.providers.dirProvider.class=org.apache.kafka.common.config.provider.DirectoryConfigProvider + config.providers.dirProvider.param.allowed.paths=/path/to/dir1,/path/to/dir2 + +To reference a value supplied by the `DirectoryConfigProvider`, use the correct placeholder syntax: + + + ${dirProvider::} + +## EnvVarConfigProvider + +The `EnvVarConfigProvider` retrieves configuration data from environment variables. + +No specific parameters are required, as it reads directly from the specified environment variables. 
+ +The `EnvVarConfigProvider` is useful for configuring applications running in containers, for example, to load certificates or JAAS configuration from environment variables mapped from secrets. + +To restrict which environment variables the `EnvVarConfigProvider` can access, use the `allowlist.pattern` parameter. This parameter accepts a regular expression that environment variable names must match to be used by the provider. + +**Example `EnvVarConfigProvider` configuration** + + + config.providers=envVarProvider + config.providers.envVarProvider.class=org.apache.kafka.common.config.provider.EnvVarConfigProvider + config.providers.envVarProvider.param.allowlist.pattern=^MY_ENVAR1_.* + +To reference a value supplied by the `EnvVarConfigProvider`, use the correct placeholder syntax: + + + ${envVarProvider:<environment_variable_name>} + +## FileConfigProvider + +The `FileConfigProvider` retrieves configuration data from a single properties file. + +This provider is useful for loading configuration data from mounted files. + +To restrict the file paths that the `FileConfigProvider` can access, use the `allowed.paths` parameter. This parameter accepts a comma-separated list of paths that the provider is allowed to access. If not set, all paths are allowed. + +**Example `FileConfigProvider` configuration** + + + config.providers=fileProvider + config.providers.fileProvider.class=org.apache.kafka.common.config.provider.FileConfigProvider + config.providers.fileProvider.param.allowed.paths=/path/to/config1,/path/to/config2 + +To reference a value supplied by the `FileConfigProvider`, use the correct placeholder syntax: + + + ${fileProvider:<path_to_file>:<property_name>} + +## Example: Referencing files + +Here’s an example that uses a file configuration provider with Kafka Connect to provide authentication credentials to a database for a connector. + +First, create a `connector-credentials.properties` configuration file with the following credentials: + + + dbUsername=my-username + dbPassword=my-password + +Specify a `FileConfigProvider` in the Kafka Connect configuration: + +**Example Kafka Connect configuration with a `FileConfigProvider`** + + + config.providers=fileProvider + config.providers.fileProvider.class=org.apache.kafka.common.config.provider.FileConfigProvider + +Next, reference the properties from the file in the connector configuration. + +**Example connector configuration referencing file properties** + + + database.user=${fileProvider:/path/to/connector-credentials.properties:dbUsername} + database.password=${fileProvider:/path/to/connector-credentials.properties:dbPassword} + +At runtime, the configuration provider reads and extracts the values from the properties file. diff --git a/content/en/41/design/_index.md b/content/en/41/design/_index.md new file mode 100644 index 000000000..45615eb06 --- /dev/null +++ b/content/en/41/design/_index.md @@ -0,0 +1,10 @@ +--- +title: Design +description: +weight: 4 +tags: ['kafka', 'docs', 'design'] +aliases: +keywords: +type: docs +--- + diff --git a/content/en/41/design/design.md b/content/en/41/design/design.md new file mode 100644 index 000000000..988bdb408 --- /dev/null +++ b/content/en/41/design/design.md @@ -0,0 +1,488 @@ +--- +title: Design +description: +weight: 1 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Motivation + +We designed Kafka to be able to act as a unified platform for handling all the real-time data feeds a large company might have. To do this we had to think through a fairly broad set of use cases.
+ +It would have to have high-throughput to support high volume event streams such as real-time log aggregation. + +It would need to deal gracefully with large data backlogs to be able to support periodic data loads from offline systems. + +It also meant the system would have to handle low-latency delivery to handle more traditional messaging use-cases. + +We wanted to support partitioned, distributed, real-time processing of these feeds to create new, derived feeds. This motivated our partitioning and consumer model. + +Finally in cases where the stream is fed into other data systems for serving, we knew the system would have to be able to guarantee fault-tolerance in the presence of machine failures. + +Supporting these uses led us to a design with a number of unique elements, more akin to a database log than a traditional messaging system. We will outline some elements of the design in the following sections. + +# Persistence + +## Don't fear the filesystem! + +Kafka relies heavily on the filesystem for storing and caching messages. There is a general perception that "disks are slow" which makes people skeptical that a persistent structure can offer competitive performance. In fact disks are both much slower and much faster than people expect depending on how they are used; and a properly designed disk structure can often be as fast as the network. + +The key fact about disk performance is that the throughput of hard drives has been diverging from the latency of a disk seek for the last decade. As a result the performance of linear writes on a [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures) configuration with six 7200rpm SATA RAID-5 array is about 600MB/sec but the performance of random writes is only about 100k/sec--a difference of over 6000X. These linear reads and writes are the most predictable of all usage patterns, and are heavily optimized by the operating system. A modern operating system provides read-ahead and write-behind techniques that prefetch data in large block multiples and group smaller logical writes into large physical writes. A further discussion of this issue can be found in this [ACM Queue article](https://queue.acm.org/detail.cfm?id=1563874); they actually find that [sequential disk access can in some cases be faster than random memory access!](https://deliveryimages.acm.org/10.1145/1570000/1563874/jacobs3.jpg) + +To compensate for this performance divergence, modern operating systems have become increasingly aggressive in their use of main memory for disk caching. A modern OS will happily divert _all_ free memory to disk caching with little performance penalty when the memory is reclaimed. All disk reads and writes will go through this unified cache. This feature cannot easily be turned off without using direct I/O, so even if a process maintains an in-process cache of the data, this data will likely be duplicated in OS pagecache, effectively storing everything twice. + +Furthermore, we are building on top of the JVM, and anyone who has spent any time with Java memory usage knows two things: + + 1. The memory overhead of objects is very high, often doubling the size of the data stored (or worse). + 2. Java garbage collection becomes increasingly fiddly and slow as the in-heap data increases. 
+ + + +As a result of these factors using the filesystem and relying on pagecache is superior to maintaining an in-memory cache or other structure--we at least double the available cache by having automatic access to all free memory, and likely double again by storing a compact byte structure rather than individual objects. Doing so will result in a cache of up to 28-30GB on a 32GB machine without GC penalties. Furthermore, this cache will stay warm even if the service is restarted, whereas the in-process cache will need to be rebuilt in memory (which for a 10GB cache may take 10 minutes) or else it will need to start with a completely cold cache (which likely means terrible initial performance). This also greatly simplifies the code as all logic for maintaining coherency between the cache and filesystem is now in the OS, which tends to do so more efficiently and more correctly than one-off in-process attempts. If your disk usage favors linear reads then read-ahead is effectively pre-populating this cache with useful data on each disk read. + +This suggests a design which is very simple: rather than maintain as much as possible in-memory and flush it all out to the filesystem in a panic when we run out of space, we invert that. All data is immediately written to a persistent log on the filesystem without necessarily flushing to disk. In effect this just means that it is transferred into the kernel's pagecache. + +This style of pagecache-centric design is described in an [article](https://varnish-cache.org/wiki/ArchitectNotes) on the design of Varnish here (along with a healthy dose of arrogance). + +## Constant Time Suffices + +The persistent data structure used in messaging systems are often a per-consumer queue with an associated BTree or other general-purpose random access data structures to maintain metadata about messages. BTrees are the most versatile data structure available, and make it possible to support a wide variety of transactional and non-transactional semantics in the messaging system. They do come with a fairly high cost, though: Btree operations are O(log N). Normally O(log N) is considered essentially equivalent to constant time, but this is not true for disk operations. Disk seeks come at 10 ms a pop, and each disk can do only one seek at a time so parallelism is limited. Hence even a handful of disk seeks leads to very high overhead. Since storage systems mix very fast cached operations with very slow physical disk operations, the observed performance of tree structures is often superlinear as data increases with fixed cache--i.e. doubling your data makes things much worse than twice as slow. + +Intuitively a persistent queue could be built on simple reads and appends to files as is commonly the case with logging solutions. This structure has the advantage that all operations are O(1) and reads do not block writes or each other. This has obvious performance advantages since the performance is completely decoupled from the data size--one server can now take full advantage of a number of cheap, low-rotational speed 1+TB SATA drives. Though they have poor seek performance, these drives have acceptable performance for large reads and writes and come at 1/3 the price and 3x the capacity. + +Having access to virtually unlimited disk space without any performance penalty means that we can provide some features not usually found in a messaging system. 
For example, in Kafka, instead of attempting to delete messages as soon as they are consumed, we can retain messages for a relatively long period (say a week). This leads to a great deal of flexibility for consumers, as we will describe. + +# Efficiency + +We have put significant effort into efficiency. One of our primary use cases is handling web activity data, which is very high volume: each page view may generate dozens of writes. Furthermore, we assume each message published is read by at least one consumer (often many), hence we strive to make consumption as cheap as possible. + +We have also found, from experience building and running a number of similar systems, that efficiency is a key to effective multi-tenant operations. If the downstream infrastructure service can easily become a bottleneck due to a small bump in usage by the application, such small changes will often create problems. By being very fast we help ensure that the application will tip-over under load before the infrastructure. This is particularly important when trying to run a centralized service that supports dozens or hundreds of applications on a centralized cluster as changes in usage patterns are a near-daily occurrence. + +We discussed disk efficiency in the previous section. Once poor disk access patterns have been eliminated, there are two common causes of inefficiency in this type of system: too many small I/O operations, and excessive byte copying. + +The small I/O problem happens both between the client and the server and in the server's own persistent operations. + +To avoid this, our protocol is built around a "message set" abstraction that naturally groups messages together. This allows network requests to group messages together and amortize the overhead of the network roundtrip rather than sending a single message at a time. The server in turn appends chunks of messages to its log in one go, and the consumer fetches large linear chunks at a time. + +This simple optimization produces orders of magnitude speed up. Batching leads to larger network packets, larger sequential disk operations, contiguous memory blocks, and so on, all of which allows Kafka to turn a bursty stream of random message writes into linear writes that flow to the consumers. + +The other inefficiency is in byte copying. At low message rates this is not an issue, but under load the impact is significant. To avoid this we employ a standardized binary message format that is shared by the producer, the broker, and the consumer (so data chunks can be transferred without modification between them). + +The message log maintained by the broker is itself just a directory of files, each populated by a sequence of message sets that have been written to disk in the same format used by the producer and consumer. Maintaining this common format allows optimization of the most important operation: network transfer of persistent log chunks. Modern unix operating systems offer a highly optimized code path for transferring data out of pagecache to a socket; in Linux this is done with the [sendfile system call](https://man7.org/linux/man-pages/man2/sendfile.2.html). + +To understand the impact of sendfile, it is important to understand the common data path for transfer of data from file to socket: + + 1. The operating system reads data from the disk into pagecache in kernel space + 2. The application reads the data from kernel space into a user-space buffer + 3. The application writes the data back into kernel space into a socket buffer + 4. 
The operating system copies the data from the socket buffer to the NIC buffer where it is sent over the network + + + +This is clearly inefficient, there are four copies and two system calls. Using sendfile, this re-copying is avoided by allowing the OS to send the data from pagecache to the network directly. So in this optimized path, only the final copy to the NIC buffer is needed. + +We expect a common use case to be multiple consumers on a topic. Using the zero-copy optimization above, data is copied into pagecache exactly once and reused on each consumption instead of being stored in memory and copied out to user-space every time it is read. This allows messages to be consumed at a rate that approaches the limit of the network connection. + +This combination of pagecache and sendfile means that on a Kafka cluster where the consumers are mostly caught up you will see no read activity on the disks whatsoever as they will be serving data entirely from cache. + +TLS/SSL libraries operate at the user space (in-kernel `SSL_sendfile` is currently not supported by Kafka). Due to this restriction, `sendfile` is not used when SSL is enabled. For enabling SSL configuration, refer to `security.protocol` and `security.inter.broker.protocol` + +For more background on the sendfile and zero-copy support in Java, see this [article](https://developer.ibm.com/articles/j-zerocopy/). + +## End-to-end Batch Compression + +In some cases the bottleneck is actually not CPU or disk but network bandwidth. This is particularly true for a data pipeline that needs to send messages between data centers over a wide-area network. Of course, the user can always compress its messages one at a time without any support needed from Kafka, but this can lead to very poor compression ratios as much of the redundancy is due to repetition between messages of the same type (e.g. field names in JSON or user agents in web logs or common string values). Efficient compression requires compressing multiple messages together rather than compressing each message individually. + +Kafka supports this with an efficient batching format. A batch of messages can be grouped together, compressed, and sent to the server in this form. The broker decompresses the batch in order to validate it. For example, it validates that the number of records in the batch is same as what batch header states. This batch of messages is then written to disk in compressed form. The batch will remain compressed in the log and it will also be transmitted to the consumer in compressed form. The consumer decompresses any compressed data that it receives. + +Kafka supports GZIP, Snappy, LZ4 and ZStandard compression protocols. More details on compression can be found [here](https://cwiki.apache.org/confluence/x/S5qoAQ). + +# The Producer + +## Load balancing + +The producer sends data directly to the broker that is the leader for the partition without any intervening routing tier. To help the producer do this all Kafka nodes can answer a request for metadata about which servers are alive and where the leaders for the partitions of a topic are at any given time to allow the producer to appropriately direct its requests. + +The client controls which partition it publishes messages to. This can be done at random, implementing a kind of random load balancing, or it can be done by some semantic partitioning function. 
We expose the interface for semantic partitioning by allowing the user to specify a key to partition by and using this to hash to a partition (there is also an option to override the partition function if need be). For example if the key chosen was a user id then all data for a given user would be sent to the same partition. This in turn will allow consumers to make locality assumptions about their consumption. This style of partitioning is explicitly designed to allow locality-sensitive processing in consumers. + +## Asynchronous send + +Batching is one of the big drivers of efficiency, and to enable batching the Kafka producer will attempt to accumulate data in memory and to send out larger batches in a single request. The batching can be configured to accumulate no more than a fixed number of messages and to wait no longer than some fixed latency bound (say 64k or 10 ms). This allows the accumulation of more bytes to send, and few larger I/O operations on the servers. This buffering is configurable and gives a mechanism to trade off a small amount of additional latency for better throughput. + +Details on configuration and the api for the producer can be found elsewhere in the documentation. + +# The Consumer + +The Kafka consumer works by issuing "fetch" requests to the brokers leading the partitions it wants to consume. The consumer specifies its offset in the log with each request and receives back a chunk of log beginning from that position. The consumer thus has significant control over this position and can rewind it to re-consume data if need be. + +## Push vs. pull + +An initial question we considered is whether consumers should pull data from brokers or brokers should push data to the consumer. In this respect Kafka follows a more traditional design, shared by most messaging systems, where data is pushed to the broker from the producer and pulled from the broker by the consumer. Some logging-centric systems, such as [Scribe](https://github.com/facebook/scribe) and [Apache Flume](https://flume.apache.org/), follow a very different push-based path where data is pushed downstream. There are pros and cons to both approaches. However, a push-based system has difficulty dealing with diverse consumers as the broker controls the rate at which data is transferred. The goal is generally for the consumer to be able to consume at the maximum possible rate; unfortunately, in a push system this means the consumer tends to be overwhelmed when its rate of consumption falls below the rate of production (a denial of service attack, in essence). A pull-based system has the nicer property that the consumer simply falls behind and catches up when it can. This can be mitigated with some kind of backoff protocol by which the consumer can indicate it is overwhelmed, but getting the rate of transfer to fully utilize (but never over-utilize) the consumer is trickier than it seems. Previous attempts at building systems in this fashion led us to go with a more traditional pull model. + +Another advantage of a pull-based system is that it lends itself to aggressive batching of data sent to the consumer. A push-based system must choose to either send a request immediately or accumulate more data and then send it later without knowledge of whether the downstream consumer will be able to immediately process it. If tuned for low latency, this will result in sending a single message at a time only for the transfer to end up being buffered anyway, which is wasteful. 
A pull-based design fixes this as the consumer always pulls all available messages after its current position in the log (or up to some configurable max size). So one gets optimal batching without introducing unnecessary latency. + +The deficiency of a naive pull-based system is that if the broker has no data the consumer may end up polling in a tight loop, effectively busy-waiting for data to arrive. To avoid this we have parameters in our pull request that allow the consumer request to block in a "long poll" waiting until data arrives (and optionally waiting until a given number of bytes is available to ensure large transfer sizes). + +You could imagine other possible designs which would be only pull, end-to-end. The producer would locally write to a local log, and brokers would pull from that with consumers pulling from them. A similar type of "store-and-forward" producer is often proposed. This is intriguing but we felt not very suitable for our target use cases which have thousands of producers. Our experience running persistent data systems at scale led us to feel that involving thousands of disks in the system across many applications would not actually make things more reliable and would be a nightmare to operate. And in practice we have found that we can run a pipeline with strong SLAs at large scale without a need for producer persistence. + +## Consumer Position + +Keeping track of _what_ has been consumed is, surprisingly, one of the key performance points of a messaging system. + +Most messaging systems keep metadata about what messages have been consumed on the broker. That is, as a message is handed out to a consumer, the broker either records that fact locally immediately or it may wait for acknowledgement from the consumer. This is a fairly intuitive choice, and indeed for a single machine server it is not clear where else this state could go. Since the data structures used for storage in many messaging systems scale poorly, this is also a pragmatic choice--since the broker knows what is consumed it can immediately delete it, keeping the data size small. + +What is perhaps not obvious is that getting the broker and consumer to come into agreement about what has been consumed is not a trivial problem. If the broker records a message as **consumed** immediately every time it is handed out over the network, then if the consumer fails to process the message (say because it crashes or the request times out or whatever) that message will be lost. To solve this problem, many messaging systems add an acknowledgement feature which means that messages are only marked as **sent** not **consumed** when they are sent; the broker waits for a specific acknowledgement from the consumer to record the message as **consumed**. This strategy fixes the problem of losing messages, but creates new problems. First of all, if the consumer processes the message but fails before it can send an acknowledgement then the message will be consumed twice. The second problem is around performance, now the broker must keep multiple states about every single message (first to lock it so it is not given out a second time, and then to mark it as permanently consumed so that it can be removed). Tricky problems must be dealt with, like what to do with messages that are sent but never acknowledged. + +Kafka handles this differently. Our topic is divided into a set of totally ordered partitions, each of which is consumed by exactly one consumer within each subscribing consumer group at any given time. 
This means that the position of a consumer in each partition is just a single integer, the offset of the next message to consume. This makes the state about what has been consumed very small, just one number for each partition. This state can be periodically checkpointed. This makes the equivalent of message acknowledgements very cheap. + +There is a side benefit of this decision. A consumer can deliberately _rewind_ back to an old offset and re-consume data. This violates the common contract of a queue, but turns out to be an essential feature for many consumers. For example, if the consumer code has a bug and is discovered after some messages are consumed, the consumer can re-consume those messages once the bug is fixed. + +## Offline Data Load + +Scalable persistence allows for the possibility of consumers that only consume periodically, such as batch data loads that periodically bulk-load data into an offline system such as Hadoop or a relational data warehouse. + +In the case of Hadoop we parallelize the data load by splitting the load over individual map tasks, one for each node/topic/partition combination, allowing full parallelism in the loading. Hadoop provides the task management, and tasks which fail can restart without danger of duplicate data--they simply restart from their original position. + +## Static Membership + +Static membership aims to improve the availability of stream applications, consumer groups and other applications built on top of the group rebalance protocol. The rebalance protocol relies on the group coordinator to allocate entity ids to group members. These generated ids are ephemeral and will change when members restart and rejoin. For consumer based apps, this "dynamic membership" can cause a large percentage of tasks to be re-assigned to different instances during administrative operations such as code deploys, configuration updates and periodic restarts. For applications with large local state, shuffled tasks need a long time to recover their state before processing, causing applications to be partially or entirely unavailable. Motivated by this observation, Kafka’s group management protocol allows group members to provide persistent entity ids. Group membership remains unchanged based on those ids, thus no rebalance will be triggered. + +If you want to use static membership, + + * Upgrade both broker cluster and client apps to 2.3 or beyond, and also make sure the upgraded brokers are using `inter.broker.protocol.version` of 2.3 or beyond as well. + * Set the config `ConsumerConfig#GROUP_INSTANCE_ID_CONFIG` to a unique value for each consumer instance under one group. + * For Kafka Streams applications, it is sufficient to set a unique `ConsumerConfig#GROUP_INSTANCE_ID_CONFIG` per KafkaStreams instance, independent of the number of used threads for an instance. + +If your broker is on a version older than 2.3, but you choose to set `ConsumerConfig#GROUP_INSTANCE_ID_CONFIG` on the client side, the application will detect the broker version and throw an UnsupportedException. If you accidentally configure duplicate ids for different instances, a fencing mechanism on the broker side will inform the duplicate client to shut down immediately by triggering a `org.apache.kafka.common.errors.FencedInstanceIdException`.
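+
+As a hedged, minimal sketch, static membership is enabled purely through client configuration; the bootstrap address, group, topic, and instance id below are illustrative:
+
+    import java.util.List;
+    import java.util.Properties;
+    import org.apache.kafka.clients.consumer.ConsumerConfig;
+    import org.apache.kafka.clients.consumer.KafkaConsumer;
+    import org.apache.kafka.common.serialization.StringDeserializer;
+
+    public class StaticMemberSketch {
+        public static void main(String[] args) {
+            Properties props = new Properties();
+            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+            props.put(ConsumerConfig.GROUP_ID_CONFIG, "payments-processor");
+            // A stable, unique id per instance is what makes the member "static".
+            props.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, "payments-processor-1");
+            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+
+            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
+                consumer.subscribe(List.of("payments"));
+                // A restart that rejoins with the same group.instance.id within the
+                // session timeout does not trigger a rebalance.
+            }
+        }
+    }
+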
For more details, see [KIP-345](https://cwiki.apache.org/confluence/x/kRg0BQ) + +# Message Delivery Semantics + +Now that we understand a little about how producers and consumers work, let's discuss the semantic guarantees Kafka provides between producer and consumer. Clearly there are multiple possible message delivery guarantees that could be provided: + + * _At most once_ --Messages may be lost but are never redelivered. + * _At least once_ --Messages are never lost but may be redelivered. + * _Exactly once_ --Each message is processed once and only once. + +It's worth noting that this breaks down into two problems: the durability guarantees for publishing a message and the guarantees when consuming a message. + +Many systems claim to provide "exactly-once" delivery semantics, but it is important to read the fine print, because sometimes these claims are misleading (i.e. they don't translate to the case where consumers or producers can fail, cases where there are multiple consumer processes, or cases where data written to disk can be lost). + +Kafka's semantics are straightforward. When publishing a message we have a notion of the message being "committed" to the log. A message is considered committed only when all replicas in the in-sync replicas (ISR) for that partition have applied it to their log. Once a published message is committed, it will not be lost as long as one broker that replicates the partition to which this message was written remains "alive". The definition of committed message and alive partition as well as a description of which types of failures we attempt to handle will be described in more detail in the next section. For now let's assume a perfect, lossless broker and try to understand the guarantees to the producer and consumer. If a producer attempts to publish a message and experiences a network error, it cannot be sure if this error happened before or after the message was committed. This is similar to the semantics of inserting into a database table with an autogenerated key. + +Prior to 0.11.0.0, if a producer failed to receive a response indicating that a message was committed, it had little choice but to resend the message. This provides at-least-once delivery semantics since the message may be written to the log again during resending if the original request had in fact succeeded. Since 0.11.0.0, the Kafka producer also supports an idempotent delivery option which guarantees that resending will not result in duplicate entries in the log. To achieve this, the broker assigns each producer an ID and deduplicates messages using a sequence number that is sent by the producer along with every message. Also beginning with 0.11.0.0, the producer supports the ability to send messages atomically to multiple topic partitions using transactions, so that either all messages are successfully written or none of them are. + +Not all use cases require such strong guarantees. For use cases which are latency-sensitive, we allow the producer to specify the durability level it desires. If the producer specifies that it wants to wait on the message being committed, this can take on the order of 10 ms. However the producer can also specify that it wants to perform the send completely asynchronously or that it wants to wait only until the leader (but not necessarily the followers) have the message. + +Now let's describe the semantics from the point of view of the consumer. All replicas have the exact same log with the same offsets. The consumer controls its position in this log. 
If the consumer never crashed it could just store this position in memory, but if the consumer fails and we want this topic partition to be taken over by another process, the new process will need to choose an appropriate position from which to start processing. Let's say the consumer reads some messages -- it has several options for processing the messages and updating its position. + + 1. It can read the messages, then save its position in the log, and finally process the messages. In this case there is a possibility that the consumer process crashes after saving its position but before saving the output of its message processing. In this case the process that took over processing would start at the saved position even though a few messages prior to that position had not been processed. This corresponds to "at-most-once" semantics as in the case of a consumer failure messages may not be processed. + 2. It can read the messages, process the messages, and finally save its position. In this case there is a possibility that the consumer process crashes after processing messages but before saving its position. In this case when the new process takes over the first few messages it receives will already have been processed. This corresponds to the "at-least-once" semantics in the case of consumer failure. In many cases messages have a primary key and so the updates are idempotent (receiving the same message twice just overwrites a record with another copy of itself). + + +So what about exactly-once semantics? When consuming from a Kafka topic and producing to another topic (as in a [Kafka Streams](https://kafka.apache.org/streams) application), we can leverage the new transactional producer capabilities in 0.11.0.0 that were mentioned above. The consumer's position is stored as a message in an internal topic, so we can write the offset to Kafka in the same transaction as the output topics receiving the processed data. If the transaction is aborted, the consumer's stored position will revert to its old value (although the consumer has to refetch the committed offset because it does not automatically rewind) and the produced data on the output topics will not be visible to other consumers, depending on their "isolation level". In the default "read_uncommitted" isolation level, all messages are visible to consumers even if they were part of an aborted transaction, but in "read_committed" isolation level, the consumer will only return messages from transactions which were committed (and any messages which were not part of a transaction). + +When writing to an external system, the limitation is in the need to coordinate the consumer's position with what is actually stored as output. The classic way of achieving this would be to introduce a two-phase commit between the storage of the consumer position and the storage of the consumers output. This can be handled more simply and generally by letting the consumer store its offset in the same place as its output. This is better because many of the output systems a consumer might want to write to will not support a two-phase commit. As an example of this, consider a [Kafka Connect](https://kafka.apache.org/#connect) connector which populates data in HDFS along with the offsets of the data it reads so that it is guaranteed that either data and offsets are both updated or neither is. We follow similar patterns for many other data systems which require these stronger semantics and for which the messages do not have a primary key to allow for deduplication. 
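+
+As a hedged, minimal sketch of the Kafka-to-Kafka consume-process-produce pattern described above (topic names, group and transactional ids, and the pass-through processing are illustrative; the Using Transactions section below spells out the exact rules and exception handling):
+
+    import java.time.Duration;
+    import java.util.HashMap;
+    import java.util.List;
+    import java.util.Map;
+    import java.util.Properties;
+    import org.apache.kafka.clients.consumer.*;
+    import org.apache.kafka.clients.producer.*;
+    import org.apache.kafka.common.TopicPartition;
+    import org.apache.kafka.common.serialization.StringDeserializer;
+    import org.apache.kafka.common.serialization.StringSerializer;
+
+    public class ExactlyOnceCopySketch {
+        public static void main(String[] args) throws Exception {
+            Properties c = new Properties();
+            c.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+            c.put(ConsumerConfig.GROUP_ID_CONFIG, "copy-group");
+            c.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
+            c.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
+            c.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+            c.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+
+            Properties p = new Properties();
+            p.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+            p.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "copy-app-1");
+            p.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+            p.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+
+            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(c);
+                 KafkaProducer<String, String> producer = new KafkaProducer<>(p)) {
+                consumer.subscribe(List.of("input"));
+                producer.initTransactions();
+                while (true) {
+                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
+                    if (records.isEmpty()) continue;
+                    producer.beginTransaction();
+                    try {
+                        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
+                        for (ConsumerRecord<String, String> r : records) {
+                            producer.send(new ProducerRecord<>("output", r.key(), r.value()));
+                            offsets.put(new TopicPartition(r.topic(), r.partition()),
+                                    new OffsetAndMetadata(r.offset() + 1));
+                        }
+                        // Commit the consumed offsets in the same transaction as the output.
+                        producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
+                        producer.commitTransaction();
+                    } catch (Exception e) {
+                        // Neither the output records nor the offsets become visible; the
+                        // consumer position must then be rewound (or the clients recreated)
+                        // before reprocessing. Fenced or fatal errors require a new producer.
+                        producer.abortTransaction();
+                        throw e;
+                    }
+                }
+            }
+        }
+    }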
+ +As a result, Kafka supports exactly-once delivery in [Kafka Streams](https://kafka.apache.org/streams), and the transactional producer and the consumer using read-committed isolation level can be used generally to provide exactly-once delivery when reading, processing and writing data on Kafka topics. Exactly-once delivery for other destination systems generally requires cooperation with such systems, but Kafka provides the primitives which makes implementing this feasible (see also [Kafka Connect](https://kafka.apache.org/#connect)). Otherwise, Kafka guarantees at-least-once delivery by default, and allows the user to implement at-most-once delivery by disabling retries on the producer and committing offsets in the consumer prior to processing a batch of messages. + +# Using Transactions + +As mentioned above, the simplest way to get exactly-once semantics from Kafka is to use [Kafka Streams](https://kafka.apache.org/streams). However, it is also possible to achieve the same transactional guarantees using the Kafka producer and consumer directly by using them in the same way as Kafka Streams does. + +Kafka transactions are a bit different from transactions in other messaging systems. In Kafka, the consumer and producer are separate, and it is only the producer which is transactional. It is however able to make transactional updates to the consumer's position (confusingly called the "committed offset"), and it is this which gives the overall exactly-once behavior. + +There are three key aspects to exactly-once processing using the producer and consumer, which match how Kafka Streams works. + + 1. The consumer uses partition assignment to ensure that it is the only consumer in the consumer group currently processing each partition. + 2. The producer uses transactions so that all the records it produces, and any offsets it updates on behalf of the consumer, are performed atomically. + 3. In order to handle transactions properly in combination with rebalancing, it is advisable to use one producer instance for each consumer instance. More complicated and efficient schemes are possible, but at the cost of greater complexity. + + + +In addition, it is generally considered a good practice to use the read-committed isolation level if trying to achieve exactly-once processing. Strictly speaking, the consumer doesn't have to use read-committed isolation level, but if it does not, it will see records from aborted transactions and also open transactions which have not yet completed. + +The consumer configuration must include `isolation.level=read_committed` and `enable.auto.commit=false`. The producer configuration must set `transactional.id` to the name of the transactional ID to be used, which configures the producer for transactional delivery and also makes sure that a restarted application causes any in-flight transaction from the previous instance to abort. Only the producer has the `transactional.id` configuration. + +Here's an example of a [transactional message copier](https://github.com/apache/kafka/blob/trunk/tools/src/main/java/org/apache/kafka/tools/TransactionalMessageCopier.java) which uses these principles. It uses a `KafkaConsumer` to consume records from one topic and a `KafkaProducer` to produce records to another topic. It uses transactions to ensure that there is no duplication or loss of records as they are copied, provided that the `--use-group-metadata` option is set. + +It is important to handle exceptions and aborted transactions correctly. 
Any records written by the transactional producer will be marked as being part of the transaction, and then when the transaction commits or aborts, transaction marker records are written to indicate the outcome of the transaction. This is how the read-committed consumer does not see records from aborted transactions. However, in the event of a transaction abort, the application's state and in particular the current position of the consumer must be reset explicitly so that it can reprocess the records processed by the aborted transaction. + +Error handling for the transactional producer has been standardized, which ensures consistent behavior and clearer error-handling patterns. The exception categories are now more precisely defined: + + 1. **RetriableException**: Temporary exceptions that are retried automatically by the client. These are handled internally and don't bubble up to the application. + 2. **RefreshRetriableException**: Exceptions requiring metadata refresh before retry. These are handled internally by the client after refreshing metadata and don't bubble up to the application. + 3. **AbortableException**: Exceptions that require transaction abort and reprocessing. These bubble up to the application, which must handle them by aborting the transaction and resetting the consumer position. + 4. **ApplicationRecoverableException**: Exceptions that bubble up to the application and require application handling. The application must implement its own recovery strategy, which must include restarting the producer. + 5. **InvalidConfigurationException**: Configuration-related exceptions that bubble up to the application and require application handling. The producer doesn't need to restart, but the application may choose to restart it. + 6. **KafkaException**: General Kafka exceptions that don't fit into the above categories. These bubble up to the application for handling. + + + +For example template code that handles transaction exceptions, see the [Transaction Client Demo](https://github.com/apache/kafka/blob/trunk/examples/src/main/java/kafka/examples/TransactionalClientDemo.java). + +A simple policy for handling exceptions and aborted transactions is to discard and recreate the Kafka producer and consumer objects and start afresh. As part of recreating the consumer, the consumer group will rebalance and fetch the last committed offset, which has the effect of rewinding back to the state before the transaction aborted. Alternatively, a more sophisticated application (such as the transactional message copier) can avoid recreating these objects and instead use `KafkaConsumer.committed` to retrieve the committed offset from Kafka, and then `KafkaConsumer.seek` to rewind the current position. + +# Share groups + +Share groups are available as a preview in Apache Kafka 4.1. + +Share groups are a new type of group, existing alongside traditional consumer groups. Share groups enable Kafka consumers to cooperatively consume and process records from topics. They offer an alternative to traditional consumer groups, particularly when applications require finer-grained sharing of partitions and records. + +The fundamental differences between a share group and a consumer group are: + + * The consumers within a share group cooperatively consume records, and partitions may be assigned to multiple consumers. + * The number of consumers in a share group can exceed the number of partitions in a topic. + * Records are acknowledged individually, though the system is optimized for batch processing to improve efficiency.
+ * Delivery attempts to consumers in a share group are counted, which enables automated handling of unprocessable records. + + + +All consumers in the same share group subscribed to the same topic will cooperatively consume the records of that topic. If a topic is accessed by consumers in multiple share groups, each share group consumes from that topic independently of the others. + +Each consumer can dynamically set its list of subscribed topics. In practice, all consumers within a share group typically subscribe to the same topic or topics. + +When a consumer in a share-group fetches records, it receives available records from any of the topic-partitions matching its subscriptions. Records are acquired for delivery to this consumer with a time-limited acquisition lock. While a record is acquired, it is unavailable to other consumers. + +By default, the lock duration is 30 seconds, but you can control it using the group configuration parameter `share.record.lock.duration.ms`. The lock is released automatically once its duration elapses, making the record available to another consumer. A consumer holding the lock can handle the record in the following ways: + + * Acknowledge successful processing of the record. + * Release the record, making it available for another delivery attempt. + * Reject the record, indicating it's unprocessable and preventing further delivery attempts for that record. + * Do nothing, in which case the lock is automatically released when its duration expires. + + + +The Kafka cluster limits the number of records acquired for consumers for each topic-partition within a share group. Once this limit is reached, fetching operations will temporarily yield no further records until the number of acquired records decreases (as locks naturally time out). This limit is controlled by the broker configuration property `group.share.partition.max.record.locks`. By limiting the duration of the acquisition lock and automatically releasing the locks, the broker ensures delivery progresses even in the presence of consumer failures. + +# Replication + +Kafka replicates the log for each topic's partitions across a configurable number of servers (you can set this replication factor on a topic-by-topic basis). This allows automatic failover to these replicas when a server in the cluster fails so messages remain available in the presence of failures. + +Other messaging systems provide some replication-related features, but, in our (totally biased) opinion, this appears to be a tacked-on thing, not heavily used, and with large downsides: replicas are inactive, throughput is heavily impacted, it requires fiddly manual configuration, etc. Kafka is meant to be used with replication by default--in fact we implement un-replicated topics as replicated topics where the replication factor is one. + +The unit of replication is the topic partition. Under non-failure conditions, each partition in Kafka has a single leader and zero or more followers. The total number of replicas including the leader constitute the replication factor. All writes go to the leader of the partition, and reads can go to the leader or the followers of the partition. Typically, there are many more partitions than brokers and the leaders are evenly distributed among brokers. The logs on the followers are identical to the leader's log--all have the same offsets and messages in the same order (though, of course, at any given time the leader may have a few as-yet unreplicated messages at the end of its log). 
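+
+As a hedged illustration of choosing the replication factor per topic, a topic might be created with the admin client roughly as follows (the topic name, partition count, and bootstrap address are arbitrary):
+
+    import java.util.List;
+    import java.util.Properties;
+    import org.apache.kafka.clients.admin.Admin;
+    import org.apache.kafka.clients.admin.AdminClientConfig;
+    import org.apache.kafka.clients.admin.NewTopic;
+
+    public class CreateReplicatedTopicSketch {
+        public static void main(String[] args) throws Exception {
+            Properties props = new Properties();
+            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+            try (Admin admin = Admin.create(props)) {
+                // 6 partitions, each with 3 replicas: 1 leader plus 2 followers.
+                NewTopic topic = new NewTopic("page-views", 6, (short) 3);
+                admin.createTopics(List.of(topic)).all().get();
+            }
+        }
+    }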
+ +Followers consume messages from the leader just as a normal Kafka consumer would and apply them to their own log. Having the followers pull from the leader has the nice property of allowing the follower to naturally batch together log entries they are applying to their log. + +As with most distributed systems, automatically handling failures requires a precise definition of what it means for a node to be "alive." In Kafka, a special node known as the "controller" is responsible for managing the registration of brokers in the cluster. Broker liveness has two conditions: + + 1. Brokers must maintain an active session with the controller in order to receive regular metadata updates. + 2. Brokers acting as followers must replicate the writes from the leader and not fall "too far" behind. + + + +What is meant by an "active session" depends on the cluster configuration. For KRaft clusters, an active session is maintained by sending periodic heartbeats to the controller. If the controller fails to receive a heartbeat before the timeout configured by `broker.session.timeout.ms` expires, then the node is considered offline. + +We refer to nodes satisfying these two conditions as being "in sync" to avoid the vagueness of "alive" or "failed". The leader keeps track of the set of "in sync" replicas, which is known as the ISR. If either of these conditions fail to be satisfied, then the broker will be removed from the ISR. For example, if a follower dies, then the controller will notice the failure through the loss of its session, and will remove the broker from the ISR. On the other hand, if the follower lags too far behind the leader but still has an active session, then the leader can also remove it from the ISR. The determination of lagging replicas is controlled through the `replica.lag.time.max.ms` configuration. Replicas that cannot catch up to the end of the log on the leader within the max time set by this configuration are removed from the ISR. + +In distributed systems terminology we only attempt to handle a "fail/recover" model of failures where nodes suddenly cease working and then later recover (perhaps without knowing that they have died). Kafka does not handle so-called "Byzantine" failures in which nodes produce arbitrary or malicious responses (perhaps due to bugs or foul play). + +Only committed messages are ever given out to the consumer. This means that the consumer need not worry about potentially seeing a message that could be lost if the leader fails. Producers, on the other hand, have the option of either waiting for the message to be committed or not, depending on their preference for tradeoff between latency and durability. This preference is controlled by the `acks` setting that the producer uses. Note that topics have a setting for the minimum number of in-sync replicas (`min.insync.replicas`) that is checked when the producer requests acknowledgment that a message has been written to the full set of in-sync replicas. If a less stringent acknowledgment is requested by the producer, then the message is committed asynchronously across the set of in-sync replicas if `acks=0`, or synchronously only on the leader if `acks=1`. Regardless of the `acks` setting, the messages will not be visible to the consumers until all the following conditions are met: + + 1. The messages are replicated to all the in-sync replicas. + 2. The number of the in-sync replicas is no less than the `min.insync.replicas` setting. 
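+
+As a hedged sketch, the durability preference discussed above is expressed on the producer via `acks`; the bootstrap address and topic below are illustrative, and `min.insync.replicas` itself is a topic- or broker-level setting rather than a producer setting:
+
+    import java.util.Properties;
+    import org.apache.kafka.clients.producer.KafkaProducer;
+    import org.apache.kafka.clients.producer.ProducerConfig;
+    import org.apache.kafka.clients.producer.ProducerRecord;
+    import org.apache.kafka.common.serialization.StringSerializer;
+
+    public class DurableProducerSketch {
+        public static void main(String[] args) {
+            Properties props = new Properties();
+            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+            // Do not consider a send successful until all in-sync replicas have it.
+            props.put(ProducerConfig.ACKS_CONFIG, "all");
+            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+
+            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
+                // With acks=all and, say, min.insync.replicas=2 on the topic, this send
+                // fails (e.g. NotEnoughReplicasException) rather than being acknowledged
+                // by fewer replicas than the configured minimum.
+                producer.send(new ProducerRecord<>("page-views", "user-123", "viewed /pricing"));
+            }
+        }
+    }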
+ + + +The guarantee that Kafka offers is that a committed message will not be lost, as long as there is at least one in sync replica alive, at all times. + +Kafka will remain available in the presence of node failures after a short fail-over period, but may not remain available in the presence of network partitions. + +## Replicated Logs: Quorums, ISRs, and State Machines (Oh my!) + +At its heart a Kafka partition is a replicated log. The replicated log is one of the most basic primitives in distributed data systems, and there are many approaches for implementing one. A replicated log can be used by other systems as a primitive for implementing other distributed systems in the [state-machine style](https://en.wikipedia.org/wiki/State_machine_replication). + +A replicated log models the process of coming into consensus on the order of a series of values (generally numbering the log entries 0, 1, 2, ...). There are many ways to implement this, but the simplest and fastest is with a leader who chooses the ordering of values provided to it. As long as the leader remains alive, all followers need to only copy the values and ordering the leader chooses. + +Of course if leaders didn't fail we wouldn't need followers! When the leader does die we need to choose a new leader from among the followers. But followers themselves may fall behind or crash so we must ensure we choose an up-to-date follower. The fundamental guarantee a log replication algorithm must provide is that if we tell the client a message is committed, and the leader fails, the new leader we elect must also have that message. This yields a tradeoff: if the leader waits for more followers to acknowledge a message before declaring it committed then there will be more potentially electable leaders. + +If you choose the number of acknowledgements required and the number of logs that must be compared to elect a leader such that there is guaranteed to be an overlap, then this is called a Quorum. + +A common approach to this tradeoff is to use a majority vote for both the commit decision and the leader election. This is not what Kafka does, but let's explore it anyway to understand the tradeoffs. Let's say we have 2 _f_ +1 replicas. If _f_ +1 replicas must receive a message prior to a commit being declared by the leader, and if we elect a new leader by electing the follower with the most complete log from at least _f_ +1 replicas, then, with no more than _f_ failures, the leader is guaranteed to have all committed messages. This is because among any _f_ +1 replicas, there must be at least one replica that contains all committed messages. That replica's log will be the most complete and therefore will be selected as the new leader. There are many remaining details that each algorithm must handle (such as precisely defined what makes a log more complete, ensuring log consistency during leader failure or changing the set of servers in the replica set) but we will ignore these for now. + +This majority vote approach has a very nice property: the latency is dependent on only the fastest servers. That is, if the replication factor is three, the latency is determined by the faster follower not the slower one. 
+ +There are a rich variety of algorithms in this family including ZooKeeper's [Zab](https://web.archive.org/web/20140602093727/https://www.stanford.edu/class/cs347/reading/zab.pdf), [Raft](https://www.usenix.org/system/files/conference/atc14/atc14-paper-ongaro.pdf), and [Viewstamped Replication](https://pmg.csail.mit.edu/papers/vr-revisited.pdf). The most similar academic publication we are aware of to Kafka's actual implementation is [PacificA](https://research.microsoft.com/apps/pubs/default.aspx?id=66814) from Microsoft. + +The downside of majority vote is that it doesn't take many failures to leave you with no electable leaders. To tolerate one failure requires three copies of the data, and to tolerate two failures requires five copies of the data. In our experience having only enough redundancy to tolerate a single failure is not enough for a practical system, but doing every write five times, with 5x the disk space requirements and 1/5th the throughput, is not very practical for large volume data problems. This is likely why quorum algorithms more commonly appear for shared cluster configuration such as ZooKeeper but are less common for primary data storage. For example in HDFS the namenode's high-availability feature is built on a [majority-vote-based journal](https://blog.cloudera.com/blog/2012/10/quorum-based-journaling-in-cdh4-1), but this more expensive approach is not used for the data itself. + +Kafka takes a slightly different approach to choosing its quorum set. Instead of majority vote, Kafka dynamically maintains a set of in-sync replicas (ISR) that are caught-up to the leader. Only members of this set are eligible for election as leader. A write to a Kafka partition is not considered committed until _all_ in-sync replicas have received the write. This ISR set is persisted in the cluster metadata whenever it changes. Because of this, any replica in the ISR is eligible to be elected leader. This is an important factor for Kafka's usage model where there are many partitions and ensuring leadership balance is important. With this ISR model and _f+1_ replicas, a Kafka topic can tolerate _f_ failures without losing committed messages. + +For most use cases we hope to handle, we think this tradeoff is a reasonable one. In practice, to tolerate _f_ failures, both the majority vote and the ISR approach will wait for the same number of replicas to acknowledge before committing a message (e.g. to survive one failure a majority quorum needs three replicas and one acknowledgement and the ISR approach requires two replicas and one acknowledgement). The ability to commit without the slowest servers is an advantage of the majority vote approach. However, we think it is ameliorated by allowing the client to choose whether they block on the message commit or not, and the additional throughput and disk space due to the lower required replication factor is worth it. + +Another important design distinction is that Kafka does not require that crashed nodes recover with all their data intact. It is not uncommon for replication algorithms in this space to depend on the existence of "stable storage" that cannot be lost in any failure-recovery scenario without potential consistency violations. There are two primary problems with this assumption. First, disk errors are the most common problem we observe in real operation of persistent data systems and they often do not leave data intact. 
Secondly, even if this were not a problem, we do not want to require the use of fsync on every write for our consistency guarantees as this can reduce performance by two to three orders of magnitude. Our protocol for allowing a replica to rejoin the ISR ensures that before rejoining, it must fully re-sync again even if it lost unflushed data in its crash. + +## Unclean leader election: What if they all die? + +Note that Kafka's guarantee with respect to data loss is predicated on at least one replica remaining in sync. If all the nodes replicating a partition die, this guarantee no longer holds. + +However a practical system needs to do something reasonable when all the replicas die. If you are unlucky enough to have this occur, it is important to consider what will happen. There are two behaviors that could be implemented: + + 1. Wait for a replica in the ISR to come back to life and choose this replica as the leader (hopefully it still has all its data). + 2. Choose the first replica (not necessarily in the ISR) that comes back to life as the leader. + + +This is a simple tradeoff between availability and consistency. If we wait for replicas in the ISR, then we will remain unavailable as long as those replicas are down. If such replicas were destroyed or their data was lost, then we are permanently down. If, on the other hand, a non-in-sync replica comes back to life and we allow it to become leader, then its log becomes the source of truth even though it is not guaranteed to have every committed message. By default from version 0.11.0.0, Kafka chooses the first strategy and favor waiting for a consistent replica. This behavior can be changed using configuration property `unclean.leader.election.enable`, to support use cases where uptime is preferable to consistency. + +This dilemma is not specific to Kafka. It exists in any quorum-based scheme. For example in a majority voting scheme, if a majority of servers suffer a permanent failure, then you must either choose to lose 100% of your data or violate consistency by taking what remains on an existing server as your new source of truth. + +## Availability and Durability Guarantees + +When writing to Kafka, producers can choose whether they wait for the message to be acknowledged by 0,1 or all (-1) replicas. Note that "acknowledgement by all replicas" does not guarantee that the full set of assigned replicas have received the message. By default, when acks=all, acknowledgement happens as soon as all the current in-sync replicas have received the message. For example, if a topic is configured with only two replicas and one fails (i.e., only one in sync replica remains), then writes that specify acks=all will succeed. However, these writes could be lost if the remaining replica also fails. Although this ensures maximum availability of the partition, this behavior may be undesirable to some users who prefer durability over availability. Therefore, we provide two topic configurations that can be used to prefer message durability over availability: + + 1. Disable unclean leader election - if all replicas become unavailable, then the partition will remain unavailable until the most recent leader becomes available again. This effectively prefers unavailability over the risk of message loss. See the previous section on Unclean Leader Election for clarification. + 2. 
Specify a minimum ISR size - the partition will only accept writes if the size of the ISR is above a certain minimum, in order to prevent the loss of messages that were written to just a single replica, which subsequently becomes unavailable. This setting only takes effect if the producer uses acks=all and guarantees that the message will be acknowledged by at least this many in-sync replicas. This setting offers a trade-off between consistency and availability. A higher setting for minimum ISR size guarantees better consistency since the message is guaranteed to be written to more replicas which reduces the probability that it will be lost. However, it reduces availability since the partition will be unavailable for writes if the number of in-sync replicas drops below the minimum threshold. + + + +## Replica Management + +The above discussion on replicated logs really covers only a single log, i.e. one topic partition. However a Kafka cluster will manage hundreds or thousands of these partitions. We attempt to balance partitions within a cluster in a round-robin fashion to avoid clustering all partitions for high-volume topics on a small number of nodes. Likewise we try to balance leadership so that each node is the leader for a proportional share of its partitions. + +It is also important to optimize the leadership election process as that is the critical window of unavailability. A naive implementation of leader election would end up running an election per partition for all partitions a node hosted when that node failed. As discussed above in the section on replication, Kafka clusters have a special role known as the "controller" which is responsible for managing the registration of brokers. If the controller detects the failure of a broker, it is responsible for electing one of the remaining members of the ISR to serve as the new leader. The result is that we are able to batch together many of the required leadership change notifications which makes the election process far cheaper and faster for a large number of partitions. If the controller itself fails, then another controller will be elected. + +# Log Compaction + +Log compaction ensures that Kafka will always retain at least the last known value for each message key within the log of data for a single topic partition. It addresses use cases and scenarios such as restoring state after application crashes or system failure, or reloading caches after application restarts during operational maintenance. Let's dive into these use cases in more detail and then describe how compaction works. + +So far we have described only the simpler approach to data retention where old log data is discarded after a fixed period of time or when the log reaches some predetermined size. This works well for temporal event data such as logging where each record stands alone. However an important class of data streams are the log of changes to keyed, mutable data (for example, the changes to a database table). + +Let's discuss a concrete example of such a stream. Say we have a topic containing user email addresses; every time a user updates their email address we send a message to this topic using their user id as the primary key. Now say we send the following messages over some time period for a user with id 123, each message corresponding to a change in email address (messages for other ids are omitted): + + + 123 => bill@microsoft.com + . + . + . + 123 => bill@gatesfoundation.org + . + . + . 
+ 123 => bill@gmail.com + +Log compaction gives us a more granular retention mechanism so that we are guaranteed to retain at least the last update for each primary key (e.g. `bill@gmail.com`). By doing this we guarantee that the log contains a full snapshot of the final value for every key not just keys that changed recently. This means downstream consumers can restore their own state off this topic without us having to retain a complete log of all changes. + +Let's start by looking at a few use cases where this is useful, then we'll see how it can be used. + + 1. _Database change subscription_. It is often necessary to have a data set in multiple data systems, and often one of these systems is a database of some kind (either a RDBMS or perhaps a new-fangled key-value store). For example you might have a database, a cache, a search cluster, and a Hadoop cluster. Each change to the database will need to be reflected in the cache, the search cluster, and eventually in Hadoop. In the case that one is only handling the real-time updates you only need recent log. But if you want to be able to reload the cache or restore a failed search node you may need a complete data set. + 2. _Event sourcing_. This is a style of application design which co-locates query processing with application design and uses a log of changes as the primary store for the application. + 3. _Journaling for high-availability_. A process that does local computation can be made fault-tolerant by logging out changes that it makes to its local state so another process can reload these changes and carry on if it should fail. A concrete example of this is handling counts, aggregations, and other "group by"-like processing in a stream query system. Samza, a real-time stream-processing framework, [uses this feature](https://samza.apache.org/learn/0.7.0/container/state-management.html) for exactly this purpose. +In each of these cases one needs primarily to handle the real-time feed of changes, but occasionally, when a machine crashes or data needs to be re-loaded or re-processed, one needs to do a full load. Log compaction allows feeding both of these use cases off the same backing topic. This style of usage of a log is described in more detail in [this blog post](https://engineering.linkedin.com/distributed-systems/log-what-every-software-engineer-should-know-about-real-time-datas-unifying). + +The general idea is quite simple. If we had infinite log retention, and we logged each change in the above cases, then we would have captured the state of the system at each time from when it first began. Using this complete log, we could restore to any point in time by replaying the first N records in the log. This hypothetical complete log is not very practical for systems that update a single record many times as the log will grow without bound even for a stable dataset. The simple log retention mechanism which throws away old updates will bound space but the log is no longer a way to restore the current state--now restoring from the beginning of the log no longer recreates the current state as old updates may not be captured at all. + +Log compaction is a mechanism to give finer-grained per-record retention, rather than the coarser-grained time-based retention. The idea is to selectively remove records where we have a more recent update with the same primary key. This way the log is guaranteed to have at least the last state for each key. 
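+
+As a concrete (and purely illustrative) sketch of the user-email example above, one could create a compacted topic and send keyed updates to it with the console tooling; the topic name is arbitrary, and `parse.key`/`key.separator` are console-producer properties used to attach a key to each record:
+
+    # Create a topic whose retention is driven by compaction rather than time or size
+    $ bin/kafka-topics.sh --create --topic user-emails --bootstrap-server localhost:9092 \
+        --config cleanup.policy=compact
+    # Produce keyed updates; older values for key 123 eventually get compacted away
+    $ bin/kafka-console-producer.sh --topic user-emails --bootstrap-server localhost:9092 \
+        --property parse.key=true --property key.separator=:
+    >123:bill@microsoft.com
+    >123:bill@gmail.com
+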
+ +This retention policy can be set per-topic, so a single cluster can have some topics where retention is enforced by size or time and other topics where retention is enforced by compaction. + +This functionality is inspired by one of LinkedIn's oldest and most successful pieces of infrastructure--a database changelog caching service called [Databus](https://github.com/linkedin/databus). Unlike most log-structured storage systems Kafka is built for subscription and organizes data for fast linear reads and writes. Unlike Databus, Kafka acts as a source-of-truth store so it is useful even in situations where the upstream data source would not otherwise be replayable. + +## Log Compaction Basics + +Here is a high-level picture that shows the logical structure of a Kafka log with the offset for each message. + +![](/41/images/log_cleaner_anatomy.png) + +The head of the log is identical to a traditional Kafka log. It has dense, sequential offsets and retains all messages. Log compaction adds an option for handling the tail of the log. The picture above shows a log with a compacted tail. Note that the messages in the tail of the log retain the original offset assigned when they were first written--that never changes. Note also that all offsets remain valid positions in the log, even if the message with that offset has been compacted away; in this case this position is indistinguishable from the next highest offset that does appear in the log. For example, in the picture above the offsets 36, 37, and 38 are all equivalent positions and a read beginning at any of these offsets would return a message set beginning with 38. + +Compaction also allows for deletes. A message with a key and a null payload will be treated as a delete from the log. Such a record is sometimes referred to as a _tombstone_. This delete marker will cause any prior message with that key to be removed (as would any new message with that key), but delete markers are special in that they will themselves be cleaned out of the log after a period of time to free up space. The point in time at which deletes are no longer retained is marked as the "delete retention point" in the above diagram. + +The compaction is done in the background by periodically recopying log segments. Cleaning does not block reads and can be throttled to use no more than a configurable amount of I/O throughput to avoid impacting producers and consumers. The actual process of compacting a log segment looks something like this: + +![](/41/images/log_compaction.png) + +## What guarantees does log compaction provide? + +Log compaction guarantees the following: + + 1. Any consumer that stays caught-up to within the head of the log will see every message that is written; these messages will have sequential offsets. The topic's `min.compaction.lag.ms` can be used to guarantee the minimum length of time must pass after a message is written before it could be compacted. I.e. it provides a lower bound on how long each message will remain in the (uncompacted) head. The topic's `max.compaction.lag.ms` can be used to guarantee the maximum delay between the time a message is written and the time the message becomes eligible for compaction. + 2. Ordering of messages is always maintained. Compaction will never re-order messages, just remove some. + 3. The offset for a message never changes. It is the permanent identifier for a position in the log. + 4. 
Any consumer progressing from the start of the log will see at least the final state of all records in the order they were written. Additionally, all delete markers for deleted records will be seen, provided the consumer reaches the head of the log in a time period less than the topic's `delete.retention.ms` setting (the default is 24 hours). In other words: since the removal of delete markers happens concurrently with reads, it is possible for a consumer to miss delete markers if it lags by more than `delete.retention.ms`. + + +## Log Compaction Details + +Log compaction is handled by the log cleaner, a pool of background threads that recopy log segment files, removing records whose key appears in the head of the log. Each compactor thread works as follows: + + 1. It chooses the log that has the highest ratio of log head to log tail + 2. It creates a succinct summary of the last offset for each key in the head of the log + 3. It recopies the log from beginning to end removing keys which have a later occurrence in the log. New, clean segments are swapped into the log immediately so the additional disk space required is just one additional log segment (not a full copy of the log). + 4. The summary of the log head is essentially just a space-compact hash table. It uses exactly 24 bytes per entry. As a result with 8GB of cleaner buffer one cleaner iteration can clean around 366GB of log head (assuming 1k messages). + + +## Configuring The Log Cleaner + +The log cleaner is enabled by default. This will start the pool of cleaner threads. To enable log cleaning on a particular topic, add the log-specific property + + + log.cleanup.policy=compact + +The `log.cleanup.policy` property is a broker configuration setting defined in the broker's `server.properties` file; it affects all of the topics in the cluster that do not have a configuration override in place as documented [here](/documentation.html#brokerconfigs). The log cleaner can be configured to retain a minimum amount of the uncompacted "head" of the log. This is enabled by setting the compaction time lag. + + + log.cleaner.min.compaction.lag.ms + +This can be used to prevent messages newer than a minimum message age from being subject to compaction. If not set, all log segments are eligible for compaction except for the last segment, i.e. the one currently being written to. The active segment will not be compacted even if all of its messages are older than the minimum compaction time lag. The log cleaner can be configured to ensure a maximum delay after which the uncompacted "head" of the log becomes eligible for log compaction. + + + log.cleaner.max.compaction.lag.ms + +This can be used to prevent a log with a low produce rate from remaining ineligible for compaction for an unbounded duration. If not set, logs that do not exceed min.cleanable.dirty.ratio are not compacted. Note that this compaction deadline is not a hard guarantee since it is still subject to the availability of log cleaner threads and the actual compaction time. You will want to monitor the uncleanable-partitions-count, max-clean-time-secs and max-compaction-delay-secs metrics. + +Further cleaner configurations are described [here](/documentation.html#brokerconfigs). + +# Quotas + +A Kafka cluster has the ability to enforce quotas on requests to control the broker resources used by clients. Two types of client quotas can be enforced by Kafka brokers for each group of clients sharing a quota: + + 1. Network bandwidth quotas define byte-rate thresholds (since 0.9) + 2. 
Request rate quotas define CPU utilization thresholds as a percentage of network and I/O threads (since 0.11) + + + +## Why are quotas necessary? + +It is possible for producers and consumers to produce/consume very high volumes of data or generate requests at a very high rate and thus monopolize broker resources, cause network saturation and generally DoS other clients and the brokers themselves. Having quotas protects against these issues and is all the more important in large multi-tenant clusters where a small set of badly behaved clients can degrade user experience for the well behaved ones. In fact, when running Kafka as a service this even makes it possible to enforce API limits according to an agreed upon contract. + +## Client groups + +The identity of Kafka clients is the user principal which represents an authenticated user in a secure cluster. In a cluster that supports unauthenticated clients, user principal is a grouping of unauthenticated users chosen by the broker using a configurable `PrincipalBuilder`. Client-id is a logical grouping of clients with a meaningful name chosen by the client application. The tuple (user, client-id) defines a secure logical group of clients that share both user principal and client-id. + +Quotas can be applied to (user, client-id), user or client-id groups. For a given connection, the most specific quota matching the connection is applied. All connections of a quota group share the quota configured for the group. For example, if (user="test-user", client-id="test-client") has a produce quota of 10MB/sec, this is shared across all producer instances of user "test-user" with the client-id "test-client". + +## Quota Configuration + +Quota configuration may be defined for (user, client-id), user and client-id groups. It is possible to override the default quota at any of the quota levels that need a higher (or even lower) quota. The mechanism is similar to the per-topic log config overrides. User and (user, client-id) quota overrides are written to the metadata log. These overrides are read by all brokers and are effective immediately. This lets us change quotas without having to do a rolling restart of the entire cluster. See here for details. Default quotas for each group may also be updated dynamically using the same mechanism. + +The order of precedence for quota configuration is: + + 1. matching user and client-id quotas + 2. matching user and default client-id quotas + 3. matching user quota + 4. default user and matching client-id quotas + 5. default user and default client-id quotas + 6. default user quota + 7. matching client-id quota + 8. default client-id quota + + + +## Network Bandwidth Quotas + +Network bandwidth quotas are defined as the byte rate threshold for each group of clients sharing a quota. By default, each unique client group receives a fixed quota in bytes/sec as configured by the cluster. This quota is defined on a per-broker basis. Each group of clients can publish/fetch a maximum of X bytes/sec per broker before clients are throttled. + +## Request Rate Quotas + +Request rate quotas are defined as the percentage of time a client can utilize on request handler I/O threads and network threads of each broker within a quota window. A quota of `n%` represents `n%` of one thread, so the quota is out of a total capacity of `((num.io.threads + num.network.threads) * 100)%`. Each group of clients may use a total percentage of up to `n%` across all I/O and network threads in a quota window before being throttled. 
Since the number of threads allocated for I/O and network threads is typically based on the number of cores available on the broker host, request rate quotas represent the total percentage of CPU that may be used by each group of clients sharing the quota. + +## Enforcement + +By default, each unique client group receives a fixed quota as configured by the cluster. This quota is defined on a per-broker basis. Each client can utilize this quota per broker before it gets throttled. We decided that defining these quotas per broker is much better than having a fixed cluster-wide bandwidth per client because that would require a mechanism to share client quota usage among all the brokers. This can be harder to get right than the quota implementation itself! + +How does a broker react when it detects a quota violation? In our solution, the broker first computes the amount of delay needed to bring the violating client under its quota and returns a response with the delay immediately. In case of a fetch request, the response will not contain any data. Then, the broker mutes the channel to the client, so that it no longer processes requests from the client until the delay is over. Upon receiving a response with a non-zero delay duration, the Kafka client will also refrain from sending further requests to the broker during the delay. Therefore, requests from a throttled client are effectively blocked from both sides. Even with older client implementations that do not respect the delay response from the broker, the back pressure applied by the broker via muting its socket channel can still handle the throttling of badly behaving clients. Those clients that sent further requests to the throttled channel will receive responses only after the delay is over. + +Byte-rate and thread utilization are measured over multiple small windows (e.g. 30 windows of 1 second each) in order to detect and correct quota violations quickly. Typically, having large measurement windows (e.g. 10 windows of 30 seconds each) leads to large bursts of traffic followed by long delays which is not great in terms of user experience. diff --git a/content/en/41/design/protocol.md b/content/en/41/design/protocol.md new file mode 100644 index 000000000..f28db3b64 --- /dev/null +++ b/content/en/41/design/protocol.md @@ -0,0 +1,203 @@ +--- +title: Protocol +description: +weight: 2 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Kafka protocol guide + +This document covers the wire protocol implemented in Kafka. It is meant to give a readable guide to the protocol that covers the available requests, their binary format, and the proper way to make use of them to implement a client. This document assumes you understand the basic design and terminology described [here](https://kafka.apache.org/documentation.html#design). + + * Preliminaries + * Network + * Partitioning and bootstrapping + * Partitioning Strategies + * Batching + * Versioning and Compatibility + * Retrieving Supported API versions + * SASL Authentication Sequence + * The Protocol + * Protocol Primitive Types + * Notes on reading the request format grammars + * Common Request and Response Structure + * Request and Response Headers + * Record Batch + * Constants + * Error Codes + * Api Keys + * The Messages + * Some Common Philosophical Questions + + + +## Preliminaries + +### Network + +Kafka uses a binary protocol over TCP. The protocol defines all APIs as request response message pairs. 
All messages are size delimited and are made up of the following primitive types. + +The client initiates a socket connection and then writes a sequence of request messages and reads back the corresponding response message. No handshake is required on connection or disconnection. TCP is happier if you maintain persistent connections used for many requests to amortize the cost of the TCP handshake, but beyond this penalty connecting is pretty cheap. + +The client will likely need to maintain a connection to multiple brokers, as data is partitioned and the clients will need to talk to the server that has their data. However it should not generally be necessary to maintain multiple connections to a single broker from a single client instance (i.e. connection pooling). + +The server guarantees that on a single TCP connection, requests will be processed in the order they are sent and responses will return in that order as well. The broker's request processing allows only a single in-flight request per connection in order to guarantee this ordering. Note that clients can (and ideally should) use non-blocking IO to implement request pipelining and achieve higher throughput; i.e., clients can send requests even while awaiting responses for preceding requests since the outstanding requests will be buffered in the underlying OS socket buffer. All requests are initiated by the client, and result in a corresponding response message from the server except where noted. + +The server has a configurable maximum limit on request size and any request that exceeds this limit will result in the socket being disconnected. + +### Partitioning and bootstrapping + +Kafka is a partitioned system so not all servers have the complete data set. Instead recall that topics are split into a pre-defined number of partitions, P, and each partition is replicated with some replication factor, N. Topic partitions themselves are just ordered "commit logs" numbered 0, 1, ..., P-1. + +All systems of this nature have the question of how a particular piece of data is assigned to a particular partition. Kafka clients directly control this assignment; the brokers themselves enforce no particular semantics of which messages should be published to a particular partition. Rather, to publish messages the client directly addresses messages to a particular partition, and when fetching messages, fetches from a particular partition. If two clients want to use the same partitioning scheme they must use the same method to compute the mapping of key to partition. + +These requests to publish or fetch data must be sent to the broker that is currently acting as the leader for a given partition. This condition is enforced by the broker, so a request for a particular partition to the wrong broker will result in a NotLeaderForPartition error code (described below). + +How can the client find out which topics exist, what partitions they have, and which brokers currently host those partitions so that it can direct its requests to the right hosts? This information is dynamic, so you can't just configure each client with some static mapping file. Instead all Kafka brokers can answer a metadata request that describes the current state of the cluster: what topics there are, which partitions those topics have, which broker is the leader for those partitions, and the host and port information for these brokers. 
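+
+The same per-partition metadata that clients obtain through metadata requests (leader, replicas, and ISR) can also be inspected with the topic tooling, which can be handy when debugging a client implementation; the topic name and broker ids below are only illustrative:
+
+    $ bin/kafka-topics.sh --describe --topic quickstart-events --bootstrap-server localhost:9092
+    Topic: quickstart-events  TopicId: NPmZHyhbR9y00wMglMH2sg  PartitionCount: 1  ReplicationFactor: 1  Configs:
+        Topic: quickstart-events  Partition: 0  Leader: 0  Replicas: 0  Isr: 0
+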
+ +In other words, the client needs to somehow find one broker and that broker will tell the client about all the other brokers that exist and what partitions they host. This first broker may itself go down so the best practice for a client implementation is to take a list of two or three URLs to bootstrap from. The user can then choose to use a load balancer or just statically configure two or three of their Kafka hosts in the clients. + +The client does not need to keep polling to see if the cluster has changed; it can fetch metadata once when it is instantiated and cache that metadata until it receives an error indicating that the metadata is out of date. This error can come in two forms: (1) a socket error indicating the client cannot communicate with a particular broker, or (2) an error code in the response to a request indicating that this broker no longer hosts the partition for which data was requested. A client implementation can therefore follow this cycle: + + 1. Cycle through a list of "bootstrap" Kafka URLs until we find one we can connect to. Fetch cluster metadata. + 2. Process fetch or produce requests, directing them to the appropriate broker based on the topic/partitions they send to or fetch from. + 3. If we get an appropriate error, refresh the metadata and try again. + + + +### Partitioning Strategies + +As mentioned above, the assignment of messages to partitions is something the producing client controls. That said, how should this functionality be exposed to the end-user? + +Partitioning really serves two purposes in Kafka: + + 1. It balances data and request load over brokers + 2. It serves as a way to divvy up processing among consumer processes while allowing local state and preserving order within the partition. We call this semantic partitioning. + + + +For a given use case you may care about only one of these or both. + +To accomplish simple load balancing a simple approach would be for the client to just round robin requests over all brokers. Another alternative, in an environment where there are many more producers than brokers, would be to have each client choose a single partition at random and publish to that. This latter strategy will result in far fewer TCP connections. + +Semantic partitioning means using some key in the message to assign messages to partitions. For example if you were processing a click message stream you might want to partition the stream by the user id so that all data for a particular user would go to a single consumer. To accomplish this the client can take a key associated with the message and use some hash of this key to choose the partition to which to deliver the message. + +### Batching + +Our APIs encourage batching small things together for efficiency. We have found this is a very significant performance win. Both our API to send messages and our API to fetch messages always work with a sequence of messages not a single message to encourage this. A clever client can make use of this and support an "asynchronous" mode in which it batches together messages sent individually and sends them in larger clumps. We go even further with this and allow the batching across multiple topics and partitions, so a produce request may contain data to append to many partitions and a fetch request may pull data from many partitions all at once. + +The client implementer can choose to ignore this and send everything one at a time if they like. + +### Compatibility + +Kafka has a "bidirectional" client compatibility policy. 
In other words, new clients can talk to old servers, and old clients can talk to new servers. This allows users to upgrade either clients or servers without experiencing any downtime. + +Since the Kafka protocol has changed over time, clients and servers need to agree on the schema of the message that they are sending over the wire. This is done through API versioning. + +Before each request is sent, the client sends the API key and the API version. These two 16-bit numbers, when taken together, uniquely identify the schema of the message to follow. + +The intention is that clients will support a range of API versions. When communicating with a particular broker, a given client should use the highest API version supported by both and indicate this version in their requests. + +The server will reject requests with a version it does not support, and will always respond to the client with exactly the protocol format it expects based on the version it included in its request. The intended upgrade path is that new features would first be rolled out on the server (with the older clients not making use of them) and then as newer clients are deployed these new features would gradually be taken advantage of. Note there is an exceptional case while retrieving supported API versions where the server can respond with a different version. + +Note that [KIP-482 tagged fields](https://cwiki.apache.org/confluence/x/OhMyBw) can be added to a request without incrementing the version number. This offers an additional way of evolving the message schema without breaking compatibility. Tagged fields do not take up any space when the field is not set. Therefore, if a field is rarely used, it is more efficient to make it a tagged field than to put it in the mandatory schema. However, tagged fields are ignored by recipients that don't know about them, which could pose a challenge if this is not the behavior that the sender wants. In such cases, a version bump may be more appropriate. + +### Retrieving Supported API versions + +In order to work against multiple broker versions, clients need to know what versions of various APIs a broker supports. The broker exposes this information since 0.10.0.0 as described in [KIP-35](https://cwiki.apache.org/confluence/x/KK6nAw). Clients should use the supported API versions information to choose the highest API version supported by both client and broker. If no such version exists, an error should be reported to the user. + +The following sequence may be used by a client to obtain supported API versions from a broker. + + 1. Client sends `ApiVersionsRequest` to a broker after connection has been established with the broker. If SSL is enabled, this happens after SSL connection has been established. + 2. On receiving `ApiVersionsRequest`, a broker returns its full list of supported ApiKeys and versions regardless of current authentication state (e.g., before SASL authentication on an SASL listener, do note that no Kafka protocol requests may take place on an SSL listener before the SSL handshake is finished). If this is considered to leak information about the broker version a workaround is to use SSL with client authentication which is performed at an earlier stage of the connection where the `ApiVersionRequest` is not available. Also, note that broker versions older than 0.10.0.0 do not support this API and will either ignore the request or close connection in response to the request. 
Also note that if the client `ApiVersionsRequest` version is unsupported by the broker (client is ahead), and the broker version is 2.4.0 or greater, then the broker will respond with a version 0 ApiVersionsResponse with the error code set to `UNSUPPORTED_VERSION` and the `api_versions` field populated with the supported version of the `ApiVersionsRequest`. It is then up to the client to retry, making another `ApiVersionsRequest` using the highest version supported by the client and broker. See [KIP-511: Collect and Expose Client's Name and Version in the Brokers](https://cwiki.apache.org/confluence/x/qRJ4Bw). + 3. If multiple versions of an API are supported by broker and client, clients are recommended to use the latest version supported by the broker and itself. + 4. Deprecation of a protocol version is done by marking an API version as deprecated in the protocol documentation. + 5. Supported API versions obtained from a broker are only valid for the connection on which that information is obtained. In the event of disconnection, the client should obtain the information from the broker again, as the broker might have been upgraded/downgraded in the meantime. + + + +### SASL Authentication Sequence + +The following sequence is used for SASL authentication: + + 1. Kafka `ApiVersionsRequest` may be sent by the client to obtain the version ranges of requests supported by the broker. This is optional. + 2. Kafka `SaslHandshakeRequest` containing the SASL mechanism for authentication is sent by the client. If the requested mechanism is not enabled in the server, the server responds with the list of supported mechanisms and closes the client connection. If the mechanism is enabled in the server, the server sends a successful response and continues with SASL authentication. + 3. The actual SASL authentication is now performed. If `SaslHandshakeRequest` version is v0, a series of SASL client and server tokens corresponding to the mechanism are sent as opaque packets without wrapping the messages with Kafka protocol headers. If `SaslHandshakeRequest` version is v1, the `SaslAuthenticate` request/response are used, where the actual SASL tokens are wrapped in the Kafka protocol. The error code in the final message from the broker will indicate if authentication succeeded or failed. + 4. If authentication succeeds, subsequent packets are handled as Kafka API requests. Otherwise, the client connection is closed. + + + +For interoperability with 0.9.0.x clients, the first packet received by the server is handled as a SASL/GSSAPI client token if it is not a valid Kafka request. SASL/GSSAPI authentication is performed starting with this packet, skipping the first two steps above. + +## The Protocol + +### Protocol Primitive Types + +The protocol is built out of the following primitive types. + +{{< include-html file="/static/41/generated/protocol_types.html" >}} + +### Notes on reading the request format grammars + +The [BNF](https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_Form)s below give an exact context free grammar for the request and response binary format. The BNF is intentionally not compact in order to give human-readable names. As always in a BNF a sequence of productions indicates concatenation. When there are multiple possible productions these are separated with '|' and may be enclosed in parentheses for grouping. The top-level definition is always given first and subsequent sub-parts are indented. 
+ +### Common Request and Response Structure + +All requests and responses originate from the following grammar which will be incrementally described through the rest of this document: + + + RequestOrResponse => Size (RequestMessage | ResponseMessage) + Size => int32 + +Field| Description +---|--- +message_size| The message_size field gives the size of the subsequent request or response message in bytes. The client can read requests by first reading this 4 byte size as an integer N, and then reading and parsing the subsequent N bytes of the request. + +### Request and Response Headers + +Different request and response versions require different versions of the corresponding headers. These header versions are specified below together with API message descriptions. + +### Record Batch + +A description of the record batch format can be found [here](/#recordbatch). + +## Constants + +### Error Codes + +We use numeric codes to indicate what problem occurred on the server. These can be translated by the client into exceptions or whatever error handling mechanism is appropriate in the client language. Here is a table of the error codes currently in use: + +{{< include-html file="/static/41/generated/protocol_errors.html" >}} + +### Api Keys + +The following are the numeric codes that the stable ApiKey in the request can take for each of the below request types. + +{{< include-html file="/static/41/generated/protocol_api_keys.html" >}} + +## The Messages + +This section gives details on each of the individual API Messages, their usage, their binary format, and the meaning of their fields. + +The message consists of the header and body: + + + Message => RequestOrResponseHeader Body + + +`RequestOrResponseHeader` is the versioned request or response header. `Body` is the message-specific body. + +{{< include-html file="/static/41/generated/protocol_messages.html" >}} + +## Some Common Philosophical Questions + +Some people have asked why we don't use HTTP. There are a number of reasons; the best is that client implementors can make use of some of the more advanced TCP features--the ability to multiplex requests, the ability to simultaneously poll many connections, etc. We have also found HTTP libraries in many languages to be surprisingly shabby. + +Others have asked if maybe we shouldn't support many different protocols. Prior experience with this was that it makes it very hard to add and test new features if they have to be ported across many protocol implementations. Our feeling is that most users don't really see multiple protocols as a feature, they just want a good reliable client in the language of their choice. + +Another question is why we don't adopt XMPP, STOMP, AMQP or an existing protocol. The answer to this varies by protocol, but in general the problem is that the protocol does determine large parts of the implementation and we couldn't do what we are doing if we didn't have control over the protocol. Our belief is that it is possible to do better than existing messaging systems have in providing a truly distributed messaging system, and to do this we need to build something that works differently. + +A final question is why we don't use a system like Protocol Buffers or Thrift to define our request messages. These packages excel at helping you to manage lots and lots of serialized messages. However we have only a few messages. Support across languages is somewhat spotty (depending on the package). 
Finally the mapping between binary log format and wire protocol is something we manage somewhat carefully and this would not be possible with these systems. Finally we prefer the style of versioning APIs explicitly and checking this to inferring new values as nulls as it allows more nuanced control of compatibility. diff --git a/content/en/41/getting-started/_index.md b/content/en/41/getting-started/_index.md new file mode 100644 index 000000000..c5eccc36a --- /dev/null +++ b/content/en/41/getting-started/_index.md @@ -0,0 +1,10 @@ +--- +title: Getting Started +description: This section provides an overview of what Kafka is, why it is useful, and how to get started using it. +weight: 1 +tags: ['kafka', 'docs', 'getting-started'] +aliases: +keywords: +type: docs +--- + diff --git a/content/en/41/getting-started/compatibility.md b/content/en/41/getting-started/compatibility.md new file mode 100644 index 000000000..4bc1417ad --- /dev/null +++ b/content/en/41/getting-started/compatibility.md @@ -0,0 +1,56 @@ +--- +title: Compatibility +description: +weight: 7 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Compatibility + +With the release of Kafka 4.0, significant changes have been introduced that impact compatibility across various components. To assist users in planning upgrades and ensuring seamless interoperability, a comprehensive compatibility matrix has been prepared. + +# JDK Compatibility Across Kafka Versions + +Module | Kafka Version | Java 11 | Java 17 | Java 23 +---|---|---|---|--- +Clients | 4.0.0 | ✅ | ✅ | ✅ +Streams | 4.0.0 | ✅ | ✅ | ✅ +Connect | 4.0.0 | ❌ | ✅ | ✅ +Server | 4.0.0 | ❌ | ✅ | ✅ + +**Note: Java 8 is removed in Kafka 4.0 and is no longer supported.** + +# Server Compatibility + +KRaft Cluster Version | Compatibility 4.0 Server (dynamic voter) | Compatibility 4.0 Server (static voter) +---|---|--- +before 3.2.x | ❌ | ❌ +3.3.x | ❌ | ✅ +3.4.x | ❌ | ✅ +3.5.x | ❌ | ✅ +3.6.x | ❌ | ✅ +3.7.x | ❌ | ✅ +3.8.x | ❌ | ✅ +3.9.x | ✅ | ✅ +4.0.x | ✅ | ✅ + +**Note: Can’t upgrade server from static voter to dynamic voter, see[KAFKA-16538](https://issues.apache.org/jira/browse/KAFKA-16538).** + +## Client/Broker Forward Compatibility + +Kafka Version | Module | Compatibility with Kafka 4.0 | Key Differences/Limitations +---|---|---|--- +0.x, 1.x, 2.0 | Client | ❌ Not Compatible | Pre-0.10.x protocols are fully removed in Kafka 4.0 ([KIP-896](https://cwiki.apache.org/confluence/x/K5sODg)). +Streams | ❌ Not Compatible | Pre-0.10.x protocols are fully removed in Kafka 4.0 ([KIP-896](https://cwiki.apache.org/confluence/x/K5sODg)). +Connect | ❌ Not Compatible | Pre-0.10.x protocols are fully removed in Kafka 4.0 ([KIP-896](https://cwiki.apache.org/confluence/x/K5sODg)). +2.1 ~ 2.8 | Client | ⚠️ Partially Compatible | More details in the [Consumer](/40/documentation.html#upgrade_400_notable_consumer), [Producer](/40/documentation.html#upgrade_400_notable_producer), and [Admin Client](/40/documentation.html#upgrade_400_notable_admin_client) section. +Streams | ⚠️ Limited Compatibility | More details in the [Kafka Streams](/40/documentation.html#upgrade_400_notable_kafka_streams) section. +Connect | ⚠️ Limited Compatibility | More details in the [Connect](/40/documentation.html#upgrade_400_notable_connect) section. +3.x | Client | ✅ Fully Compatible | +Streams | ✅ Fully Compatible | +Connect | ✅ Fully Compatible | + +Note: Starting with Kafka 4.0, the `--zookeeper` option in AdminClient commands has been removed. 
Users must use the `--bootstrap-server` option to interact with the Kafka cluster. This change aligns with the transition to KRaft mode. diff --git a/content/en/41/getting-started/docker.md b/content/en/41/getting-started/docker.md new file mode 100644 index 000000000..50e96b527 --- /dev/null +++ b/content/en/41/getting-started/docker.md @@ -0,0 +1,52 @@ +--- +title: Docker +description: +weight: 8 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +## JVM Based Apache Kafka Docker Image + +[Docker](https://www.docker.com/) is a popular container runtime. Docker images for the JVM based Apache Kafka can be found on [Docker Hub](https://hub.docker.com/r/apache/kafka) and are available from version 3.7.0. + +Docker image can be pulled from Docker Hub using the following command: + + + $ docker pull apache/kafka:4.1.0 + +If you want to fetch the latest version of the Docker image use following command: + + + $ docker pull apache/kafka:latest + +To start the Kafka container using this Docker image with default configs and on default port 9092: + + + $ docker run -p 9092:9092 apache/kafka:4.1.0 + +## GraalVM Based Native Apache Kafka Docker Image + +Docker images for the GraalVM Based Native Apache Kafka can be found on [Docker Hub](https://hub.docker.com/r/apache/kafka-native) and are available from version 3.8.0. +NOTE: This image is experimental and intended for local development and testing purposes only; it is not recommended for production use. + +Docker image can be pulled from Docker Hub using the following command: + + + $ docker pull apache/kafka-native:4.1.0 + +If you want to fetch the latest version of the Docker image use following command: + + + $ docker pull apache/kafka-native:latest + +To start the Kafka container using this Docker image with default configs and on default port 9092: + + + $ docker run -p 9092:9092 apache/kafka-native:4.1.0 + +## Usage guide + +Detailed instructions for using the Docker image are mentioned [here](https://github.com/apache/kafka/blob/trunk/docker/examples/README.md). diff --git a/content/en/41/getting-started/ecosystem.md b/content/en/41/getting-started/ecosystem.md new file mode 100644 index 000000000..5d916e268 --- /dev/null +++ b/content/en/41/getting-started/ecosystem.md @@ -0,0 +1,11 @@ +--- +title: Ecosystem +description: +weight: 4 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +There are a plethora of tools that integrate with Kafka outside the main distribution. The [ ecosystem page](https://cwiki.apache.org/confluence/x/Ri3VAQ) lists many of these, including stream processing systems, Hadoop integration, monitoring, and deployment tools. diff --git a/content/en/41/getting-started/introduction.md b/content/en/41/getting-started/introduction.md new file mode 100644 index 000000000..069e9fcb0 --- /dev/null +++ b/content/en/41/getting-started/introduction.md @@ -0,0 +1,92 @@ +--- +title: Introduction +description: +weight: 1 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +## What is event streaming? + +Event streaming is the digital equivalent of the human body's central nervous system. It is the technological foundation for the 'always-on' world where businesses are increasingly software-defined and automated, and where the user of software is more software. 
+ +Technically speaking, event streaming is the practice of capturing data in real-time from event sources like databases, sensors, mobile devices, cloud services, and software applications in the form of streams of events; storing these event streams durably for later retrieval; manipulating, processing, and reacting to the event streams in real-time as well as retrospectively; and routing the event streams to different destination technologies as needed. Event streaming thus ensures a continuous flow and interpretation of data so that the right information is at the right place, at the right time. + +## What can I use event streaming for? + +Event streaming is applied to a [wide variety of use cases](/powered-by) across a plethora of industries and organizations. Its many examples include: + + * To process payments and financial transactions in real-time, such as in stock exchanges, banks, and insurances. + * To track and monitor cars, trucks, fleets, and shipments in real-time, such as in logistics and the automotive industry. + * To continuously capture and analyze sensor data from IoT devices or other equipment, such as in factories and wind parks. + * To collect and immediately react to customer interactions and orders, such as in retail, the hotel and travel industry, and mobile applications. + * To monitor patients in hospital care and predict changes in condition to ensure timely treatment in emergencies. + * To connect, store, and make available data produced by different divisions of a company. + * To serve as the foundation for data platforms, event-driven architectures, and microservices. + + + +## Apache Kafka® is an event streaming platform. What does that mean? + +Kafka combines three key capabilities so you can implement [your use cases](/powered-by) for event streaming end-to-end with a single battle-tested solution: + + 1. To **publish** (write) and **subscribe to** (read) streams of events, including continuous import/export of your data from other systems. + 2. To **store** streams of events durably and reliably for as long as you want. + 3. To **process** streams of events as they occur or retrospectively. + + + +And all this functionality is provided in a distributed, highly scalable, elastic, fault-tolerant, and secure manner. Kafka can be deployed on bare-metal hardware, virtual machines, and containers, and on-premises as well as in the cloud. You can choose between self-managing your Kafka environments and using fully managed services offered by a variety of vendors. + +## How does Kafka work in a nutshell? + +Kafka is a distributed system consisting of **servers** and **clients** that communicate via a high-performance [TCP network protocol](/protocol.html). It can be deployed on bare-metal hardware, virtual machines, and containers in on-premise as well as cloud environments. + +**Servers** : Kafka is run as a cluster of one or more servers that can span multiple datacenters or cloud regions. Some of these servers form the storage layer, called the brokers. Other servers run [Kafka Connect](/#connect) to continuously import and export data as event streams to integrate Kafka with your existing systems such as relational databases as well as other Kafka clusters. To let you implement mission-critical use cases, a Kafka cluster is highly scalable and fault-tolerant: if any of its servers fails, the other servers will take over their work to ensure continuous operations without any data loss. 
+ +**Clients** : They allow you to write distributed applications and microservices that read, write, and process streams of events in parallel, at scale, and in a fault-tolerant manner even in the case of network problems or machine failures. Kafka ships with some such clients included, which are augmented by [dozens of clients](https://cwiki.apache.org/confluence/x/3gDVAQ) provided by the Kafka community: clients are available for Java and Scala including the higher-level [Kafka Streams](/streams/) library, for Go, Python, C/C++, and many other programming languages as well as REST APIs. + +## Main Concepts and Terminology + +An **event** records the fact that "something happened" in the world or in your business. It is also called record or message in the documentation. When you read or write data to Kafka, you do this in the form of events. Conceptually, an event has a key, value, timestamp, and optional metadata headers. Here's an example event: + + * Event key: "Alice" + * Event value: "Made a payment of $200 to Bob" + * Event timestamp: "Jun. 25, 2020 at 2:06 p.m." + + + +**Producers** are those client applications that publish (write) events to Kafka, and **consumers** are those that subscribe to (read and process) these events. In Kafka, producers and consumers are fully decoupled and agnostic of each other, which is a key design element to achieve the high scalability that Kafka is known for. For example, producers never need to wait for consumers. Kafka provides various [guarantees](/#semantics) such as the ability to process events exactly-once. + +Events are organized and durably stored in **topics**. Very simplified, a topic is similar to a folder in a filesystem, and the events are the files in that folder. An example topic name could be "payments". Topics in Kafka are always multi-producer and multi-subscriber: a topic can have zero, one, or many producers that write events to it, as well as zero, one, or many consumers that subscribe to these events. Events in a topic can be read as often as needed—unlike traditional messaging systems, events are not deleted after consumption. Instead, you define for how long Kafka should retain your events through a per-topic configuration setting, after which old events will be discarded. Kafka's performance is effectively constant with respect to data size, so storing data for a long time is perfectly fine. + +Topics are **partitioned** , meaning a topic is spread over a number of "buckets" located on different Kafka brokers. This distributed placement of your data is very important for scalability because it allows client applications to both read and write the data from/to many brokers at the same time. When a new event is published to a topic, it is actually appended to one of the topic's partitions. Events with the same event key (e.g., a customer or vehicle ID) are written to the same partition, and Kafka [guarantees](/#semantics) that any consumer of a given topic-partition will always read that partition's events in exactly the same order as they were written. + +![](/images/streams-and-tables-p1_p4.png) Figure: This example topic has four partitions P1–P4. Two different producer clients are publishing, independently from each other, new events to the topic by writing events over the network to the topic's partitions. Events with the same key (denoted by their color in the figure) are written to the same partition. Note that both producers can write to the same partition if appropriate. 
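+
+As a small, hypothetical illustration of these concepts, one could create a "payments" topic with four partitions (mirroring P1–P4 in the figure) and publish the example event with "Alice" as its key, so that all of Alice's events land in the same partition; the console producer's `parse.key` and `key.separator` properties are used here to supply the key:
+
+    $ bin/kafka-topics.sh --create --topic payments --partitions 4 --bootstrap-server localhost:9092
+    $ bin/kafka-console-producer.sh --topic payments --bootstrap-server localhost:9092 \
+        --property parse.key=true --property key.separator=:
+    >Alice:Made a payment of $200 to Bob
+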
+ +To make your data fault-tolerant and highly-available, every topic can be **replicated** , even across geo-regions or datacenters, so that there are always multiple brokers that have a copy of the data just in case things go wrong, you want to do maintenance on the brokers, and so on. A common production setting is a replication factor of 3, i.e., there will always be three copies of your data. This replication is performed at the level of topic-partitions. + +This primer should be sufficient for an introduction. The [Design](/#design) section of the documentation explains Kafka's various concepts in full detail, if you are interested. + +## Kafka APIs + +In addition to command line tooling for management and administration tasks, Kafka has five core APIs for Java and Scala: + + * The [Admin API](/documentation.html#adminapi) to manage and inspect topics, brokers, and other Kafka objects. + * The [Producer API](/documentation.html#producerapi) to publish (write) a stream of events to one or more Kafka topics. + * The [Consumer API](/documentation.html#consumerapi) to subscribe to (read) one or more topics and to process the stream of events produced to them. + * The [Kafka Streams API](/streams) to implement stream processing applications and microservices. It provides higher-level functions to process event streams, including transformations, stateful operations like aggregations and joins, windowing, processing based on event-time, and more. Input is read from one or more topics in order to generate output to one or more topics, effectively transforming the input streams to output streams. + * The [Kafka Connect API](/documentation.html#connect) to build and run reusable data import/export connectors that consume (read) or produce (write) streams of events from and to external systems and applications so they can integrate with Kafka. For example, a connector to a relational database like PostgreSQL might capture every change to a set of tables. However, in practice, you typically don't need to implement your own connectors because the Kafka community already provides hundreds of ready-to-use connectors. + + + +## Where to go from here + + * To get hands-on experience with Kafka, follow the [Quickstart](/quickstart). + * To understand Kafka in more detail, read the [Documentation](/). You also have your choice of [Kafka books and academic papers](/books-and-papers). + * Browse through the [Use Cases](/powered-by) to learn how other users in our world-wide community are getting value out of Kafka. + * Join a [local Kafka meetup group](/events) and [watch talks from Kafka Summit](https://kafka-summit.org/past-events/), the main conference of the Kafka community. + + diff --git a/content/en/41/getting-started/quickstart.md b/content/en/41/getting-started/quickstart.md new file mode 100644 index 000000000..46319932b --- /dev/null +++ b/content/en/41/getting-started/quickstart.md @@ -0,0 +1,210 @@ +--- +title: Quick Start +description: +weight: 3 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +## Step 1: Get Kafka + +[Download](https://www.apache.org/dyn/closer.cgi?path=/kafka/4.1.0/kafka_2.13-4.1.0.tgz) the latest Kafka release and extract it: + + + $ tar -xzf kafka_2.13-4.1.0.tgz + $ cd kafka_2.13-4.1.0 + +## Step 2: Start the Kafka environment + +NOTE: Your local environment must have Java 17+ installed. + +Kafka can be run using local scripts and downloaded files or the docker image. 
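+
+If you are unsure which Java version is active on your machine, you can check it before continuing (any JDK 17 or newer should work for this quickstart):
+
+    $ java -version
+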
+ +### Using downloaded files + +Generate a Cluster UUID + + + $ KAFKA_CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)" + +Format Log Directories + + + $ bin/kafka-storage.sh format --standalone -t $KAFKA_CLUSTER_ID -c config/server.properties + +Start the Kafka Server + + + $ bin/kafka-server-start.sh config/server.properties + +Once the Kafka server has successfully launched, you will have a basic Kafka environment running and ready to use. + +### Using JVM Based Apache Kafka Docker Image + +Get the Docker image: + + + $ docker pull apache/kafka:4.1.0 + +Start the Kafka Docker container: + + + $ docker run -p 9092:9092 apache/kafka:4.1.0 + +### Using GraalVM Based Native Apache Kafka Docker Image + +Get the Docker image: + + + $ docker pull apache/kafka-native:4.1.0 + +Start the Kafka Docker container: + + + $ docker run -p 9092:9092 apache/kafka-native:4.1.0 + +## Step 3: Create a topic to store your events + +Kafka is a distributed _event streaming platform_ that lets you read, write, store, and process [_events_](/#messages) (also called _records_ or _messages_ in the documentation) across many machines. + +Example events are payment transactions, geolocation updates from mobile phones, shipping orders, sensor measurements from IoT devices or medical equipment, and much more. These events are organized and stored in [_topics_](/#intro_concepts_and_terms). Very simplified, a topic is similar to a folder in a filesystem, and the events are the files in that folder. + +So before you can write your first events, you must create a topic. Open another terminal session and run: + + + $ bin/kafka-topics.sh --create --topic quickstart-events --bootstrap-server localhost:9092 + +All of Kafka's command line tools have additional options: run the `kafka-topics.sh` command without any arguments to display usage information. For example, it can also show you [details such as the partition count](/#intro_concepts_and_terms) of the new topic: + + + $ bin/kafka-topics.sh --describe --topic quickstart-events --bootstrap-server localhost:9092 + Topic: quickstart-events TopicId: NPmZHyhbR9y00wMglMH2sg PartitionCount: 1 ReplicationFactor: 1 Configs: + Topic: quickstart-events Partition: 0 Leader: 0 Replicas: 0 Isr: 0 + +## Step 4: Write some events into the topic + +A Kafka client communicates with the Kafka brokers via the network for writing (or reading) events. Once received, the brokers will store the events in a durable and fault-tolerant manner for as long as you need—even forever. + +Run the console producer client to write a few events into your topic. By default, each line you enter will result in a separate event being written to the topic. + + + $ bin/kafka-console-producer.sh --topic quickstart-events --bootstrap-server localhost:9092 + >This is my first event + >This is my second event + +You can stop the producer client with `Ctrl-C` at any time. + +## Step 5: Read the events + +Open another terminal session and run the console consumer client to read the events you just created: + + + $ bin/kafka-console-consumer.sh --topic quickstart-events --from-beginning --bootstrap-server localhost:9092 + This is my first event + This is my second event + +You can stop the consumer client with `Ctrl-C` at any time. + +Feel free to experiment: for example, switch back to your producer terminal (previous step) to write additional events, and see how the events immediately show up in your consumer terminal. 
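+
+If you would like a bit more detail per event, the console consumer's optional formatter properties can additionally print each event's key and timestamp (shown here purely as a variation on the command above):
+
+    $ bin/kafka-console-consumer.sh --topic quickstart-events --from-beginning --bootstrap-server localhost:9092 \
+        --property print.key=true --property print.timestamp=true
+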
+ +Because events are durably stored in Kafka, they can be read as many times and by as many consumers as you want. You can easily verify this by opening yet another terminal session and re-running the previous command again. + +## Step 6: Import/export your data as streams of events with Kafka Connect + +You probably have lots of data in existing systems like relational databases or traditional messaging systems, along with many applications that already use these systems. [Kafka Connect](/#connect) allows you to continuously ingest data from external systems into Kafka, and vice versa. It is an extensible tool that runs _connectors_ , which implement the custom logic for interacting with an external system. It is thus very easy to integrate existing systems with Kafka. To make this process even easier, there are hundreds of such connectors readily available. + +In this quickstart we'll see how to run Kafka Connect with simple connectors that import data from a file to a Kafka topic and export data from a Kafka topic to a file. + +First, make sure to add `connect-file-4.1.0.jar` to the `plugin.path` property in the Connect worker's configuration. For the purpose of this quickstart we'll use a relative path and consider the connectors' package as an uber jar, which works when the quickstart commands are run from the installation directory. However, it's worth noting that for production deployments using absolute paths is always preferable. See [plugin.path](/#connectconfigs_plugin.path) for a detailed description of how to set this config. + +Edit the `config/connect-standalone.properties` file, add or change the `plugin.path` configuration property to match the following, and save the file: + + + $ echo "plugin.path=libs/connect-file-4.1.0.jar" >> config/connect-standalone.properties + +Then, start by creating some seed data to test with: + + + $ echo -e "foo + bar" > test.txt + +Or on Windows: + + + $ echo foo > test.txt + $ echo bar >> test.txt + +Next, we'll start two connectors running in _standalone_ mode, which means they run in a single, local, dedicated process. We provide three configuration files as parameters. The first is always the configuration for the Kafka Connect process, containing common configuration such as the Kafka brokers to connect to and the serialization format for data. The remaining configuration files each specify a connector to create. These files include a unique connector name, the connector class to instantiate, and any other configuration required by the connector. + + + $ bin/connect-standalone.sh config/connect-standalone.properties config/connect-file-source.properties config/connect-file-sink.properties + +These sample configuration files, included with Kafka, use the default local cluster configuration you started earlier and create two connectors: the first is a source connector that reads lines from an input file and produces each to a Kafka topic, and the second is a sink connector that reads messages from a Kafka topic and produces each as a line in an output file. + +During startup you'll see a number of log messages, including some indicating that the connectors are being instantiated. Once the Kafka Connect process has started, the source connector should start reading lines from `test.txt` and producing them to the topic `connect-test`, and the sink connector should start reading messages from the topic `connect-test` and writing them to the file `test.sink.txt`.
We can verify the data has been delivered through the entire pipeline by examining the contents of the output file: + + + $ more test.sink.txt + foo + bar + +Note that the data is being stored in the Kafka topic `connect-test`, so we can also run a console consumer to see the data in the topic (or use custom consumer code to process it): + + + $ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic connect-test --from-beginning + {"schema":{"type":"string","optional":false},"payload":"foo"} + {"schema":{"type":"string","optional":false},"payload":"bar"} + … + +The connectors continue to process data, so we can add data to the file and see it move through the pipeline: + + + $ echo "Another line" >> test.txt + +You should see the line appear in the console consumer output and in the sink file. + +## Step 7: Process your events with Kafka Streams + +Once your data is stored in Kafka as events, you can process the data with the [Kafka Streams](/streams) client library for Java/Scala. It allows you to implement mission-critical real-time applications and microservices, where the input and/or output data is stored in Kafka topics. Kafka Streams combines the simplicity of writing and deploying standard Java and Scala applications on the client side with the benefits of Kafka's server-side cluster technology to make these applications highly scalable, elastic, fault-tolerant, and distributed. The library supports exactly-once processing, stateful operations and aggregations, windowing, joins, processing based on event-time, and much more. + +To give you a first taste, here's how one would implement the popular `WordCount` algorithm: + + + KStream<String, String> textLines = builder.stream("quickstart-events"); + + KTable<String, Long> wordCounts = textLines + .flatMapValues(line -> Arrays.asList(line.toLowerCase().split(" "))) + .groupBy((keyIgnored, word) -> word) + .count(); + + wordCounts.toStream().to("output-topic", Produced.with(Serdes.String(), Serdes.Long())); + +The [Kafka Streams demo](/streams/quickstart) and the [app development tutorial](/41/streams/tutorial) demonstrate how to code and run such a streaming application from start to finish. + +## Step 8: Terminate the Kafka environment + +Now that you have reached the end of the quickstart, feel free to tear down the Kafka environment, or continue playing around. + + 1. Stop the producer and consumer clients with `Ctrl-C`, if you haven't done so already. + 2. Stop the Kafka broker with `Ctrl-C`. + + + +If you also want to delete any data of your local Kafka environment, including any events you have created along the way, run the command: + + + $ rm -rf /tmp/kafka-logs /tmp/kraft-combined-logs + +## Congratulations! + +You have successfully finished the Apache Kafka quickstart. + +To learn more, we suggest the following next steps: + + * Read through the brief [Introduction](/intro) to learn how Kafka works at a high level, its main concepts, and how it compares to other technologies. To understand Kafka in more detail, head over to the [Documentation](/). + * Browse through the [Use Cases](/powered-by) to learn how other users in our world-wide community are getting value out of Kafka. + * Join a [local Kafka meetup group](/events) and [watch talks from Kafka Summit](https://kafka-summit.org/past-events/), the main conference of the Kafka community.
+ + diff --git a/content/en/41/getting-started/upgrade.md b/content/en/41/getting-started/upgrade.md new file mode 100644 index 000000000..ab2281797 --- /dev/null +++ b/content/en/41/getting-started/upgrade.md @@ -0,0 +1,173 @@ +--- +title: Upgrading +description: +weight: 5 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +## Upgrading to 4.1.0 + +### Upgrading Servers to 4.1.0 from any version 3.3.x through 4.0.x + +### Notable changes in 4.1.0 + + * Apache Kafka 4.1 ships with a preview of Queues for Kafka ([KIP-932](https://cwiki.apache.org/confluence/x/4hA0Dw)). This feature introduces a new kind of group called share groups, as an alternative to consumer groups. Consumers in a share group cooperatively consume records from topics, without assigning each partition to just one consumer. Share groups also introduce per-record acknowledgement and counting of delivery attempts. Use share groups in cases where records are processed one at a time, rather than as part of an ordered stream. To enable share groups, use the `kafka-features.sh` tool to upgrade to `share.version=1`. For more information, please read the [release notes](https://cwiki.apache.org/confluence/x/CIq3FQ). + * **Common** + * The logger class name for LogCleaner has been updated from `kafka.log.LogCleaner` to `org.apache.kafka.storage.internals.log.LogCleaner` in the log4j2.yaml configuration file. Added loggers for the `org.apache.kafka.storage.internals.log.LogCleaner$CleanerThread` and `org.apache.kafka.storage.internals.log.Cleaner` classes to CleanerAppender. + * The filename for rotated `state-change.log` files has been updated from `stage-change.log.[date]` to `state-change.log.[date]` in the log4j2.yaml configuration file. + * **Broker** + * The configuration `log.cleaner.enable` is deprecated. Users should no longer set it to `false`, in preparation for its future removal. After the removal, `log.cleaner.threads` will also have a lower bound of 1. For further details, please refer to [KIP-1148](https://cwiki.apache.org/confluence/x/XAyWF). + * KIP-966 part 1: Eligible Leader Replicas (ELR) will be enabled by default on new clusters. After the ELR feature is enabled, the previously set `min.insync.replicas` value at the broker-level config will be removed. Please set it at the cluster level if necessary. For further details, please refer to [here](/41/documentation.html#eligible_leader_replicas). + * **Producer** + * The `flush` method now detects potential deadlocks and prohibits its use inside a callback. This change prevents unintended blocking behavior, which was a known risk in earlier versions. + * **Command** + * The `force` option of `ConfigCommand` has been removed, as it has been non-operational since version 0.10.1.0. + * **Admin** + * The `listConsumerGroups()` and `listConsumerGroups(ListConsumerGroupsOptions)` methods in `Admin` are deprecated, and will be removed in the next major version. Use `Admin.listGroups(ListGroupsOptions.forConsumerGroups())` instead. + * **Kafka Streams** + * The `window.size.ms` and `window.inner.serde.class` in `StreamsConfig` are deprecated. Use the corresponding string constants defined in `TimeWindowedSerializer`, `TimeWindowedDeserializer`, `SessionWindowedSerializer` and `SessionWindowedDeserializer` instead. + + + +## Upgrading to 4.0.1 + +### Upgrading Clients to 4.0.1 + +**For a rolling upgrade:** + + 1. Upgrade the clients one at a time: shut down the client, update the code, and restart it. + 2.
Clients (including Streams and Connect) must be on version 2.1 or higher before upgrading to 4.0. Many deprecated APIs were removed in Kafka 4.0. For more information about the compatibility, please refer to the [compatibility matrix](/41/compatibility.html) or [KIP-1124](https://cwiki.apache.org/confluence/x/y4kgF). + + + +### Upgrading Servers to 4.0.1 from any version 3.3.x through 3.9.x + +Note: Apache Kafka 4.0 only supports KRaft mode - ZooKeeper mode has been removed. As such, **broker upgrades to 4.0.0 (and higher) require KRaft mode and the software and metadata versions must be at least 3.3.x** (the first version when KRaft mode was deemed production ready). For clusters in KRaft mode with versions older than 3.3.x, we recommend upgrading to 3.9.x before upgrading to 4.0.x. Clusters in ZooKeeper mode have to be [migrated to KRaft mode](/41/documentation.html#kraft_zk_migration) before they can be upgraded to 4.0.x. + +**For a rolling upgrade:** + + 1. Upgrade the brokers one at a time: shut down the broker, update the code, and restart it. Once you have done so, the brokers will be running the latest version and you can verify that the cluster's behavior and performance meet expectations. + 2. Once the cluster's behavior and performance have been verified, finalize the upgrade by running ` bin/kafka-features.sh --bootstrap-server localhost:9092 upgrade --release-version 4.0 ` + 3. Note that cluster metadata downgrade is not supported in this version since it has metadata changes. Every [MetadataVersion](https://github.com/apache/kafka/blob/trunk/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersion.java) has a boolean parameter that indicates if there are metadata changes (i.e. `IBP_4_0_IV1(23, "4.0", "IV1", true)` means this version has metadata changes). Given your current and target versions, a downgrade is only possible if there are no metadata changes in the versions between. + + + +### Notable changes in 4.0.1 + + * The filename for rotated `state-change.log` files has been updated from `stage-change.log.[date]` to `state-change.log.[date]` in the log4j2.yaml configuration file. See [KAFKA-19576](https://issues.apache.org/jira/browse/KAFKA-19576) for details. + + + +### Notable changes in 4.0.0 + + * Old protocol API versions have been removed. Users should ensure brokers are version 2.1 or higher before upgrading Java clients (including Connect and Kafka Streams which use the clients internally) to 4.0. Similarly, users should ensure their Java clients (including Connect and Kafka Streams) version is 2.1 or higher before upgrading brokers to 4.0. Finally, care also needs to be taken when it comes to kafka clients that are not part of Apache Kafka, please see [KIP-896](https://cwiki.apache.org/confluence/x/K5sODg) for the details. + * Apache Kafka 4.0 only supports KRaft mode - ZooKeeper mode has been removed. About version upgrade, check [Upgrading to 4.0.1 from any version 3.3.x through 3.9.x](/41/documentation.html#upgrade_4_0_1) for more info. + * Apache Kafka 4.0 ships with a brand-new group coordinator implementation (See [here](https://cwiki.apache.org/confluence/x/HhD1D)). Functionally speaking, it implements all the same APIs. There are reasonable defaults, but the behavior of the new group coordinator can be tuned by setting the configurations with prefix `group.coordinator`. + * The Next Generation of the Consumer Rebalance Protocol ([KIP-848](https://cwiki.apache.org/confluence/x/HhD1D)) is now Generally Available (GA) in Apache Kafka 4.0. 
The protocol is automatically enabled on the server when the upgrade to 4.0 is finalized. Note that once the new protocol is used by consumer groups, the cluster can only be downgraded to version 3.4.1 or newer. For more information check [here](/41/documentation.html#consumer_rebalance_protocol). + * Transactions Server-Side Defense ([KIP-890](https://cwiki.apache.org/confluence/x/B40ODg)) brings a strengthened transactional protocol to Apache Kafka 4.0. The new and improved transactional protocol is enabled when the upgrade to 4.0 is finalized. When using 4.0 producer clients, the producer epoch is bumped on every transaction to ensure every transaction includes the intended messages and duplicates are not written as part of the next transaction. Downgrading the protocol is safe. For more information check [here](/41/documentation.html#transaction_protocol). + * Eligible Leader Replicas ([KIP-966 Part 1](https://cwiki.apache.org/confluence/x/mpOzDw)) enhances the replication protocol for the Apache Kafka 4.0. Now the KRaft controller keeps track of the data partition replicas that are not included in ISR but are safe to be elected as leader without data loss. Such replicas are stored in the partition metadata as the `Eligible Leader Replicas`(ELR). For more information check [here](/41/documentation.html#eligible_leader_replicas). + * Since Apache Kafka 4.0.0, we have added a system property (`org.apache.kafka.sasl.oauthbearer.allowed.urls`) to set the allowed URLs as SASL OAUTHBEARER token or jwks endpoints. By default, the value is an empty list. Users should explicitly set the allowed list if necessary. + * A number of deprecated classes, methods, configurations and tools have been removed. + * **Common** + * The `metrics.jmx.blacklist` and `metrics.jmx.whitelist` configurations were removed from the `org.apache.kafka.common.metrics.JmxReporter` Please use `metrics.jmx.exclude` and `metrics.jmx.include` respectively instead. + * The `auto.include.jmx.reporter` configuration was removed. The `metric.reporters` configuration is now set to `org.apache.kafka.common.metrics.JmxReporter` by default. + * The constructor `org.apache.kafka.common.metrics.JmxReporter` with string argument was removed. See [KIP-606](https://cwiki.apache.org/confluence/x/SxIRCQ) for details. + * The `bufferpool-wait-time-total`, `io-waittime-total`, and `iotime-total` metrics were removed. Please use `bufferpool-wait-time-ns-total`, `io-wait-time-ns-total`, and `io-time-ns-total` metrics as replacements, respectively. + * The `kafka.common.requests.DescribeLogDirsResponse.LogDirInfo` class was removed. Please use the `kafka.clients.admin.DescribeLogDirsResult.descriptions()` class and `kafka.clients.admin.DescribeLogDirsResult.allDescriptions()` instead. + * The `kafka.common.requests.DescribeLogDirsResponse.ReplicaInfo` class was removed. Please use the `kafka.clients.admin.DescribeLogDirsResult.descriptions()` class and `kafka.clients.admin.DescribeLogDirsResult.allDescriptions()` instead. + * The `org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerLoginCallbackHandler` class was removed. Please use the `org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler` class instead. + * The `org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerValidatorCallbackHandler` class was removed. Please use the `org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler` class instead. 
+ * The `org.apache.kafka.common.errors.NotLeaderForPartitionException` class was removed. The `org.apache.kafka.common.errors.NotLeaderOrFollowerException` is returned if a request could not be processed because the broker is not the leader or follower for a topic partition. + * The `org.apache.kafka.clients.producer.internals.DefaultPartitioner` and `org.apache.kafka.clients.producer.UniformStickyPartitioner` classes were removed. + * The `log.message.format.version` and `message.format.version` configs were removed. + * The function `onNewBatch` in `org.apache.kafka.clients.producer.Partitioner` class was removed. + * The default properties files for KRaft mode are no longer stored in the separate `config/kraft` directory since Zookeeper has been removed. These files have been consolidated with other configuration files. Now all configuration files are in `config` directory. + * The valid format for `--bootstrap-server` only supports comma-separated value, such as `host1:port1,host2:port2,...`. Providing other formats, like space-separated bootstrap servers (e.g., `host1:port1 host2:port2 host3:port3`), will result in an exception, even though this was allowed in Apache Kafka versions prior to 4.0. + * **Broker** + * The `delegation.token.master.key` configuration was removed. Please use `delegation.token.secret.key` instead. + * The `offsets.commit.required.acks` configuration was removed. See [KIP-1041](https://cwiki.apache.org/confluence/x/9YobEg) for details. + * The `log.message.timestamp.difference.max.ms` configuration was removed. Please use `log.message.timestamp.before.max.ms` and `log.message.timestamp.after.max.ms` instead. See [KIP-937](https://cwiki.apache.org/confluence/x/thQ0Dw) for details. + * The `remote.log.manager.copier.thread.pool.size` configuration default value was changed to 10 from -1. Values of -1 are no longer valid; a minimum value of 1 or higher is required. See [KIP-1030](https://cwiki.apache.org/confluence/x/FAqpEQ) + * The `remote.log.manager.expiration.thread.pool.size` configuration default value was changed to 10 from -1. Values of -1 are no longer valid; a minimum value of 1 or higher is required. See [KIP-1030](https://cwiki.apache.org/confluence/x/FAqpEQ) + * The `remote.log.manager.thread.pool.size` configuration default value was changed to 2 from 10. See [KIP-1030](https://cwiki.apache.org/confluence/x/FAqpEQ) + * The minimum `segment.bytes/log.segment.bytes` has changed from 14 bytes to 1MB. See [KIP-1030](https://cwiki.apache.org/confluence/x/FAqpEQ) + * **MirrorMaker** + * The original MirrorMaker (MM1) and related classes were removed. Please use the Connect-based MirrorMaker (MM2), as described in the [Geo-Replication section.](/41/#georeplication). + * The `use.incremental.alter.configs` configuration was removed from `MirrorSourceConnector`. The modified behavior is now identical to the previous `required` configuration, therefore users should ensure that brokers in the target cluster are at least running 2.3.0. + * The `add.source.alias.to.metrics` configuration was removed from `MirrorSourceConnector`. The source cluster alias is now always added to the metrics. + * The `config.properties.blacklist` was removed from the `org.apache.kafka.connect.mirror.MirrorSourceConfig` Please use `config.properties.exclude` instead. + * The `topics.blacklist` was removed from the `org.apache.kafka.connect.mirror.MirrorSourceConfig` Please use `topics.exclude` instead. 
+ * The `groups.blacklist` was removed from the `org.apache.kafka.connect.mirror.MirrorSourceConfig`. Please use `groups.exclude` instead. + * **Tools** + * The `kafka.common.MessageReader` class was removed. Please use the [`org.apache.kafka.tools.api.RecordReader`](/41/javadoc/org/apache/kafka/tools/api/RecordReader.html) interface to build custom readers for the `kafka-console-producer` tool. + * The `kafka.tools.DefaultMessageFormatter` class was removed. Please use the `org.apache.kafka.tools.consumer.DefaultMessageFormatter` class instead. + * The `kafka.tools.LoggingMessageFormatter` class was removed. Please use the `org.apache.kafka.tools.consumer.LoggingMessageFormatter` class instead. + * The `kafka.tools.NoOpMessageFormatter` class was removed. Please use the `org.apache.kafka.tools.consumer.NoOpMessageFormatter` class instead. + * The `--whitelist` option was removed from the `kafka-console-consumer` command line tool. Please use `--include` instead. + * Redirections from the old tools packages have been removed: `kafka.admin.FeatureCommand`, `kafka.tools.ClusterTool`, `kafka.tools.EndToEndLatency`, `kafka.tools.StateChangeLogMerger`, `kafka.tools.StreamsResetter`, `kafka.tools.JmxTool`. + * The `--authorizer`, `--authorizer-properties`, and `--zk-tls-config-file` options were removed from the `kafka-acls` command line tool. Please use `--bootstrap-server` or `--bootstrap-controller` instead. + * The `kafka.serializer.Decoder` trait was removed; please use the [`org.apache.kafka.tools.api.Decoder`](/41/javadoc/org/apache/kafka/tools/api/Decoder.html) interface to build custom decoders for the `kafka-dump-log` tool. + * The `kafka.coordinator.group.OffsetsMessageFormatter` class was removed. Please use the `org.apache.kafka.tools.consumer.OffsetsMessageFormatter` class instead. + * The `kafka.coordinator.group.GroupMetadataMessageFormatter` class was removed. Please use the `org.apache.kafka.tools.consumer.GroupMetadataMessageFormatter` class instead. + * The `kafka.coordinator.transaction.TransactionLogMessageFormatter` class was removed. Please use the `org.apache.kafka.tools.consumer.TransactionLogMessageFormatter` class instead. + * The `--topic-white-list` option was removed from the `kafka-replica-verification` command line tool. Please use `--topics-include` instead. + * The `--broker-list` option was removed from the `kafka-verifiable-consumer` command line tool. Please use `--bootstrap-server` instead. + * `kafka-configs.sh` now uses the incrementalAlterConfigs API to alter broker configurations instead of the deprecated alterConfigs API, and it will fail directly if the broker doesn't support the incrementalAlterConfigs API, which means the broker version is prior to 2.3.x. See [KIP-1011](https://cwiki.apache.org/confluence/x/wIn5E) for more details. + * The `kafka.admin.ZkSecurityMigrator` tool was removed. + * **Connect** + * The `whitelist` and `blacklist` configurations were removed from the `org.apache.kafka.connect.transforms.ReplaceField` transformation. Please use `include` and `exclude` respectively instead. + * The `onPartitionsRevoked(Collection)` and `onPartitionsAssigned(Collection)` methods were removed from `SinkTask`. + * The `commitRecord(SourceRecord)` method was removed from `SourceTask`. + * **Consumer** + * The `poll(long)` method was removed from the consumer. Please use `poll(Duration)` instead. Note that there is a difference in behavior between the two methods.
The `poll(Duration)` method does not block beyond the timeout awaiting partition assignment, whereas the earlier `poll(long)` method used to wait beyond the timeout. + * The `committed(TopicPartition)` and `committed(TopicPartition, Duration)` methods were removed from the consumer. Please use `committed(Set)` and `committed(Set, Duration)` instead. + * The `setException(KafkaException)` method was removed from the `org.apache.kafka.clients.consumer.MockConsumer`. Please use `setPollException(KafkaException)` instead. + * **Producer** + * The `enable.idempotence` configuration will no longer automatically fall back when the `max.in.flight.requests.per.connection` value exceeds 5. + * The deprecated `sendOffsetsToTransaction(Map, String)` method has been removed from the Producer API. + * The default `linger.ms` changed from 0 to 5 in Apache Kafka 4.0 as the efficiency gains from larger batches typically result in similar or lower producer latency despite the increased linger. + * **Admin client** + * The `alterConfigs` method was removed from the `org.apache.kafka.clients.admin.Admin`. Please use `incrementalAlterConfigs` instead. + * The `org.apache.kafka.common.ConsumerGroupState` enumeration and related methods have been deprecated. Please use `GroupState` instead which applies to all types of group. + * The `Admin.describeConsumerGroups` method used to return a `ConsumerGroupDescription` in state `DEAD` if the group ID was not found. In Apache Kafka 4.0, the `GroupIdNotFoundException` is thrown instead as part of the support for new types of group. + * The `org.apache.kafka.clients.admin.DeleteTopicsResult.values()` method was removed. Please use `org.apache.kafka.clients.admin.DeleteTopicsResult.topicNameValues()` instead. + * The `org.apache.kafka.clients.admin.TopicListing.TopicListing(String, boolean)` method was removed. Please use `org.apache.kafka.clients.admin.TopicListing.TopicListing(String, Uuid, boolean)` instead. + * The `org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions.topicPartitions(List)` method was removed. Please use `org.apache.kafka.clients.admin.Admin.listConsumerGroupOffsets(Map, ListConsumerGroupOffsetsOptions)` instead. + * The deprecated `dryRun` methods were removed from the `org.apache.kafka.clients.admin.UpdateFeaturesOptions`. Please use `validateOnly` instead. + * The constructor `org.apache.kafka.clients.admin.FeatureUpdate` with short and boolean arguments was removed. Please use the constructor that accepts short and the specified `UpgradeType` enum instead. + * The `allowDowngrade` method was removed from the `org.apache.kafka.clients.admin.FeatureUpdate`. + * The `org.apache.kafka.clients.admin.DescribeTopicsResult.DescribeTopicsResult(Map>)` method was removed. Please use `org.apache.kafka.clients.admin.DescribeTopicsResult.DescribeTopicsResult(Map>, Map>)` instead. + * The `values()` method was removed from the `org.apache.kafka.clients.admin.DescribeTopicsResult`. Please use `topicNameValues()` instead. + * The `all()` method was removed from the `org.apache.kafka.clients.admin.DescribeTopicsResult`. Please use `allTopicNames()` instead. + * **Kafka Streams** + * All public APIs, deprecated in Apache Kafka 3.6 or an earlier release, have been removed, with the exception of `JoinWindows.of()` and `JoinWindows#grace()`. See [KAFKA-17531](https://issues.apache.org/jira/browse/KAFKA-17531) for details. 
+ * The most important changes are highlighted in the [Kafka Streams upgrade guide](/41/streams/upgrade-guide.html#streams_api_changes_400). + * For a full list of changes, see [KAFKA-12822](https://issues.apache.org/jira/browse/KAFKA-12822). + * If you are using `KStream#transformValues()`, which was removed in the Apache Kafka 4.0.0 release, and you need to rewrite your program to use `KStreams#processValues()` instead, pay close attention to the [migration guide](/41/streams/developer-guide/dsl-api.html#transformers-removal-and-migration-to-processors). + * Other changes: + * The minimum Java version required by clients and Kafka Streams applications has been increased from Java 8 to Java 11, while brokers, Connect, and tools now require Java 17. See [KIP-750](https://cwiki.apache.org/confluence/x/P4vOCg) and [KIP-1013](https://cwiki.apache.org/confluence/x/Bov5E) for more details. + * Java 23 support has been added in Apache Kafka 4.0. + * Scala 2.12 support has been removed in Apache Kafka 4.0. See [KIP-751](https://cwiki.apache.org/confluence/x/OovOCg) for more details. + * The logging framework has been migrated from Log4j to Log4j2. Users can use the log4j-transform-cli tool to automatically convert their existing Log4j configuration files to Log4j2 format. See [log4j-transform-cli](https://logging.staged.apache.org/log4j/transform/cli.html#log4j-transform-cli) for more details. Log4j2 provides limited compatibility for Log4j configurations. See [Use Log4j 1 to Log4j 2 bridge](https://logging.apache.org/log4j/2.x/migrate-from-log4j1.html#ConfigurationCompatibility) for more information. + * KafkaLog4jAppender has been removed; users should migrate to the Log4j2 appender. See [KafkaAppender](https://logging.apache.org/log4j/2.x/manual/appenders.html#KafkaAppender) for more details. + * The `--delete-config` option in the `kafka-topics` command line tool has been deprecated. + * For implementors of RemoteLogMetadataManager (RLMM), a new API `nextSegmentWithTxnIndex` is introduced in RLMM to allow the implementation to return the next segment metadata with a transaction index. This API is used when the consumers are enabled with isolation level as READ_COMMITTED. See [KIP-1058](https://cwiki.apache.org/confluence/x/BwuTEg) for more details. + * The criteria for identifying internal topics in ReplicationPolicy and DefaultReplicationPolicy have been updated to enable the replication of topics that appear to be internal but aren't truly internal to Kafka and Mirror Maker 2. See [KIP-1074](https://cwiki.apache.org/confluence/x/jA3OEg) for more details. + * [KIP-714](https://cwiki.apache.org/confluence/x/2xRRCg) is now enabled for Kafka Streams via [KIP-1076](https://cwiki.apache.org/confluence/x/XA-OEg). This allows collecting not only the metrics of the internally used clients of a Kafka Streams application via a broker-side plugin, but also the [metrics](/41/#kafka_streams_monitoring) of the Kafka Streams runtime itself. + * The default value of `num.recovery.threads.per.data.dir` has been changed from 1 to 2. The impact of this is faster recovery after an unclean shutdown at the expense of extra IO cycles. See [KIP-1030](https://cwiki.apache.org/confluence/x/FAqpEQ) + * The default value of `message.timestamp.after.max.ms` has been changed from Long.Max to 1 hour. The impact of this is that messages with a timestamp more than 1 hour in the future will be rejected when `message.timestamp.type=CreateTime` is set.
See [KIP-1030](https://cwiki.apache.org/confluence/x/FAqpEQ) + * Introduced in [KIP-890](https://cwiki.apache.org/confluence/x/B40ODg), the `TransactionAbortableException` enhances error handling within transactional operations by clearly indicating scenarios where transactions should be aborted due to errors. It is important for applications to properly manage both `TimeoutException` and `TransactionAbortableException` when working with transaction producers. + * **TimeoutException:** This exception indicates that a transactional operation has timed out. Given the risk of message duplication that can arise from retrying operations after a timeout (potentially violating exactly-once semantics), applications should treat timeouts as reasons to abort the ongoing transaction. + * **TransactionAbortableException:** Specifically introduced to signal errors that should lead to transaction abortion, ensuring this exception is properly handled is critical for maintaining the integrity of transactional processing. + * To ensure seamless operation and compatibility with future Kafka versions, developers are encouraged to update their error-handling logic to treat both exceptions as triggers for aborting transactions. This approach is pivotal for preserving exactly-once semantics. + * See [KIP-890](https://cwiki.apache.org/confluence/x/B40ODg) and [KIP-1050](https://cwiki.apache.org/confluence/x/8ItyEg) for more details + * The filename for rotated `state-change.log` files incorrectly rotates to `stage-change.log.[date]` (changing state to stage). This issue is corrected in 4.0.1. See [KAFKA-19576](https://issues.apache.org/jira/browse/KAFKA-19576) for details. + + + +## Upgrading to 3.9.0 and older versions + +See [Upgrading From Previous Versions](/39/#upgrade) in the 3.9 documentation. diff --git a/content/en/41/getting-started/uses.md b/content/en/41/getting-started/uses.md new file mode 100644 index 000000000..e3715df79 --- /dev/null +++ b/content/en/41/getting-started/uses.md @@ -0,0 +1,45 @@ +--- +title: Use Cases +description: +weight: 2 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +Here is a description of a few of the popular use cases for Apache Kafka®. For an overview of a number of these areas in action, see [this blog post](https://engineering.linkedin.com/distributed-systems/log-what-every-software-engineer-should-know-about-real-time-datas-unifying/). + +## Messaging + +Kafka works well as a replacement for a more traditional message broker. Message brokers are used for a variety of reasons (to decouple processing from data producers, to buffer unprocessed messages, etc). In comparison to most messaging systems Kafka has better throughput, built-in partitioning, replication, and fault-tolerance which makes it a good solution for large scale message processing applications. + +In our experience messaging uses are often comparatively low-throughput, but may require low end-to-end latency and often depend on the strong durability guarantees Kafka provides. + +In this domain Kafka is comparable to traditional messaging systems such as [ActiveMQ](https://activemq.apache.org) or [RabbitMQ](https://www.rabbitmq.com). + +## Website Activity Tracking + +The original use case for Kafka was to be able to rebuild a user activity tracking pipeline as a set of real-time publish-subscribe feeds. This means site activity (page views, searches, or other actions users may take) is published to central topics with one topic per activity type. 
These feeds are available for subscription for a range of use cases including real-time processing, real-time monitoring, and loading into Hadoop or offline data warehousing systems for offline processing and reporting. + +Activity tracking is often very high volume as many activity messages are generated for each user page view. + +## Metrics + +Kafka is often used for operational monitoring data. This involves aggregating statistics from distributed applications to produce centralized feeds of operational data. + +## Log Aggregation + +Many people use Kafka as a replacement for a log aggregation solution. Log aggregation typically collects physical log files off servers and puts them in a central place (a file server or HDFS perhaps) for processing. Kafka abstracts away the details of files and gives a cleaner abstraction of log or event data as a stream of messages. This allows for lower-latency processing and easier support for multiple data sources and distributed data consumption. In comparison to log-centric systems like Scribe or Flume, Kafka offers equally good performance, stronger durability guarantees due to replication, and much lower end-to-end latency. + +## Stream Processing + +Many users of Kafka process data in processing pipelines consisting of multiple stages, where raw input data is consumed from Kafka topics and then aggregated, enriched, or otherwise transformed into new topics for further consumption or follow-up processing. For example, a processing pipeline for recommending news articles might crawl article content from RSS feeds and publish it to an "articles" topic; further processing might normalize or deduplicate this content and publish the cleansed article content to a new topic; a final processing stage might attempt to recommend this content to users. Such processing pipelines create graphs of real-time data flows based on the individual topics. Starting in 0.10.0.0, a light-weight but powerful stream processing library called [Kafka Streams](/streams) is available in Apache Kafka to perform such data processing as described above. Apart from Kafka Streams, alternative open source stream processing tools include [Apache Storm](https://storm.apache.org/) and [Apache Samza](https://samza.apache.org/). + +## Event Sourcing + +[Event sourcing](https://martinfowler.com/eaaDev/EventSourcing.html) is a style of application design where state changes are logged as a time-ordered sequence of records. Kafka's support for very large stored log data makes it an excellent backend for an application built in this style. + +## Commit Log + +Kafka can serve as a kind of external commit-log for a distributed system. The log helps replicate data between nodes and acts as a re-syncing mechanism for failed nodes to restore their data. The [log compaction](/documentation.html#compaction) feature in Kafka helps support this usage. In this usage Kafka is similar to [Apache BookKeeper](https://bookkeeper.apache.org/) project. diff --git a/content/en/41/getting-started/zk2kraft.md b/content/en/41/getting-started/zk2kraft.md new file mode 100644 index 000000000..fb4776f8c --- /dev/null +++ b/content/en/41/getting-started/zk2kraft.md @@ -0,0 +1,219 @@ +--- +title: KRaft vs ZooKeeper +description: +weight: 6 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Differences Between KRaft mode and ZooKeeper mode + +# Removed ZooKeeper Features + +This section documents differences in behavior between KRaft mode and ZooKeeper mode. 
Specifically, several configurations, metrics and features have changed or are no longer required in KRaft mode. To migrate an existing cluster from ZooKeeper mode to KRaft mode, please refer to the [ZooKeeper to KRaft Migration](/39/documentation.html#kraft_zk_migration) section. + +# Configurations + + * Removed password encoder-related configurations. These configurations were used in ZooKeeper mode to define the key and backup key for encrypting sensitive data (e.g., passwords), specify the algorithm and key generation method for password encryption (e.g., AES, RSA), and control the key length and encryption strength. + + * `password.encoder.secret` + * `password.encoder.old.secret` + * `password.encoder.keyfactory.algorithm` + * `password.encoder.cipher.algorithm` + * `password.encoder.key.length` + * `password.encoder.iterations` + +In KRaft mode, Kafka stores sensitive data in records, and the data is not encrypted in Kafka. + + * Removed `control.plane.listener.name`. In ZooKeeper mode, Kafka relied on ZooKeeper to manage metadata, but some internal operations (e.g., communication between the controller, a.k.a. the broker controller, and brokers) still required Kafka's internal control plane for coordination. + +In KRaft mode, Kafka eliminates its dependency on ZooKeeper, and the control plane functionality is fully integrated into Kafka itself. The process roles are clearly separated: brokers handle data-related requests, while the controllers (a.k.a. quorum controllers) manage metadata-related requests. The controllers use the Raft protocol for internal communication, which operates differently from the ZooKeeper model. Use the following parameters to configure the control plane listener: + + * `controller.listener.names` + * `listeners` + * `listener.security.protocol.map` + * Removed graceful broker shutdown-related configurations. These configurations were used in ZooKeeper mode to define the maximum number of retries and the retry backoff time for controlled shutdowns, which reduce the risk of unplanned leader changes and data inconsistencies. + + * `controlled.shutdown.max.retries` + * `controlled.shutdown.retry.backoff.ms` + +In KRaft mode, Kafka uses the Raft protocol to manage metadata. The broker shutdown process differs from ZooKeeper mode as it is managed by the quorum-based controller. The shutdown process is more reliable and efficient due to automated leader transfers and metadata updates handled by the controller. + + * Removed the broker id generation-related configurations. These configurations were used in ZooKeeper mode to enable automatic broker id generation and control the broker id generation process. + + * `reserved.broker.max.id` + * `broker.id.generation.enable` + +Kafka uses the node id in KRaft mode to identify servers. + + * `node.id` + * Removed broker protocol version-related configurations. These configurations were used in ZooKeeper mode to define the communication protocol version between brokers. In KRaft mode, Kafka uses `metadata.version` to control the feature level of the cluster, which can be managed using `bin/kafka-features.sh`. + + * `inter.broker.protocol.version` + * Removed dynamic configurations which relied on ZooKeeper. In KRaft mode, to change these configurations, you need to restart the broker/controller. + + * `advertised.listeners` + * Removed the leader imbalance configuration used only in ZooKeeper. `leader.imbalance.per.broker.percentage` was used to limit the preferred leader election frequency in ZooKeeper.
+ + * `leader.imbalance.per.broker.percentage` + * Removed ZooKeeper-related configurations. + + * `zookeeper.connect` + * `zookeeper.session.timeout.ms` + * `zookeeper.connection.timeout.ms` + * `zookeeper.set.acl` + * `zookeeper.max.in.flight.requests` + * `zookeeper.ssl.client.enable` + * `zookeeper.clientCnxnSocket` + * `zookeeper.ssl.keystore.location` + * `zookeeper.ssl.keystore.password` + * `zookeeper.ssl.keystore.type` + * `zookeeper.ssl.truststore.location` + * `zookeeper.ssl.truststore.password` + * `zookeeper.ssl.truststore.type` + * `zookeeper.ssl.protocol` + * `zookeeper.ssl.enabled.protocols` + * `zookeeper.ssl.cipher.suites` + * `zookeeper.ssl.endpoint.identification.algorithm` + * `zookeeper.ssl.crl.enable` + * `zookeeper.ssl.ocsp.enable` + + + +# Dynamic Log Levels + + * The dynamic log levels feature allows you to change the log4j settings of a running broker or controller process without restarting it. The command-line syntax for setting dynamic log levels on brokers has not changed in KRaft mode. Here is an example of setting the log level on a broker: + + + ./bin/kafka-configs.sh --bootstrap-server localhost:9092 \ + --entity-type broker-loggers \ + --entity-name 1 \ + --alter \ + --add-config org.apache.kafka.raft.KafkaNetworkChannel=TRACE + + + * When setting dynamic log levels on the controllers, the `--bootstrap-controller` flag must be used. Here is an example of setting the log level on a controller: + + + ./bin/kafka-configs.sh --bootstrap-controller localhost:9093 \ + --entity-type broker-loggers \ + --entity-name 1 \ + --alter \ + --add-config org.apache.kafka.raft.KafkaNetworkChannel=TRACE + + + +Note that the entity-type must be specified as `broker-loggers`, even though we are changing a controller's log level rather than a broker's log level. + + * When changing the log level of a combined node, which has both broker and controller roles, either `--bootstrap-server` or `--bootstrap-controller` may be used. Combined nodes have only a single set of log levels; there are not different log levels for the broker and controller parts of the process. + + + + +# Dynamic Controller Configurations + + * Some Kafka configurations can be changed dynamically, without restarting the process. The command-line syntax for setting dynamic configurations on brokers has not changed in KRaft mode. Here is an example of setting the number of IO threads on a broker: + + + ./bin/kafka-configs.sh --bootstrap-server localhost:9092 \ + --entity-type brokers \ + --entity-name 1 \ + --alter \ + --add-config num.io.threads=5 + + + * Controllers will apply all applicable cluster-level dynamic configurations. For example, the following command line will change the `max.connections` setting on all of the brokers and all of the controllers in the cluster: + + + ./bin/kafka-configs.sh --bootstrap-server localhost:9092 \ + --entity-type brokers \ + --entity-default \ + --alter \ + --add-config max.connections=10000 + + + +It is not currently possible to apply a dynamic configuration on only a single controller. + + + + +# Metrics + + * Removed the following metrics related to ZooKeeper. `ControlPlaneNetworkProcessorAvgIdlePercent` monitored the average fraction of time the network processors were idle, and `ControlPlaneExpiredConnectionsKilledCount` monitored the total number of connections disconnected, across all processors.
+ + * `ControlPlaneNetworkProcessorAvgIdlePercent` + * `ControlPlaneExpiredConnectionsKilledCount` + +In KRaft mode, Kafka also provides metrics to monitor the network processors and expired connections. Use the following metrics to monitor the network processors and expired connections: + + * `NetworkProcessorAvgIdlePercent` + * `ExpiredConnectionsKilledCount` + * Removed the metrics which are only used in ZooKeeper mode. + + * `kafka.controller:type=ControllerChannelManager,name=QueueSize` + * `kafka.controller:type=ControllerChannelManager,name=RequestRateAndQueueTimeMs` + * `kafka.controller:type=ControllerEventManager,name=EventQueueSize` + * `kafka.controller:type=ControllerEventManager,name=EventQueueTimeMs` + * `kafka.controller:type=ControllerStats,name=AutoLeaderBalanceRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=ControlledShutdownRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=ControllerChangeRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=ControllerShutdownRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=IdleRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=IsrChangeRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=LeaderAndIsrResponseReceivedRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=LeaderElectionRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=ListPartitionReassignmentRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=LogDirChangeRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=ManualLeaderBalanceRateAndTimeMs` + * `kafka.controller:type=KafkaController,name=MigratingZkBrokerCount` + * `kafka.controller:type=ControllerStats,name=PartitionReassignmentRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=TopicChangeRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=TopicDeletionRateAndTimeMs` + * `kafka.controller:type=KafkaController,name=TopicsIneligibleToDeleteCount` + * `kafka.controller:type=ControllerStats,name=TopicUncleanLeaderElectionEnableRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=UncleanLeaderElectionEnableRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=UncleanLeaderElectionsPerSec` + * `kafka.controller:type=ControllerStats,name=UpdateFeaturesRateAndTimeMs` + * `kafka.controller:type=ControllerStats,name=UpdateMetadataResponseReceivedRateAndTimeMs` + * `kafka.controller:type=KafkaController,name=ActiveBrokerCount` + * `kafka.controller:type=KafkaController,name=ActiveControllerCount` + * `kafka.controller:type=KafkaController,name=ControllerState` + * `kafka.controller:type=KafkaController,name=FencedBrokerCount` + * `kafka.controller:type=KafkaController,name=GlobalPartitionCount` + * `kafka.controller:type=KafkaController,name=GlobalTopicCount` + * `kafka.controller:type=KafkaController,name=OfflinePartitionsCount` + * `kafka.controller:type=KafkaController,name=PreferredReplicaImbalanceCount` + * `kafka.controller:type=KafkaController,name=ReplicasIneligibleToDeleteCount` + * `kafka.controller:type=KafkaController,name=ReplicasToDeleteCount` + * `kafka.controller:type=KafkaController,name=TopicsToDeleteCount` + * `kafka.controller:type=KafkaController,name=ZkMigrationState` + * `kafka.server:type=DelayedOperationPurgatory,name=PurgatorySize,delayedOperation=ElectLeader` + * `kafka.server:type=DelayedOperationPurgatory,name=PurgatorySize,delayedOperation=topic` + * 
`kafka.server:type=DelayedOperationPurgatory,name=NumDelayedOperations,delayedOperation=ElectLeader` + * `kafka.server:type=DelayedOperationPurgatory,name=NumDelayedOperations,delayedOperation=topic` + * `kafka.server:type=SessionExpireListener,name=SessionState` + * `kafka.server:type=SessionExpireListener,name=ZooKeeperAuthFailuresPerSec` + * `kafka.server:type=SessionExpireListener,name=ZooKeeperDisconnectsPerSec` + * `kafka.server:type=SessionExpireListener,name=ZooKeeperExpiresPerSec` + * `kafka.server:type=SessionExpireListener,name=ZooKeeperReadOnlyConnectsPerSec` + * `kafka.server:type=SessionExpireListener,name=ZooKeeperSaslAuthenticationsPerSec` + * `kafka.server:type=SessionExpireListener,name=ZooKeeperSyncConnectsPerSec` + * `kafka.server:type=ZooKeeperClientMetrics,name=ZooKeeperRequestLatencyMs` + + + +# Behavioral Change Reference + +This document catalogs the functional and operational differences between ZooKeeper mode and KRaft mode. + + * **Configuration Value Size Limitation** : KRaft mode restricts configuration values to a maximum size of `Short.MAX_VALUE`, which prevents using the append operation to create larger configuration values. + * **Policy Class Deployment** : In KRaft mode, the `CreateTopicPolicy` and `AlterConfigPolicy` plugins run on the controller instead of the broker. This requires users to deploy the policy class JAR files on the controller and configure the parameters (`create.topic.policy.class.name` and `alter.config.policy.class.name`) on the controller. + +Note: If migrating from ZooKeeper mode, ensure policy JARs are moved from brokers to controllers. + + * **Custom implementations of`KafkaPrincipalBuilder`**: In KRaft mode, custom implementations of `KafkaPrincipalBuilder` must also implement `KafkaPrincipalSerde`; otherwise brokers will not be able to forward requests to the controller. + + diff --git a/content/en/41/implementation/_index.md b/content/en/41/implementation/_index.md new file mode 100644 index 000000000..7426a2273 --- /dev/null +++ b/content/en/41/implementation/_index.md @@ -0,0 +1,10 @@ +--- +title: Implementation +description: +weight: 5 +tags: ['kafka', 'docs', 'implementation'] +aliases: +keywords: +type: docs +--- + diff --git a/content/en/41/implementation/distribution.md b/content/en/41/implementation/distribution.md new file mode 100644 index 000000000..7c3b1c1ba --- /dev/null +++ b/content/en/41/implementation/distribution.md @@ -0,0 +1,19 @@ +--- +title: Distribution +description: Distribution +weight: 5 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Distribution + +## Consumer Offset Tracking + +Kafka consumer tracks the maximum offset it has consumed in each partition and has the capability to commit offsets so that it can resume from those offsets in the event of a restart. Kafka provides the option to store all the offsets for a given consumer group in a designated broker (for that group) called the group coordinator. i.e., any consumer instance in that consumer group should send its offset commits and fetches to that group coordinator (broker). Consumer groups are assigned to coordinators based on their group names. A consumer can look up its coordinator by issuing a FindCoordinatorRequest to any Kafka broker and reading the FindCoordinatorResponse which will contain the coordinator details. The consumer can then proceed to commit or fetch offsets from the coordinator broker. In case the coordinator moves, the consumer will need to rediscover the coordinator. 
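+
+In practice, applications rarely send FindCoordinatorRequests themselves: the Java consumer discovers (and rediscovers) the group coordinator internally and exposes offset commits through its API. As an illustrative sketch only (the class name, topic name, group id, and configuration values below are arbitrary choices for the example), a manual commit looks roughly like this:
+
+
+    import java.time.Duration;
+    import java.util.List;
+    import java.util.Properties;
+    import org.apache.kafka.clients.consumer.ConsumerRecords;
+    import org.apache.kafka.clients.consumer.KafkaConsumer;
+
+    public class ManualCommitExample {
+        public static void main(String[] args) {
+            Properties props = new Properties();
+            props.put("bootstrap.servers", "localhost:9092");
+            props.put("group.id", "my-group");
+            props.put("enable.auto.commit", "false"); // commit offsets explicitly instead of automatically
+            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+
+            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
+                consumer.subscribe(List.of("my-topic"));
+                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
+                records.forEach(record -> System.out.println(record.value())); // application-specific processing goes here
+                // Sends an OffsetCommitRequest for the offsets returned by the last poll
+                // to the group coordinator and blocks until it is acknowledged.
+                consumer.commitSync();
+            }
+        }
+    }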
Offset commits can be done automatically or manually by the consumer instance. + +When the group coordinator receives an OffsetCommitRequest, it appends the request to a special compacted Kafka topic named `__consumer_offsets`. The broker sends a successful offset commit response to the consumer only after all the replicas of the offsets topic receive the offsets. In case the offsets fail to replicate within a configurable timeout, the offset commit will fail and the consumer may retry the commit after backing off. The brokers periodically compact the offsets topic since it only needs to maintain the most recent offset commit per partition. The coordinator also caches the offsets in an in-memory table in order to serve offset fetches quickly. + +When the coordinator receives an offset fetch request, it simply returns the last committed offset vector from the offsets cache. In case the coordinator was just started or if it just became the coordinator for a new set of consumer groups (by becoming a leader for a partition of the offsets topic), it may need to load the offsets topic partition into the cache. In this case, the offset fetch will fail with a CoordinatorLoadInProgressException and the consumer may retry the OffsetFetchRequest after backing off. diff --git a/content/en/41/implementation/log.md b/content/en/41/implementation/log.md new file mode 100644 index 000000000..411f46017 --- /dev/null +++ b/content/en/41/implementation/log.md @@ -0,0 +1,61 @@ +--- +title: Log +description: Log +weight: 4 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Log + +A log for a topic named "my-topic" with two partitions consists of two directories (namely `my-topic-0` and `my-topic-1`) populated with data files containing the messages for that topic. The format of the log files is a sequence of "log entries"; each log entry is a 4 byte integer _N_ storing the message length which is followed by the _N_ message bytes. Each message is uniquely identified by a 64-bit integer _offset_ giving the byte position of the start of this message in the stream of all messages ever sent to that topic on that partition. The on-disk format of each message is given below. Each log file is named with the offset of the first message it contains. So the first file created will be 00000000000000000000.log, and each additional file will have an integer name roughly _S_ bytes from the previous file where _S_ is the max log file size given in the configuration. + +The exact binary format for records is versioned and maintained as a standard interface so record batches can be transferred between producer, broker, and client without recopying or conversion when desirable. The previous section included details about the on-disk format of records. + +The use of the message offset as the message id is unusual. Our original idea was to use a GUID generated by the producer, and maintain a mapping from GUID to offset on each broker. But since a consumer must maintain an ID for each server, the global uniqueness of the GUID provides no value. Furthermore, the complexity of maintaining the mapping from a random id to an offset requires a heavyweight index structure which must be synchronized with disk, essentially requiring a full persistent random-access data structure.
Thus to simplify the lookup structure we decided to use a simple per-partition atomic counter which could be coupled with the partition id and node id to uniquely identify a message; this makes the lookup structure simpler, though multiple seeks per consumer request are still likely. However once we settled on a counter, the jump to directly using the offset seemed natural--both after all are monotonically increasing integers unique to a partition. Since the offset is hidden from the consumer API this decision is ultimately an implementation detail and we went with the more efficient approach. + +![](/41/images/kafka_log.png) + +## Writes + +The log allows serial appends which always go to the last file. This file is rolled over to a fresh file when it reaches a configurable size (say 1GB). The log takes two configuration parameters: _M_ , which gives the number of messages to write before forcing the OS to flush the file to disk, and _S_ , which gives a number of seconds after which a flush is forced. This gives a durability guarantee of losing at most _M_ messages or _S_ seconds of data in the event of a system crash. + +## Reads + +Reads are done by giving the 64-bit logical offset of a message and an _S_ -byte max chunk size. This will return an iterator over the messages contained in the _S_ -byte buffer. _S_ is intended to be larger than any single message, but in the event of an abnormally large message, the read can be retried multiple times, each time doubling the buffer size, until the message is read successfully. A maximum message and buffer size can be specified to make the server reject messages larger than some size, and to give a bound to the client on the maximum it needs to ever read to get a complete message. It is likely that the read buffer ends with a partial message, this is easily detected by the size delimiting. + +The actual process of reading from an offset requires first locating the log segment file in which the data is stored, calculating the file-specific offset from the global offset value, and then reading from that file offset. The search is done as a simple binary search variation against an in-memory range maintained for each file. + +The log provides the capability of getting the most recently written message to allow clients to start subscribing as of "right now". This is also useful in the case the consumer fails to consume its data within its SLA-specified number of days. In this case when the client attempts to consume a non-existent offset it is given an OutOfRangeException and can either reset itself or fail as appropriate to the use case. + +The following is the format of the results sent to the consumer. + + + MessageSetSend (fetch result) + + total length : 4 bytes + error code : 2 bytes + message 1 : x bytes + ... + message n : x bytes + + + MultiMessageSetSend (multiFetch result) + + total length : 4 bytes + error code : 2 bytes + messageSetSend 1 + ... + messageSetSend n + +## Deletes + +Data is deleted one log segment at a time. The log manager applies two metrics to identify segments which are eligible for deletion: time and size. For time-based policies, the record timestamps are considered, with the largest timestamp in a segment file (order of records is not relevant) defining the retention time for the entire segment. Size-based retention is disabled by default. When enabled the log manager keeps deleting the oldest segment file until the overall size of the partition is within the configured limit again. 
If both policies are enabled at the same time, a segment that is eligible for deletion due to either policy will be deleted. To avoid locking reads while still allowing deletes that modify the segment list we use a copy-on-write style segment list implementation that provides consistent views to allow a binary search to proceed on an immutable static snapshot view of the log segments while deletes are progressing. + +## Guarantees + +The log provides a configuration parameter _M_ which controls the maximum number of messages that are written before forcing a flush to disk. On startup a log recovery process is run that iterates over all messages in the newest log segment and verifies that each message entry is valid. A message entry is valid if the sum of its size and offset are less than the length of the file AND the CRC32 of the message payload matches the CRC stored with the message. In the event corruption is detected the log is truncated to the last valid offset. + +Note that two kinds of corruption must be handled: truncation in which an unwritten block is lost due to a crash, and corruption in which a nonsense block is ADDED to the file. The reason for this is that in general the OS makes no guarantee of the write order between the file inode and the actual block data so in addition to losing written data the file can gain nonsense data if the inode is updated with a new size but a crash occurs before the block containing that data is written. The CRC detects this corner case, and prevents it from corrupting the log (though the unwritten messages are, of course, lost). diff --git a/content/en/41/implementation/message-format.md b/content/en/41/implementation/message-format.md new file mode 100644 index 000000000..fea94ceec --- /dev/null +++ b/content/en/41/implementation/message-format.md @@ -0,0 +1,95 @@ +--- +title: Message Format +description: Message Format +weight: 3 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Message Format + +Messages (aka Records) are always written in batches. The technical term for a batch of messages is a record batch, and a record batch contains one or more records. In the degenerate case, we could have a record batch containing a single record. Record batches and records have their own headers. The format of each is described below. + +## Record Batch + +The following is the on-disk format of a RecordBatch. + + + baseOffset: int64 + batchLength: int32 + partitionLeaderEpoch: int32 + magic: int8 (current magic value is 2) + crc: uint32 + attributes: int16 + bit 0~2: + 0: no compression + 1: gzip + 2: snappy + 3: lz4 + 4: zstd + bit 3: timestampType + bit 4: isTransactional (0 means not transactional) + bit 5: isControlBatch (0 means not a control batch) + bit 6: hasDeleteHorizonMs (0 means baseTimestamp is not set as the delete horizon for compaction) + bit 7~15: unused + lastOffsetDelta: int32 + baseTimestamp: int64 + maxTimestamp: int64 + producerId: int64 + producerEpoch: int16 + baseSequence: int32 + recordsCount: int32 + records: [Record] + +Note that when compression is enabled, the compressed record data is serialized directly following the count of the number of records. + +The CRC covers the data from the attributes to the end of the batch (i.e. all the bytes that follow the CRC). It is located after the magic byte, which means that clients must parse the magic byte before deciding how to interpret the bytes between the batch length and the magic byte. 
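As a rough, hedged illustration (not broker code), the checksum over this region can be computed with the JDK's built-in CRC-32C support; the byte array below is assumed to already hold the serialized bytes from the attributes field through the end of the batch:

    import java.util.zip.CRC32C;

    public class BatchCrcSketch {
        // Sketch only: computes the checksum stored in the crc field, assuming the
        // caller passes exactly the serialized bytes from the attributes field to the
        // end of the batch (the partition leader epoch and earlier fields are excluded).
        public static long batchCrc(byte[] bytesFromAttributesToEnd) {
            CRC32C crc = new CRC32C();
            crc.update(bytesFromAttributesToEnd, 0, bytesFromAttributesToEnd.length);
            return crc.getValue(); // unsigned 32-bit value, matching the uint32 crc field
        }
    }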
The partition leader epoch field is not included in the CRC computation to avoid the need to recompute the CRC when this field is assigned for every batch that is received by the broker. The CRC-32C (Castagnoli) polynomial is used for the computation. + +On compaction, we preserve the first and last offset/sequence numbers from the original batch when the log is cleaned. This is required in order to be able to restore the producer's state when the log is reloaded. If we did not retain the last sequence number, for example, then after a partition leader failure, the producer might see an OutOfSequence error. The base sequence number must be preserved for duplicate checking (the broker checks incoming Produce requests for duplicates by verifying that the first and last sequence numbers of the incoming batch match the last from that producer). As a result, it is possible to have empty batches in the log when all the records in the batch are cleaned but batch is still retained in order to preserve a producer's last sequence number. One oddity here is that the baseTimestamp field is not preserved during compaction, so it will change if the first record in the batch is compacted away. + +Compaction may also modify the baseTimestamp if the record batch contains records with a null payload or aborted transaction markers. The baseTimestamp will be set to the timestamp of when those records should be deleted with the delete horizon attribute bit also set. + +### Control Batches + +A control batch contains a single record called the control record. Control records should not be passed on to applications. Instead, they are used by consumers to filter out aborted transactional messages. + +The key of a control record conforms to the following schema: + + + version: int16 (current version is 0) + type: int16 (0 indicates an abort marker, 1 indicates a commit) + +The schema for the value of a control record is dependent on the type. The value is opaque to clients. + +## Record + +The on-disk format of each record is delineated below. + + + length: varint + attributes: int8 + bit 0~7: unused + timestampDelta: varlong + offsetDelta: varint + keyLength: varint + key: byte[] + valueLength: varint + value: byte[] + headersCount: varint + Headers => [Header] + +### Record Header + + + headerKeyLength: varint + headerKey: String + headerValueLength: varint + Value: byte[] + +We use the same varint encoding as Protobuf. More information on the latter can be found [here](https://developers.google.com/protocol-buffers/docs/encoding#varints). The count of headers in a record is also encoded as a varint. + +## Old Message Format + +Prior to Kafka 0.11, messages were transferred and stored in _message sets_. See [Old Message Format](https://kafka.apache.org/39/#messageset) for more details. diff --git a/content/en/41/implementation/messages.md b/content/en/41/implementation/messages.md new file mode 100644 index 000000000..30775ac4c --- /dev/null +++ b/content/en/41/implementation/messages.md @@ -0,0 +1,13 @@ +--- +title: Messages +description: Messages +weight: 2 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Messages + +Messages consist of a variable-length header, a variable-length opaque key byte array and a variable-length opaque value byte array. The format of the header is described in the following section. 
Leaving the key and value opaque is the right decision: there is a great deal of progress being made on serialization libraries right now, and any particular choice is unlikely to be right for all uses. Needless to say a particular application using Kafka would likely mandate a particular serialization type as part of its usage. The `RecordBatch` interface is simply an iterator over messages with specialized methods for bulk reading and writing to an NIO `Channel`. diff --git a/content/en/41/implementation/network-layer.md b/content/en/41/implementation/network-layer.md new file mode 100644 index 000000000..6faa03778 --- /dev/null +++ b/content/en/41/implementation/network-layer.md @@ -0,0 +1,13 @@ +--- +title: Network Layer +description: Network Layer +weight: 1 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Network Layer + +The network layer is a fairly straight-forward NIO server, and will not be described in great detail. The sendfile implementation is done by giving the `TransferableRecords` interface a `writeTo` method. This allows the file-backed message set to use the more efficient `transferTo` implementation instead of an in-process buffered write. The threading model is a single acceptor thread and _N_ processor threads which handle a fixed number of connections each. This design has been pretty thoroughly tested [elsewhere](https://web.archive.org/web/20120619234320/https://sna-projects.com/blog/2009/08/introducing-the-nio-socketserver-implementation/) and found to be simple to implement and fast. The protocol is kept quite simple to allow for future implementation of clients in other languages. diff --git a/content/en/41/kafka-connect/_index.md b/content/en/41/kafka-connect/_index.md new file mode 100644 index 000000000..9f00c35b3 --- /dev/null +++ b/content/en/41/kafka-connect/_index.md @@ -0,0 +1,10 @@ +--- +title: Kafka Connect +description: +weight: 8 +tags: ['kafka', 'docs', 'security'] +aliases: +keywords: +type: docs +--- + diff --git a/content/en/41/kafka-connect/administration.md b/content/en/41/kafka-connect/administration.md new file mode 100644 index 000000000..178cb6e36 --- /dev/null +++ b/content/en/41/kafka-connect/administration.md @@ -0,0 +1,60 @@ +--- +title: Administration +description: Administration +weight: 4 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Administration + +Kafka Connect's REST layer provides a set of APIs to enable administration of the cluster. This includes APIs to view the configuration of connectors and the status of their tasks, as well as to alter their current behavior (e.g. changing configuration and restarting tasks). + +When a connector is first submitted to the cluster, a rebalance is triggered between the Connect workers in order to distribute the load that consists of the tasks of the new connector. This same rebalancing procedure is also used when connectors increase or decrease the number of tasks they require, when a connector's configuration is changed, or when a worker is added or removed from the group as part of an intentional upgrade of the Connect cluster or due to a failure. + +In versions prior to 2.3.0, the Connect workers would rebalance the full set of connectors and their tasks in the cluster as a simple way to make sure that each worker has approximately the same amount of work. This behavior can be still enabled by setting `connect.protocol=eager`. 
+ +Starting with 2.3.0, Kafka Connect is using by default a protocol that performs [incremental cooperative rebalancing](https://cwiki.apache.org/confluence/x/Y4MCBg) that incrementally balances the connectors and tasks across the Connect workers, affecting only tasks that are new, to be removed, or need to move from one worker to another. Other tasks are not stopped and restarted during the rebalance, as they would have been with the old protocol. + +If a Connect worker leaves the group, intentionally or due to a failure, Connect waits for `scheduled.rebalance.max.delay.ms` before triggering a rebalance. This delay defaults to five minutes (`300000ms`) to tolerate failures or upgrades of workers without immediately redistributing the load of a departing worker. If this worker returns within the configured delay, it gets its previously assigned tasks in full. However, this means that the tasks will remain unassigned until the time specified by `scheduled.rebalance.max.delay.ms` elapses. If a worker does not return within that time limit, Connect will reassign those tasks among the remaining workers in the Connect cluster. + +The new Connect protocol is enabled when all the workers that form the Connect cluster are configured with `connect.protocol=compatible`, which is also the default value when this property is missing. Therefore, upgrading to the new Connect protocol happens automatically when all the workers upgrade to 2.3.0. A rolling upgrade of the Connect cluster will activate incremental cooperative rebalancing when the last worker joins on version 2.3.0. + +You can use the REST API to view the current status of a connector and its tasks, including the ID of the worker to which each was assigned. For example, the `GET /connectors/file-source/status` request shows the status of a connector named `file-source`: + + + { + "name": "file-source", + "connector": { + "state": "RUNNING", + "worker_id": "192.168.1.208:8083" + }, + "tasks": [ + { + "id": 0, + "state": "RUNNING", + "worker_id": "192.168.1.209:8083" + } + ] + } + +Connectors and their tasks publish status updates to a shared topic (configured with `status.storage.topic`) which all workers in the cluster monitor. Because the workers consume this topic asynchronously, there is typically a (short) delay before a state change is visible through the status API. The following states are possible for a connector or one of its tasks: + + * **UNASSIGNED:** The connector/task has not yet been assigned to a worker. + * **RUNNING:** The connector/task is running. + * **PAUSED:** The connector/task has been administratively paused. + * **STOPPED:** The connector has been stopped. Note that this state is not applicable to tasks because the tasks for a stopped connector are shut down and won't be visible in the status API. + * **FAILED:** The connector/task has failed (usually by raising an exception, which is reported in the status output). + * **RESTARTING:** The connector/task is either actively restarting or is expected to restart soon + + + +In most cases, connector and task states will match, though they may be different for short periods of time when changes are occurring or if tasks have failed. For example, when a connector is first started, there may be a noticeable delay before the connector and its tasks have all transitioned to the RUNNING state. States will also diverge when tasks fail since Connect does not automatically restart failed tasks. To restart a connector/task manually, you can use the restart APIs listed above. 
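As an illustration, here is a minimal Java 11 sketch that checks a connector's status and then triggers a restart through the REST API; the worker address `localhost:8083` and the connector name `file-source` are placeholders for your own deployment:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class RestartConnectorSketch {
        public static void main(String[] args) throws Exception {
            String worker = "http://localhost:8083";   // placeholder worker address
            String connector = "file-source";          // placeholder connector name
            HttpClient client = HttpClient.newHttpClient();

            // Fetch the connector's current status (GET /connectors/{name}/status).
            HttpRequest status = HttpRequest.newBuilder()
                    .uri(URI.create(worker + "/connectors/" + connector + "/status"))
                    .GET()
                    .build();
            System.out.println(client.send(status, HttpResponse.BodyHandlers.ofString()).body());

            // Ask the worker to restart the connector (POST /connectors/{name}/restart).
            HttpRequest restart = HttpRequest.newBuilder()
                    .uri(URI.create(worker + "/connectors/" + connector + "/restart"))
                    .POST(HttpRequest.BodyPublishers.noBody())
                    .build();
            HttpResponse<String> response = client.send(restart, HttpResponse.BodyHandlers.ofString());
            System.out.println("Restart request returned HTTP " + response.statusCode());
        }
    }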
Note that if you try to restart a task while a rebalance is taking place, Connect will return a 409 (Conflict) status code. You can retry after the rebalance completes, but it might not be necessary since rebalances effectively restart all the connectors and tasks in the cluster. + +Starting with 2.5.0, Kafka Connect uses the `status.storage.topic` to also store information related to the topics that each connector is using. Connect Workers use these per-connector topic status updates to respond to requests to the REST endpoint `GET /connectors/{name}/topics` by returning the set of topic names that a connector is using. A request to the REST endpoint `PUT /connectors/{name}/topics/reset` resets the set of active topics for a connector and allows a new set to be populated, based on the connector's latest pattern of topic usage. Upon connector deletion, the set of the connector's active topics is also deleted. Topic tracking is enabled by default but can be disabled by setting `topic.tracking.enable=false`. If you want to disallow requests to reset the active topics of connectors during runtime, set the Worker property `topic.tracking.allow.reset=false`. + +It's sometimes useful to temporarily stop the message processing of a connector. For example, if the remote system is undergoing maintenance, it would be preferable for source connectors to stop polling it for new data instead of filling logs with exception spam. For this use case, Connect offers a pause/resume API. While a source connector is paused, Connect will stop polling it for additional records. While a sink connector is paused, Connect will stop pushing new messages to it. The pause state is persistent, so even if you restart the cluster, the connector will not begin message processing again until the task has been resumed. Note that there may be a delay before all of a connector's tasks have transitioned to the PAUSED state since it may take time for them to finish whatever processing they were in the middle of when being paused. Additionally, failed tasks will not transition to the PAUSED state until they have been restarted. + +In 3.5.0, Connect introduced a stop API that completely shuts down the tasks for a connector and deallocates any resources claimed by them. This is different from pausing a connector where tasks are left idling and any resources claimed by them are left allocated (which allows the connector to begin processing data quickly once it is resumed). Stopping a connector is more efficient from a resource usage standpoint than pausing it, but can cause it to take longer to begin processing data once resumed. Note that the offsets for a connector can be only modified via the offsets management endpoints if it is in the stopped state. diff --git a/content/en/41/kafka-connect/connector-development-guide.md b/content/en/41/kafka-connect/connector-development-guide.md new file mode 100644 index 000000000..55fd15664 --- /dev/null +++ b/content/en/41/kafka-connect/connector-development-guide.md @@ -0,0 +1,396 @@ +--- +title: Connector Development Guide +description: Connector Development Guide +weight: 3 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Connector Development Guide + +This guide describes how developers can write new connectors for Kafka Connect to move data between Kafka and other systems. It briefly reviews a few key concepts and then describes how to create a simple connector. 
+ +## Core Concepts and APIs + +### Connectors and Tasks + +To copy data between Kafka and another system, users create a `Connector` for the system they want to pull data from or push data to. Connectors come in two flavors: `SourceConnectors` import data from another system (e.g. `JDBCSourceConnector` would import a relational database into Kafka) and `SinkConnectors` export data (e.g. `HDFSSinkConnector` would export the contents of a Kafka topic to an HDFS file). + +`Connectors` do not perform any data copying themselves: their configuration describes the data to be copied, and the `Connector` is responsible for breaking that job into a set of `Tasks` that can be distributed to workers. These `Tasks` also come in two corresponding flavors: `SourceTask` and `SinkTask`. + +With an assignment in hand, each `Task` must copy its subset of the data to or from Kafka. In Kafka Connect, it should always be possible to frame these assignments as a set of input and output streams consisting of records with consistent schemas. Sometimes this mapping is obvious: each file in a set of log files can be considered a stream with each parsed line forming a record using the same schema and offsets stored as byte offsets in the file. In other cases it may require more effort to map to this model: a JDBC connector can map each table to a stream, but the offset is less clear. One possible mapping uses a timestamp column to generate queries incrementally returning new data, and the last queried timestamp can be used as the offset. + +### Streams and Records + +Each stream should be a sequence of key-value records. Both the keys and values can have complex structure -- many primitive types are provided, but arrays, objects, and nested data structures can be represented as well. The runtime data format does not assume any particular serialization format; this conversion is handled internally by the framework. + +In addition to the key and value, records (both those generated by sources and those delivered to sinks) have associated stream IDs and offsets. These are used by the framework to periodically commit the offsets of data that have been processed so that in the event of failures, processing can resume from the last committed offsets, avoiding unnecessary reprocessing and duplication of events. + +### Dynamic Connectors + +Not all jobs are static, so `Connector` implementations are also responsible for monitoring the external system for any changes that might require reconfiguration. For example, in the `JDBCSourceConnector` example, the `Connector` might assign a set of tables to each `Task`. When a new table is created, it must discover this so it can assign the new table to one of the `Tasks` by updating its configuration. When it notices a change that requires reconfiguration (or a change in the number of `Tasks`), it notifies the framework and the framework updates any corresponding `Tasks`. + +## Developing a Simple Connector + +Developing a connector only requires implementing two interfaces, the `Connector` and `Task`. A simple example is included with the source code for Kafka in the `file` package. This connector is meant for use in standalone mode and has implementations of a `SourceConnector`/`SourceTask` to read each line of a file and emit it as a record and a `SinkConnector`/`SinkTask` that writes each record to a file. 
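Since the walkthrough below uses the source connector as its running example, here is a hedged, minimal sketch of what the sink side of such an example could look like; the class name and behavior (writing record values to standard output rather than a file) are illustrative and are not the actual `FileStreamSinkTask` source:

    import java.util.Collection;
    import java.util.Map;

    import org.apache.kafka.connect.sink.SinkRecord;
    import org.apache.kafka.connect.sink.SinkTask;

    // Illustrative sketch of a SinkTask: receives batches of records pushed by the
    // framework and writes each value to the destination (here, standard output).
    public class EchoSinkTask extends SinkTask {

        @Override
        public String version() {
            return "0.0.1";
        }

        @Override
        public void start(Map<String, String> props) {
            // A real task would open the file or connection named in props here.
        }

        @Override
        public void put(Collection<SinkRecord> records) {
            for (SinkRecord record : records) {
                System.out.println(record.value()); // translate and store each record
            }
        }

        @Override
        public void stop() {
            // Release any resources acquired in start(); nothing to do for stdout.
        }
    }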
+ +The rest of this section will walk through some code to demonstrate the key steps in creating a connector, but developers should also refer to the full example source code as many details are omitted for brevity. + +### Connector Example + +We'll cover the `SourceConnector` as a simple example. `SinkConnector` implementations are very similar. Pick a package and class name; these examples will use the `FileStreamSourceConnector`, but substitute your own class name where appropriate. In order to make the plugin discoverable at runtime, add a ServiceLoader manifest to your resources in `META-INF/services/org.apache.kafka.connect.source.SourceConnector` with your fully-qualified class name on a single line: + + + com.example.FileStreamSourceConnector + +Create a class that inherits from `SourceConnector` and add a field that will store the configuration information to be propagated to the task(s) (the topic to send data to, and optionally the filename to read from and the maximum batch size): + + + package com.example; + + public class FileStreamSourceConnector extends SourceConnector { + private Map<String, String> props; + +The easiest method to fill in is `taskClass()`, which defines the class that should be instantiated in worker processes to actually read the data: + + + @Override + public Class<? extends Task> taskClass() { + return FileStreamSourceTask.class; + } + +We will define the `FileStreamSourceTask` class below. Next, we add some standard lifecycle methods, `start()` and `stop()`: + + + @Override + public void start(Map<String, String> props) { + // Initialization logic and setting up of resources can take place in this method. + // This connector doesn't need to do any of that, but we do log a helpful message to the user. + + this.props = props; + AbstractConfig config = new AbstractConfig(CONFIG_DEF, props); + String filename = config.getString(FILE_CONFIG); + filename = (filename == null || filename.isEmpty()) ? "standard input" : config.getString(FILE_CONFIG); + log.info("Starting file source connector reading from {}", filename); + } + + @Override + public void stop() { + // Nothing to do since no background monitoring is required. + } + +Finally, the real core of the implementation is in `taskConfigs()`. In this case we are only handling a single file, so even though we may be permitted to generate more tasks as per the `maxTasks` argument, we return a list with only one entry: + + + @Override + public List<Map<String, String>> taskConfigs(int maxTasks) { + // Note that the task configs could contain configs additional to or different from the connector configs if needed. For instance, + // if different tasks have different responsibilities, or if different tasks are meant to process different subsets of the source data stream. + ArrayList<Map<String, String>> configs = new ArrayList<>(); + // Only one input stream makes sense. + configs.add(props); + return configs; + } + +Even with multiple tasks, this method implementation is usually pretty simple. It just has to determine the number of input tasks, which may require contacting the remote service it is pulling data from, and then divvy them up. Because some patterns for splitting work among tasks are so common, some utilities are provided in `ConnectorUtils` to simplify these cases. + +Note that this simple example does not include dynamic input. See the discussion in the next section for how to trigger updates to task configs. + +### Task Example - Source Task + +Next we'll describe the implementation of the corresponding `SourceTask`.
The implementation is short, but too long to cover completely in this guide. We'll use pseudo-code to describe most of the implementation, but you can refer to the source code for the full example. + +Just as with the connector, we need to create a class inheriting from the appropriate base `Task` class. It also has some standard lifecycle methods: + + + public class FileStreamSourceTask extends SourceTask { + private String filename; + private InputStream stream; + private String topic; + private int batchSize; + + @Override + public void start(Map props) { + filename = props.get(FileStreamSourceConnector.FILE_CONFIG); + stream = openOrThrowError(filename); + topic = props.get(FileStreamSourceConnector.TOPIC_CONFIG); + batchSize = props.get(FileStreamSourceConnector.TASK_BATCH_SIZE_CONFIG); + } + + @Override + public synchronized void stop() { + stream.close(); + } + } + +These are slightly simplified versions, but show that these methods should be relatively simple and the only work they should perform is allocating or freeing resources. There are two points to note about this implementation. First, the `start()` method does not yet handle resuming from a previous offset, which will be addressed in a later section. Second, the `stop()` method is synchronized. This will be necessary because `SourceTasks` are given a dedicated thread which they can block indefinitely, so they need to be stopped with a call from a different thread in the Worker. + +Next, we implement the main functionality of the task, the `poll()` method which gets events from the input system and returns a `List`: + + + @Override + public List poll() throws InterruptedException { + try { + ArrayList records = new ArrayList<>(); + while (streamValid(stream) && records.isEmpty()) { + LineAndOffset line = readToNextLine(stream); + if (line != null) { + Map sourcePartition = Collections.singletonMap("filename", filename); + Map sourceOffset = Collections.singletonMap("position", streamOffset); + records.add(new SourceRecord(sourcePartition, sourceOffset, topic, Schema.STRING_SCHEMA, line)); + if (records.size() >= batchSize) { + return records; + } + } else { + Thread.sleep(1); + } + } + return records; + } catch (IOException e) { + // Underlying stream was killed, probably as a result of calling stop. Allow to return + // null, and driving thread will handle any shutdown if necessary. + } + return null; + } + +Again, we've omitted some details, but we can see the important steps: the `poll()` method is going to be called repeatedly, and for each call it will loop trying to read records from the file. For each line it reads, it also tracks the file offset. It uses this information to create an output `SourceRecord` with four pieces of information: the source partition (there is only one, the single file being read), source offset (byte offset in the file), output topic name, and output value (the line, and we include a schema indicating this value will always be a string). Other variants of the `SourceRecord` constructor can also include a specific output partition, a key, and headers. + +Note that this implementation uses the normal Java `InputStream` interface and may sleep if data is not available. This is acceptable because Kafka Connect provides each task with a dedicated thread. While task implementations have to conform to the basic `poll()` interface, they have a lot of flexibility in how they are implemented. 
In this case, an NIO-based implementation would be more efficient, but this simple approach works, is quick to implement, and is compatible with older versions of Java. + +Although not used in the example, `SourceTask` also provides two APIs to commit offsets in the source system: `commit` and `commitRecord`. The APIs are provided for source systems which have an acknowledgement mechanism for messages. Overriding these methods allows the source connector to acknowledge messages in the source system, either in bulk or individually, once they have been written to Kafka. The `commit` API stores the offsets in the source system, up to the offsets that have been returned by `poll`. The implementation of this API should block until the commit is complete. The `commitRecord` API saves the offset in the source system for each `SourceRecord` after it is written to Kafka. As Kafka Connect will record offsets automatically, `SourceTask`s are not required to implement them. In cases where a connector does need to acknowledge messages in the source system, only one of the APIs is typically required. + +### Sink Tasks + +The previous section described how to implement a simple `SourceTask`. Unlike `SourceConnector` and `SinkConnector`, `SourceTask` and `SinkTask` have very different interfaces because `SourceTask` uses a pull interface and `SinkTask` uses a push interface. Both share the common lifecycle methods, but the `SinkTask` interface is quite different: + + + public abstract class SinkTask implements Task { + public void initialize(SinkTaskContext context) { + this.context = context; + } + + public abstract void put(Collection records); + + public void flush(Map currentOffsets) { + } + } + +The `SinkTask` documentation contains full details, but this interface is nearly as simple as the `SourceTask`. The `put()` method should contain most of the implementation, accepting sets of `SinkRecords`, performing any required translation, and storing them in the destination system. This method does not need to ensure the data has been fully written to the destination system before returning. In fact, in many cases internal buffering will be useful so an entire batch of records can be sent at once, reducing the overhead of inserting events into the downstream data store. The `SinkRecords` contain essentially the same information as `SourceRecords`: Kafka topic, partition, offset, the event key and value, and optional headers. + +The `flush()` method is used during the offset commit process, which allows tasks to recover from failures and resume from a safe point such that no events will be missed. The method should push any outstanding data to the destination system and then block until the write has been acknowledged. The `offsets` parameter can often be ignored, but is useful in some cases where implementations want to store offset information in the destination store to provide exactly-once delivery. For example, an HDFS connector could do this and use atomic move operations to make sure the `flush()` operation atomically commits the data and offsets to a final location in HDFS. + +### [Errant Record Reporter](connect_errantrecordreporter) + +When error reporting is enabled for a connector, the connector can use an `ErrantRecordReporter` to report problems with individual records sent to a sink connector. 
The following example shows how a connector's `SinkTask` subclass might obtain and use the `ErrantRecordReporter`, safely handling a null reporter when the DLQ is not enabled or when the connector is installed in an older Connect runtime that doesn't have this reporter feature: + + + private ErrantRecordReporter reporter; + + @Override + public void start(Map props) { + ... + try { + reporter = context.errantRecordReporter(); // may be null if DLQ not enabled + } catch (NoSuchMethodException | NoClassDefFoundError e) { + // Will occur in Connect runtimes earlier than 2.6 + reporter = null; + } + } + + @Override + public void put(Collection records) { + for (SinkRecord record: records) { + try { + // attempt to process and send record to data sink + process(record); + } catch(Exception e) { + if (reporter != null) { + // Send errant record to error reporter + reporter.report(record, e); + } else { + // There's no error reporter, so fail + throw new ConnectException("Failed on record", e); + } + } + } + } + +### Resuming from Previous Offsets + +The `SourceTask` implementation included a stream ID (the input filename) and offset (position in the file) with each record. The framework uses this to commit offsets periodically so that in the case of a failure, the task can recover and minimize the number of events that are reprocessed and possibly duplicated (or to resume from the most recent offset if Kafka Connect was stopped gracefully, e.g. in standalone mode or due to a job reconfiguration). This commit process is completely automated by the framework, but only the connector knows how to seek back to the right position in the input stream to resume from that location. + +To correctly resume upon startup, the task can use the `SourceContext` passed into its `initialize()` method to access the offset data. In `initialize()`, we would add a bit more code to read the offset (if it exists) and seek to that position: + + + stream = new FileInputStream(filename); + Map offset = context.offsetStorageReader().offset(Collections.singletonMap(FILENAME_FIELD, filename)); + if (offset != null) { + Long lastRecordedOffset = (Long) offset.get("position"); + if (lastRecordedOffset != null) + seekToOffset(stream, lastRecordedOffset); + } + +Of course, you might need to read many keys for each of the input streams. The `OffsetStorageReader` interface also allows you to issue bulk reads to efficiently load all offsets, then apply them by seeking each input stream to the appropriate position. + +### Exactly-once source connectors + +#### Supporting exactly-once + +With the passing of [KIP-618](https://cwiki.apache.org/confluence/x/Vg0rCQ), Kafka Connect supports exactly-once source connectors as of version 3.3.0. In order for a source connector to take advantage of this support, it must be able to provide meaningful source offsets for each record that it emits, and resume consumption from the external system at the exact position corresponding to any of those offsets without dropping or duplicating messages. + +#### Defining transaction boundaries + +By default, the Kafka Connect framework will create and commit a new Kafka transaction for each batch of records that a source task returns from its `poll` method. However, connectors can also define their own transaction boundaries, which can be enabled by users by setting the `transaction.boundary` property to `connector` in the config for the connector. 
+ +If enabled, the connector's tasks will have access to a `TransactionContext` from their `SourceTaskContext`, which they can use to control when transactions are aborted and committed. + +For example, to commit a transaction at least every ten records: + + + private int recordsSent; + + @Override + public void start(Map props) { + this.recordsSent = 0; + } + + @Override + public List poll() { + List records = fetchRecords(); + boolean shouldCommit = false; + for (SourceRecord record : records) { + if (++this.recordsSent >= 10) { + shouldCommit = true; + } + } + if (shouldCommit) { + this.recordsSent = 0; + this.context.transactionContext().commitTransaction(); + } + return records; + } + +Or to commit a transaction for exactly every tenth record: + + + private int recordsSent; + + @Override + public void start(Map props) { + this.recordsSent = 0; + } + + @Override + public List poll() { + List records = fetchRecords(); + for (SourceRecord record : records) { + if (++this.recordsSent % 10 == 0) { + this.context.transactionContext().commitTransaction(record); + } + } + return records; + } + +Most connectors do not need to define their own transaction boundaries. However, it may be useful if files or objects in the source system are broken up into multiple source records, but should be delivered atomically. Additionally, it may be useful if it is impossible to give each source record a unique source offset, if every record with a given offset is delivered within a single transaction. + +Note that if the user has not enabled connector-defined transaction boundaries in the connector configuration, the `TransactionContext` returned by `context.transactionContext()` will be `null`. + +#### Validation APIs + +A few additional preflight validation APIs can be implemented by source connector developers. + +Some users may require exactly-once semantics from a connector. In this case, they may set the `exactly.once.support` property to `required` in the configuration for the connector. When this happens, the Kafka Connect framework will ask the connector whether it can provide exactly-once semantics with the specified configuration. This is done by invoking the `exactlyOnceSupport` method on the connector. 
+ +If a connector doesn't support exactly-once semantics, it should still implement this method to let users know for certain that it cannot provide exactly-once semantics: + + + @Override + public ExactlyOnceSupport exactlyOnceSupport(Map props) { + // This connector cannot provide exactly-once semantics under any conditions + return ExactlyOnceSupport.UNSUPPORTED; + } + + +Otherwise, a connector should examine the configuration, and return `ExactlyOnceSupport.SUPPORTED` if it can provide exactly-once semantics: + + + @Override + public ExactlyOnceSupport exactlyOnceSupport(Map props) { + // This connector can always provide exactly-once semantics + return ExactlyOnceSupport.SUPPORTED; + } + +Additionally, if the user has configured the connector to define its own transaction boundaries, the Kafka Connect framework will ask the connector whether it can define its own transaction boundaries with the specified configuration, using the `canDefineTransactionBoundaries` method: + + + @Override + public ConnectorTransactionBoundaries canDefineTransactionBoundaries(Map props) { + // This connector can always define its own transaction boundaries + return ConnectorTransactionBoundaries.SUPPORTED; + } + +This method should only be implemented for connectors that can define their own transaction boundaries in some cases. If a connector is never able to define its own transaction boundaries, it does not need to implement this method. + +## Dynamic Input/Output Streams + +Kafka Connect is intended to define bulk data copying jobs, such as copying an entire database rather than creating many jobs to copy each table individually. One consequence of this design is that the set of input or output streams for a connector can vary over time. + +Source connectors need to monitor the source system for changes, e.g. table additions/deletions in a database. When they pick up changes, they should notify the framework via the `ConnectorContext` object that reconfiguration is necessary. For example, in a `SourceConnector`: + + + if (inputsChanged()) + this.context.requestTaskReconfiguration(); + +The framework will promptly request new configuration information and update the tasks, allowing them to gracefully commit their progress before reconfiguring them. Note that in the `SourceConnector` this monitoring is currently left up to the connector implementation. If an extra thread is required to perform this monitoring, the connector must allocate it itself. + +Ideally this code for monitoring changes would be isolated to the `Connector` and tasks would not need to worry about them. However, changes can also affect tasks, most commonly when one of their input streams is destroyed in the input system, e.g. if a table is dropped from a database. If the `Task` encounters the issue before the `Connector`, which will be common if the `Connector` needs to poll for changes, the `Task` will need to handle the subsequent error. Thankfully, this can usually be handled simply by catching and handling the appropriate exception. + +`SinkConnectors` usually only have to handle the addition of streams, which may translate to new entries in their outputs (e.g., a new database table). The framework manages any changes to the Kafka input, such as when the set of input topics changes because of a regex subscription. `SinkTasks` should expect new input streams, which may require creating new resources in the downstream system, such as a new table in a database. 
The trickiest situation to handle in these cases may be conflicts between multiple `SinkTasks` seeing a new input stream for the first time and simultaneously trying to create the new resource. `SinkConnectors`, on the other hand, will generally require no special code for handling a dynamic set of streams. + +## Configuration Validation + +Kafka Connect allows you to validate connector configurations before submitting a connector to be executed and can provide feedback about errors and recommended values. To take advantage of this, connector developers need to provide an implementation of `config()` to expose the configuration definition to the framework. + +The following code in `FileStreamSourceConnector` defines the configuration and exposes it to the framework. + + + static final ConfigDef CONFIG_DEF = new ConfigDef() + .define(FILE_CONFIG, Type.STRING, null, Importance.HIGH, "Source filename. If not specified, the standard input will be used") + .define(TOPIC_CONFIG, Type.STRING, ConfigDef.NO_DEFAULT_VALUE, new ConfigDef.NonEmptyString(), Importance.HIGH, "The topic to publish data to") + .define(TASK_BATCH_SIZE_CONFIG, Type.INT, DEFAULT_TASK_BATCH_SIZE, Importance.LOW, + "The maximum number of records the source task can read from the file each time it is polled"); + + public ConfigDef config() { + return CONFIG_DEF; + } + +`ConfigDef` class is used for specifying the set of expected configurations. For each configuration, you can specify the name, the type, the default value, the documentation, the group information, the order in the group, the width of the configuration value and the name suitable for display in the UI. Plus, you can provide special validation logic used for single configuration validation by overriding the `Validator` class. Moreover, as there may be dependencies between configurations, for example, the valid values and visibility of a configuration may change according to the values of other configurations. To handle this, `ConfigDef` allows you to specify the dependents of a configuration and to provide an implementation of `Recommender` to get valid values and set visibility of a configuration given the current configuration values. + +Also, the `validate()` method in `Connector` provides a default validation implementation which returns a list of allowed configurations together with configuration errors and recommended values for each configuration. However, it does not use the recommended values for configuration validation. You may provide an override of the default implementation for customized configuration validation, which may use the recommended values. + +## Working with Schemas + +The FileStream connectors are good examples because they are simple, but they also have trivially structured data -- each line is just a string. Almost all practical connectors will need schemas with more complex data formats. + +To create more complex data, you'll need to work with the Kafka Connect `data` API. Most structured records will need to interact with two classes in addition to primitive types: `Schema` and `Struct`. 
+ +The API documentation provides a complete reference, but here is a simple example creating a `Schema` and `Struct`: + + + Schema schema = SchemaBuilder.struct().name(NAME) + .field("name", Schema.STRING_SCHEMA) + .field("age", Schema.INT_SCHEMA) + .field("admin", SchemaBuilder.bool().defaultValue(false).build()) + .build(); + + Struct struct = new Struct(schema) + .put("name", "Barbara Liskov") + .put("age", 75); + +If you are implementing a source connector, you'll need to decide when and how to create schemas. Where possible, you should avoid recomputing them as much as possible. For example, if your connector is guaranteed to have a fixed schema, create it statically and reuse a single instance. + +However, many connectors will have dynamic schemas. One simple example of this is a database connector. Considering even just a single table, the schema will not be predefined for the entire connector (as it varies from table to table). But it also may not be fixed for a single table over the lifetime of the connector since the user may execute an `ALTER TABLE` command. The connector must be able to detect these changes and react appropriately. + +Sink connectors are usually simpler because they are consuming data and therefore do not need to create schemas. However, they should take just as much care to validate that the schemas they receive have the expected format. When the schema does not match -- usually indicating the upstream producer is generating invalid data that cannot be correctly translated to the destination system -- sink connectors should throw an exception to indicate this error to the system. diff --git a/content/en/41/kafka-connect/overview.md b/content/en/41/kafka-connect/overview.md new file mode 100644 index 000000000..a9e5ae66d --- /dev/null +++ b/content/en/41/kafka-connect/overview.md @@ -0,0 +1,24 @@ +--- +title: Overview +description: Overview +weight: 1 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Overview + +Kafka Connect is a tool for scalably and reliably streaming data between Apache Kafka and other systems. It makes it simple to quickly define _connectors_ that move large collections of data into and out of Kafka. Kafka Connect can ingest entire databases or collect metrics from all your application servers into Kafka topics, making the data available for stream processing with low latency. An export job can deliver data from Kafka topics into secondary storage and query systems or into batch systems for offline analysis. + +Kafka Connect features include: + + * **A common framework for Kafka connectors** \- Kafka Connect standardizes integration of other data systems with Kafka, simplifying connector development, deployment, and management + * **Distributed and standalone modes** \- scale up to a large, centrally managed service supporting an entire organization or scale down to development, testing, and small production deployments + * **REST interface** \- submit and manage connectors to your Kafka Connect cluster via an easy to use REST API + * **Automatic offset management** \- with just a little information from connectors, Kafka Connect can manage the offset commit process automatically so connector developers do not need to worry about this error prone part of connector development + * **Distributed and scalable by default** \- Kafka Connect builds on the existing group management protocol. More workers can be added to scale up a Kafka Connect cluster. 
+ * **Streaming/batch integration** \- leveraging Kafka's existing capabilities, Kafka Connect is an ideal solution for bridging streaming and batch data systems + + diff --git a/content/en/41/kafka-connect/user-guide.md b/content/en/41/kafka-connect/user-guide.md new file mode 100644 index 000000000..30e82e398 --- /dev/null +++ b/content/en/41/kafka-connect/user-guide.md @@ -0,0 +1,503 @@ +--- +title: User Guide +description: User Guide +weight: 2 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# User Guide + +The [quickstart](../quickstart) provides a brief example of how to run a standalone version of Kafka Connect. This section describes how to configure, run, and manage Kafka Connect in more detail. + +## Running Kafka Connect + +Kafka Connect currently supports two modes of execution: standalone (single process) and distributed. + +In standalone mode all work is performed in a single process. This configuration is simpler to setup and get started with and may be useful in situations where only one worker makes sense (e.g. collecting log files), but it does not benefit from some of the features of Kafka Connect such as fault tolerance. You can start a standalone process with the following command: + + + $ bin/connect-standalone.sh config/connect-standalone.properties [connector1.properties connector2.json …] + +The first parameter is the configuration for the worker. This includes settings such as the Kafka connection parameters, serialization format, and how frequently to commit offsets. The provided example should work well with a local cluster running with the default configuration provided by `config/server.properties`. It will require tweaking to use with a different configuration or production deployment. All workers (both standalone and distributed) require a few configs: + + * `bootstrap.servers` \- List of Kafka servers used to bootstrap connections to Kafka + * `key.converter` \- Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the keys in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro. + * `value.converter` \- Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro. + * `plugin.path` (default `empty`) - a list of paths that contain Connect plugins (connectors, converters, transformations). Before running quick starts, users must add the absolute path that contains the example FileStreamSourceConnector and FileStreamSinkConnector packaged in `connect-file-"version".jar`, because these connectors are not included by default to the `CLASSPATH` or the `plugin.path` of the Connect worker (see plugin.path property for examples). + + + +The important configuration options specific to standalone mode are: + + * `offset.storage.file.filename` \- File to store source connector offsets + + + +The parameters that are configured here are intended for producers and consumers used by Kafka Connect to access the configuration, offset and status topics. 
For configuration of the producers used by Kafka source tasks and the consumers used by Kafka sink tasks, the same parameters can be used but need to be prefixed with `producer.` and `consumer.` respectively. The only Kafka client parameter that is inherited without a prefix from the worker configuration is `bootstrap.servers`, which in most cases will be sufficient, since the same cluster is often used for all purposes. A notable exception is a secured cluster, which requires extra parameters to allow connections. These parameters will need to be set up to three times in the worker configuration, once for management access, once for Kafka sources and once for Kafka sinks. + +Starting with 2.3.0, client configuration overrides can be configured individually per connector by using the prefixes `producer.override.` and `consumer.override.` for Kafka sources or Kafka sinks respectively. These overrides are included with the rest of the connector's configuration properties. + +The remaining parameters are connector configuration files. Each file may either be a Java Properties file or a JSON file containing an object with the same structure as the request body of either the `POST /connectors` endpoint or the `PUT /connectors/{name}/config` endpoint (see the [OpenAPI documentation](/41/generated/connect_rest.yaml)). You may include as many as you want, but all will execute within the same process (on different threads). You can also choose not to specify any connector configuration files on the command line, and instead use the REST API to create connectors at runtime after your standalone worker starts. + +Distributed mode handles automatic balancing of work, allows you to scale up (or down) dynamically, and offers fault tolerance both in the active tasks and for configuration and offset commit data. Execution is very similar to standalone mode: + + + $ bin/connect-distributed.sh config/connect-distributed.properties + +The difference is in the class which is started and the configuration parameters which change how the Kafka Connect process decides where to store configurations, how to assign work, and where to store offsets and task statuses. In distributed mode, Kafka Connect stores the offsets, configs and task statuses in Kafka topics. It is recommended to manually create the topics for offsets, configs and statuses in order to achieve the desired number of partitions and replication factors. If the topics are not yet created when starting Kafka Connect, the topics will be auto-created with a default number of partitions and replication factor, which may not be best suited for their usage. + +In particular, the following configuration parameters, in addition to the common settings mentioned above, are critical to set before starting your cluster: + + * `group.id` (default `connect-cluster`) - unique name for the cluster, used in forming the Connect cluster group; note that this **must not conflict** with consumer group IDs + * `config.storage.topic` (default `connect-configs`) - topic to use for storing connector and task configurations; note that this should be a single partition, highly replicated, compacted topic.
You may need to manually create the topic to ensure the correct configuration as auto created topics may have multiple partitions or be automatically configured for deletion rather than compaction + * `offset.storage.topic` (default `connect-offsets`) - topic to use for storing offsets; this topic should have many partitions, be replicated, and be configured for compaction + * `status.storage.topic` (default `connect-status`) - topic to use for storing statuses; this topic can have multiple partitions, and should be replicated and configured for compaction + + + +Note that in distributed mode the connector configurations are not passed on the command line. Instead, use the REST API described below to create, modify, and destroy connectors. + +## Configuring Connectors + +Connector configurations are simple key-value mappings. In both standalone and distributed mode, they are included in the JSON payload for the REST request that creates (or modifies) the connector. In standalone mode these can also be defined in a properties file and passed to the Connect process on the command line. + +Most configurations are connector dependent, so they can't be outlined here. However, there are a few common options: + + * `name` \- Unique name for the connector. Attempting to register again with the same name will fail. + * `connector.class` \- The Java class for the connector + * `tasks.max` \- The maximum number of tasks that should be created for this connector. The connector may create fewer tasks if it cannot achieve this level of parallelism. + * `key.converter` \- (optional) Override the default key converter set by the worker. + * `value.converter` \- (optional) Override the default value converter set by the worker. + + + +The `connector.class` config supports several formats: the full name or alias of the class for this connector. If the connector is org.apache.kafka.connect.file.FileStreamSinkConnector, you can either specify this full name or use FileStreamSink or FileStreamSinkConnector to make the configuration a bit shorter. + +Sink connectors also have a few additional options to control their input. Each sink connector must set one of the following: + + * `topics` \- A comma-separated list of topics to use as input for this connector + * `topics.regex` \- A Java regular expression of topics to use as input for this connector + + + +For any other options, you should consult the documentation for the connector. + +## Transformations + +Connectors can be configured with transformations to make lightweight message-at-a-time modifications. They can be convenient for data massaging and event routing. + +A transformation chain can be specified in the connector configuration. + + * `transforms` \- List of aliases for the transformation, specifying the order in which the transformations will be applied. + * `transforms.$alias.type` \- Fully qualified class name for the transformation. + * `transforms.$alias.$transformationSpecificConfig` Configuration properties for the transformation + + + +For example, lets take the built-in file source connector and use a transformation to add a static field. + +Throughout the example we'll use schemaless JSON data format. To use schemaless format, we changed the following two lines in `connect-standalone.properties` from true to false: + + + key.converter.schemas.enable + value.converter.schemas.enable + +The file source connector reads each line as a String. We will wrap each line in a Map and then add a second field to identify the origin of the event. 
To do this, we use two transformations: + + * **HoistField** to place the input line inside a Map + * **InsertField** to add the static field. In this example we'll indicate that the record came from a file connector + + + +After adding the transformations, `connect-file-source.properties` file looks as following: + + + name=local-file-source + connector.class=FileStreamSource + tasks.max=1 + file=test.txt + topic=connect-test + transforms=MakeMap, InsertSource + transforms.MakeMap.type=org.apache.kafka.connect.transforms.HoistField$Value + transforms.MakeMap.field=line + transforms.InsertSource.type=org.apache.kafka.connect.transforms.InsertField$Value + transforms.InsertSource.static.field=data_source + transforms.InsertSource.static.value=test-file-source + +All the lines starting with `transforms` were added for the transformations. You can see the two transformations we created: "InsertSource" and "MakeMap" are aliases that we chose to give the transformations. The transformation types are based on the list of built-in transformations you can see below. Each transformation type has additional configuration: HoistField requires a configuration called "field", which is the name of the field in the map that will include the original String from the file. InsertField transformation lets us specify the field name and the value that we are adding. + +When we ran the file source connector on my sample file without the transformations, and then read them using `kafka-console-consumer.sh`, the results were: + + + "foo" + "bar" + "hello world" + +We then create a new file connector, this time after adding the transformations to the configuration file. This time, the results will be: + + + {"line":"foo","data_source":"test-file-source"} + {"line":"bar","data_source":"test-file-source"} + {"line":"hello world","data_source":"test-file-source"} + +You can see that the lines we've read are now part of a JSON map, and there is an extra field with the static value we specified. This is just one example of what you can do with transformations. + +### Included transformations + +Several widely-applicable data and routing transformations are included with Kafka Connect: + + * Cast \- Cast fields or the entire key or value to a specific type + * DropHeaders \- Remove headers by name + * ExtractField \- Extract a specific field from Struct and Map and include only this field in results + * Filter \- Removes messages from all further processing. This is used with a predicate to selectively filter certain messages + * Flatten \- Flatten a nested data structure + * HeaderFrom \- Copy or move fields in the key or value to the record headers + * HoistField \- Wrap the entire event as a single field inside a Struct or a Map + * InsertField \- Add a field using either static data or record metadata + * InsertHeader \- Add a header using static data + * MaskField \- Replace field with valid null value for the type (0, empty string, etc) or custom replacement (non-empty string or numeric value only) + * RegexRouter \- modify the topic of a record based on original topic, replacement string and a regular expression + * ReplaceField \- Filter or rename fields + * SetSchemaMetadata \- modify the schema name or version + * TimestampConverter \- Convert timestamps between different formats + * TimestampRouter \- Modify the topic of a record based on original topic and timestamp. 
Useful when using a sink that needs to write to different tables or indexes based on timestamps
+ * ValueToKey \- Replace the record key with a new key formed from a subset of fields in the record value
+
+
+
+Details on how to configure each transformation are listed below:
+
+{{< include-html file="/static/41/generated/connect_transforms.html" >}}
+
+### Predicates
+
+Transformations can be configured with predicates so that the transformation is applied only to messages which satisfy some condition. In particular, when combined with the **Filter** transformation, predicates can be used to selectively filter out certain messages.
+
+Predicates are specified in the connector configuration.
+
+ * `predicates` \- Set of aliases for the predicates to be applied to some of the transformations.
+ * `predicates.$alias.type` \- Fully qualified class name for the predicate.
+ * `predicates.$alias.$predicateSpecificConfig` \- Configuration properties for the predicate.
+
+
+
+All transformations have the implicit config properties `predicate` and `negate`. A particular predicate is associated with a transformation by setting the transformation's `predicate` config to the predicate's alias. The predicate's value can be reversed using the `negate` configuration property.
+
+For example, suppose you have a source connector which produces messages to many different topics and you want to:
+
+ * filter out the messages in the 'foo' topic entirely
+ * apply the ExtractField transformation with the field name 'other_field' to records in all topics _except_ the topic 'bar'
+
+
+
+To do this, we first need to filter out the records destined for the topic 'foo'. The Filter transformation removes records from further processing, and can use the TopicNameMatches predicate to apply the transformation only to records in topics which match a certain regular expression. TopicNameMatches's only configuration property is `pattern`, which is a Java regular expression for matching against the topic name. The configuration would look like this:
+
+
+    transforms=Filter
+    transforms.Filter.type=org.apache.kafka.connect.transforms.Filter
+    transforms.Filter.predicate=IsFoo
+
+    predicates=IsFoo
+    predicates.IsFoo.type=org.apache.kafka.connect.transforms.predicates.TopicNameMatches
+    predicates.IsFoo.pattern=foo
+
+Next we need to apply ExtractField only when the topic name of the record is not 'bar'. We can't just use TopicNameMatches directly, because that would apply the transformation to matching topic names, not topic names which do _not_ match. The transformation's implicit `negate` config property allows us to invert the set of records which a predicate matches. Adding the configuration for this to the previous example we arrive at:
+
+
+    transforms=Filter,Extract
+    transforms.Filter.type=org.apache.kafka.connect.transforms.Filter
+    transforms.Filter.predicate=IsFoo
+
+    transforms.Extract.type=org.apache.kafka.connect.transforms.ExtractField$Key
+    transforms.Extract.field=other_field
+    transforms.Extract.predicate=IsBar
+    transforms.Extract.negate=true
+
+    predicates=IsFoo,IsBar
+    predicates.IsFoo.type=org.apache.kafka.connect.transforms.predicates.TopicNameMatches
+    predicates.IsFoo.pattern=foo
+
+    predicates.IsBar.type=org.apache.kafka.connect.transforms.predicates.TopicNameMatches
+    predicates.IsBar.pattern=bar
+
+Kafka Connect includes the following predicates:
+
+ * `TopicNameMatches` \- matches records in a topic with a name matching a particular Java regular expression.
+ * `HasHeaderKey` \- matches records which have a header with the given key.
+ * `RecordIsTombstone` \- matches tombstone records, that is, records with a null value.
+
+
+
+Details on how to configure each predicate are listed below:
+
+{{< include-html file="/static/41/generated/connect_predicates.html" >}}
+
+## REST API
+
+Since Kafka Connect is intended to be run as a service, it also provides a REST API for managing connectors. This REST API is available in both standalone and distributed mode. The REST API server can be configured using the `listeners` configuration option. This field should contain a list of listeners in the following format: `protocol://host:port,protocol2://host2:port2`. Currently supported protocols are `http` and `https`. For example:
+
+
+    listeners=http://localhost:8080,https://localhost:8443
+
+By default, if no `listeners` are specified, the REST server runs on port 8083 using the HTTP protocol. When using HTTPS, the configuration has to include the SSL configuration. By default, it will use the `ssl.*` settings. If a different configuration is needed for the REST API than for connecting to Kafka brokers, the fields can be prefixed with `listeners.https`. When using the prefix, only the prefixed options will be used and the `ssl.*` options without the prefix will be ignored. The following fields can be used to configure HTTPS for the REST API:
+
+ * `ssl.keystore.location`
+ * `ssl.keystore.password`
+ * `ssl.keystore.type`
+ * `ssl.key.password`
+ * `ssl.truststore.location`
+ * `ssl.truststore.password`
+ * `ssl.truststore.type`
+ * `ssl.enabled.protocols`
+ * `ssl.provider`
+ * `ssl.protocol`
+ * `ssl.cipher.suites`
+ * `ssl.keymanager.algorithm`
+ * `ssl.secure.random.implementation`
+ * `ssl.trustmanager.algorithm`
+ * `ssl.endpoint.identification.algorithm`
+ * `ssl.client.auth`
+
+
+
+The REST API is used not only by users to monitor and manage Kafka Connect. In distributed mode, it is also used for the Kafka Connect cross-cluster communication. Some requests received on the follower nodes' REST API will be forwarded to the leader node's REST API. If the URI under which a given host is reachable is different from the URI on which it listens, the configuration options `rest.advertised.host.name`, `rest.advertised.port` and `rest.advertised.listener` can be used to change the URI which the follower nodes use to connect with the leader. When using both HTTP and HTTPS listeners, the `rest.advertised.listener` option can also be used to define which listener will be used for the cross-cluster communication. When using HTTPS for communication between nodes, the same `ssl.*` or `listeners.https` options will be used to configure the HTTPS client.
+
+The following are the currently supported REST API endpoints:
+
+ * `GET /connectors` \- return a list of active connectors
+ * `POST /connectors` \- create a new connector; the request body should be a JSON object containing a string `name` field and an object `config` field with the connector configuration parameters.
The JSON object may also optionally contain a string `initial_state` field which can take the following values - `STOPPED`, `PAUSED` or `RUNNING` (the default value) + * `GET /connectors/{name}` \- get information about a specific connector + * `GET /connectors/{name}/config` \- get the configuration parameters for a specific connector + * `PUT /connectors/{name}/config` \- update the configuration parameters for a specific connector + * `PATCH /connectors/{name}/config` \- patch the configuration parameters for a specific connector, where `null` values in the JSON body indicates removing of the key from the final configuration + * `GET /connectors/{name}/status` \- get current status of the connector, including if it is running, failed, paused, etc., which worker it is assigned to, error information if it has failed, and the state of all its tasks + * `GET /connectors/{name}/tasks` \- get a list of tasks currently running for a connector along with their configurations + * `GET /connectors/{name}/tasks/{taskid}/status` \- get current status of the task, including if it is running, failed, paused, etc., which worker it is assigned to, and error information if it has failed + * `PUT /connectors/{name}/pause` \- pause the connector and its tasks, which stops message processing until the connector is resumed. Any resources claimed by its tasks are left allocated, which allows the connector to begin processing data quickly once it is resumed. + * `PUT /connectors/{name}/stop` \- stop the connector and shut down its tasks, deallocating any resources claimed by its tasks. This is more efficient from a resource usage standpoint than pausing the connector, but can cause it to take longer to begin processing data once resumed. Note that the offsets for a connector can be only modified via the offsets management endpoints if it is in the stopped state + * `PUT /connectors/{name}/resume` \- resume a paused or stopped connector (or do nothing if the connector is not paused or stopped) + * `POST /connectors/{name}/restart?includeTasks=&onlyFailed=` \- restart a connector and its tasks instances. + * the "includeTasks" parameter specifies whether to restart the connector instance and task instances ("includeTasks=true") or just the connector instance ("includeTasks=false"), with the default ("false") preserving the same behavior as earlier versions. + * the "onlyFailed" parameter specifies whether to restart just the instances with a FAILED status ("onlyFailed=true") or all instances ("onlyFailed=false"), with the default ("false") preserving the same behavior as earlier versions. + * `POST /connectors/{name}/tasks/{taskId}/restart` \- restart an individual task (typically because it has failed) + * `DELETE /connectors/{name}` \- delete a connector, halting all tasks and deleting its configuration + * `GET /connectors/{name}/topics` \- get the set of topics that a specific connector is using since the connector was created or since a request to reset its set of active topics was issued + * `PUT /connectors/{name}/topics/reset` \- send a request to empty the set of active topics of a connector + * Offsets management endpoints (see [KIP-875](https://cwiki.apache.org/confluence/x/Io3GDQ) for more details): + * `GET /connectors/{name}/offsets` \- get the current offsets for a connector + * `DELETE /connectors/{name}/offsets` \- reset the offsets for a connector. 
The connector must exist and must be in the stopped state (see `PUT /connectors/{name}/stop`) + * `PATCH /connectors/{name}/offsets` \- alter the offsets for a connector. The connector must exist and must be in the stopped state (see `PUT /connectors/{name}/stop`). The request body should be a JSON object containing a JSON array `offsets` field, similar to the response body of the `GET /connectors/{name}/offsets` endpoint. An example request body for the `FileStreamSourceConnector`: + + { + "offsets": [ + { + "partition": { + "filename": "test.txt" + }, + "offset": { + "position": 30 + } + } + ] + } + +An example request body for the `FileStreamSinkConnector`: + + { + "offsets": [ + { + "partition": { + "kafka_topic": "test", + "kafka_partition": 0 + }, + "offset": { + "kafka_offset": 5 + } + }, + { + "partition": { + "kafka_topic": "test", + "kafka_partition": 1 + }, + "offset": null + } + ] + } + +The "offset" field may be null to reset the offset for a specific partition (applicable to both source and sink connectors). Note that the request body format depends on the connector implementation in the case of source connectors, whereas there is a common format across all sink connectors. + + + +Kafka Connect also provides a REST API for getting information about connector plugins: + + * `GET /connector-plugins`\- return a list of connector plugins installed in the Kafka Connect cluster. Note that the API only checks for connectors on the worker that handles the request, which means you may see inconsistent results, especially during a rolling upgrade if you add new connector jars + * `GET /connector-plugins/{plugin-type}/config` \- get the configuration definition for the specified plugin. + * `PUT /connector-plugins/{connector-type}/config/validate` \- validate the provided configuration values against the configuration definition. This API performs per config validation, returns suggested values and error messages during validation. + + + +The following is a supported REST request at the top-level (root) endpoint: + + * `GET /`\- return basic information about the Kafka Connect cluster such as the version of the Connect worker that serves the REST request (including git commit ID of the source code) and the Kafka cluster ID that is connected to. + + +The `admin.listeners` configuration can be used to configure admin REST APIs on Kafka Connect's REST API server. Similar to the `listeners` configuration, this field should contain a list of listeners in the following format: `protocol://host:port,protocol2://host2:port2`. Currently supported protocols are `http` and `https`. For example: + + + admin.listeners=http://localhost:8080,https://localhost:8443 + +By default, if `admin.listeners` is not configured, the admin REST APIs will be available on the regular listeners. + +The following are the currently supported admin REST API endpoints: + + * `GET /admin/loggers` \- list the current loggers that have their levels explicitly set and their log levels + * `GET /admin/loggers/{name}` \- get the log level for the specified logger + * `PUT /admin/loggers/{name}` \- set the log level for the specified logger + + + +See [KIP-495](https://cwiki.apache.org/confluence/x/-4tTBw) for more details about the admin logger REST APIs. + +For the complete specification of the Kafka Connect REST API, see the [OpenAPI documentation](/41/generated/connect_rest.yaml) + +## Error Reporting in Connect + +Kafka Connect provides error reporting to handle errors encountered along various stages of processing. 
By default, any error encountered during conversion or within transformations will cause the connector to fail. Each connector configuration can also enable tolerating such errors by skipping them, optionally writing each error and the details of the failed operation and problematic record (with various levels of detail) to the Connect application log. These mechanisms also capture errors when a sink connector is processing the messages consumed from its Kafka topics, and all of the errors can be written to a configurable "dead letter queue" (DLQ) Kafka topic. + +To report errors within a connector's converter, transforms, or within the sink connector itself to the log, set `errors.log.enable=true` in the connector configuration to log details of each error and problem record's topic, partition, and offset. For additional debugging purposes, set `errors.log.include.messages=true` to also log the problem record key, value, and headers to the log (note this may log sensitive information). + +To report errors within a connector's converter, transforms, or within the sink connector itself to a dead letter queue topic, set `errors.deadletterqueue.topic.name`, and optionally `errors.deadletterqueue.context.headers.enable=true`. + +By default connectors exhibit "fail fast" behavior immediately upon an error or exception. This is equivalent to adding the following configuration properties with their defaults to a connector configuration: + + + # disable retries on failure + errors.retry.timeout=0 + + # do not log the error and their contexts + errors.log.enable=false + + # do not record errors in a dead letter queue topic + errors.deadletterqueue.topic.name= + + # Fail on first error + errors.tolerance=none + +These and other related connector configuration properties can be changed to provide different behavior. For example, the following configuration properties can be added to a connector configuration to setup error handling with multiple retries, logging to the application logs and the `my-connector-errors` Kafka topic, and tolerating all errors by reporting them rather than failing the connector task: + + + # retry for at most 10 minutes times waiting up to 30 seconds between consecutive failures + errors.retry.timeout=600000 + errors.retry.delay.max.ms=30000 + + # log error context along with application logs, but do not include configs and messages + errors.log.enable=true + errors.log.include.messages=false + + # produce error context into the Kafka topic + errors.deadletterqueue.topic.name=my-connector-errors + + # Tolerate all errors. + errors.tolerance=all + +## Exactly-once support + +Kafka Connect is capable of providing exactly-once semantics for sink connectors (as of version 0.11.0) and source connectors (as of version 3.3.0). Please note that **support for exactly-once semantics is highly dependent on the type of connector you run.** Even if you set all the correct worker properties in the configuration for each node in a cluster, if a connector is not designed to, or cannot take advantage of the capabilities of the Kafka Connect framework, exactly-once may not be possible. + +### Sink connectors + +If a sink connector supports exactly-once semantics, to enable exactly-once at the Connect worker level, you must ensure its consumer group is configured to ignore records in aborted transactions. 
You can do this by setting the worker property `consumer.isolation.level` to `read_committed` or, if running a version of Kafka Connect that supports it, using a connector client config override policy that allows the `consumer.override.isolation.level` property to be set to `read_committed` in individual connector configs. There are no additional ACL requirements. + +### Source connectors + +If a source connector supports exactly-once semantics, you must configure your Connect cluster to enable framework-level support for exactly-once source connectors. Additional ACLs may be necessary if running against a secured Kafka cluster. Note that exactly-once support for source connectors is currently only available in distributed mode; standalone Connect workers cannot provide exactly-once semantics. + +#### Worker configuration + +For new Connect clusters, set the `exactly.once.source.support` property to `enabled` in the worker config for each node in the cluster. For existing clusters, two rolling upgrades are necessary. During the first upgrade, the `exactly.once.source.support` property should be set to `preparing`, and during the second, it should be set to `enabled`. + +#### ACL requirements + +With exactly-once source support enabled, or with `exactly.once.source.support` set to `preparing`, the principal for each Connect worker will require the following ACLs: + +Operation | Resource Type | Resource Name | Note +---|---|---|--- +Write | TransactionalId | `connect-cluster-${groupId}`, where `${groupId}` is the `group.id` of the cluster | +Describe | TransactionalId | `connect-cluster-${groupId}`, where `${groupId}` is the `group.id` of the cluster | +IdempotentWrite | Cluster | ID of the Kafka cluster that hosts the worker's config topic | The IdempotentWrite ACL has been deprecated as of 2.8 and will only be necessary for Connect clusters running on pre-2.8 Kafka clusters + +And with exactly-once source enabled (but not if `exactly.once.source.support` is set to `preparing`), the principal for each individual connector will require the following ACLs: + +Operation | Resource Type | Resource Name | Note +---|---|---|--- +Write | TransactionalId | `${groupId}-${connector}-${taskId}`, for each task that the connector will create, where `${groupId}` is the `group.id` of the Connect cluster, `${connector}` is the name of the connector, and `${taskId}` is the ID of the task (starting from zero) | A wildcard prefix of `${groupId}-${connector}*` can be used for convenience if there is no risk of conflict with other transactional IDs or if conflicts are acceptable to the user. +Describe | TransactionalId | `${groupId}-${connector}-${taskId}`, for each task that the connector will create, where `${groupId}` is the `group.id` of the Connect cluster, `${connector}` is the name of the connector, and `${taskId}` is the ID of the task (starting from zero) | A wildcard prefix of `${groupId}-${connector}*` can be used for convenience if there is no risk of conflict with other transactional IDs or if conflicts are acceptable to the user. +Write | Topic | Offsets topic used by the connector, which is either the value of the `offsets.storage.topic` property in the connector’s configuration if provided, or the value of the `offsets.storage.topic` property in the worker’s configuration if not. 
| +Read | Topic | Offsets topic used by the connector, which is either the value of the `offsets.storage.topic` property in the connector’s configuration if provided, or the value of the `offsets.storage.topic` property in the worker’s configuration if not. | +Describe | Topic | Offsets topic used by the connector, which is either the value of the `offsets.storage.topic` property in the connector’s configuration if provided, or the value of the `offsets.storage.topic` property in the worker’s configuration if not. | +Create | Topic | Offsets topic used by the connector, which is either the value of the `offsets.storage.topic` property in the connector’s configuration if provided, or the value of the `offsets.storage.topic` property in the worker’s configuration if not. | Only necessary if the offsets topic for the connector does not exist yet +IdempotentWrite | Cluster | ID of the Kafka cluster that the source connector writes to | The IdempotentWrite ACL has been deprecated as of 2.8 and will only be necessary for Connect clusters running on pre-2.8 Kafka clusters + +## Plugin Discovery + +Plugin discovery is the name for the strategy which the Connect worker uses to find plugin classes and make them accessible to configure and run in connectors. This is controlled by the plugin.discovery worker configuration, and has a significant impact on worker startup time. `service_load` is the fastest strategy, but care should be taken to verify that plugins are compatible before setting this configuration to `service_load`. + +Prior to version 3.6, this strategy was not configurable, and behaved like the `only_scan` mode which is compatible with all plugins. For version 3.6 and later, this mode defaults to `hybrid_warn` which is also compatible with all plugins, but logs a warning for plugins which are incompatible with `service_load`. The `hybrid_fail` strategy stops the worker with an error if a plugin incompatible with `service_load` is detected, asserting that all plugins are compatible. Finally, the `service_load` strategy disables the slow legacy scanning mechanism used in all other modes, and instead uses the faster `ServiceLoader` mechanism. Plugins which are incompatible with that mechanism may be unusable. + +### Verifying Plugin Compatibility + +To verify if all of your plugins are compatible with `service_load`, first ensure that you are using version 3.6 or later of Kafka Connect. You can then perform one of the following checks: + + * Start your worker with the default `hybrid_warn`strategy, and WARN logs enabled for the `org.apache.kafka.connect` package. At least one WARN log message mentioning the `plugin.discovery` configuration should be printed. This log message will explicitly say that all plugins are compatible, or list the incompatible plugins. + * Start your worker in a test environment with `hybrid_fail`. If all plugins are compatible, startup will succeed. If at least one plugin is not compatible the worker will fail to start up, and all incompatible plugins will be listed in the exception. + + + +If the verification step succeeds, then your current set of installed plugins is compatible, and it should be safe to change the `plugin.discovery` configuration to `service_load`. If the verification fails, you cannot use `service_load` strategy and should take note of the list of incompatible plugins. All plugins must be addressed before using the `service_load` strategy. 
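+
+For example, the verification workflow can be expressed as a pair of worker configuration changes (a sketch; the property values are the ones described above, and the file placement is illustrative):
+
+    # connect-distributed.properties in a test environment: fail startup if any
+    # installed plugin is incompatible with the ServiceLoader-based discovery
+    plugin.discovery=hybrid_fail
+
+    # connect-distributed.properties in production, once verification has passed:
+    # skip the slow legacy scanning mechanism entirely
+    plugin.discovery=service_load
+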
It is recommended to perform this verification after installing or changing plugin versions, and the verification can be done automatically in a Continuous Integration environment. + +### Operators: Artifact Migration + +As an operator of Connect, if you discover incompatible plugins, there are multiple ways to resolve the incompatibility. They are listed below from most to least preferable. + + 1. Check the latest release from your plugin provider, and if it is compatible, upgrade. + 2. Contact your plugin provider and request that they migrate the plugin to be compatible, following the source migration instructions, and then upgrade to the compatible version. + 3. Migrate the plugin artifacts yourself using the included migration script. + + + +The migration script is located in `bin/connect-plugin-path.sh` and `bin\windows\connect-plugin-path.bat` of your Kafka installation. The script can migrate incompatible plugin artifacts already installed on your Connect worker's `plugin.path` by adding or modifying JAR or resource files. This is not suitable for environments using code-signing, as this can change artifacts such that they will fail signature verification. View the built-in help with `--help`. + +To perform a migration, first use the `list` subcommand to get an overview of the plugins available to the script. You must tell the script where to find plugins, which can be done with the repeatable `--worker-config`, `--plugin-path`, and `--plugin-location` arguments. The script will ignore plugins on the classpath, so any custom plugins on your classpath should be moved to the plugin path in order to be used with this migration script, or migrated manually. Be sure to compare the output of `list` with the worker startup warning or error message to ensure that all of your affected plugins are found by the script. + +Once you see that all incompatible plugins are included in the listing, you can proceed to dry-run the migration with `sync-manifests --dry-run`. This will perform all parts of the migration, except for writing the results of the migration to disk. Note that the `sync-manifests` command requires all specified paths to be writable, and may alter the contents of the directories. Make a backup of your plugins in the specified paths, or copy them to a writable directory. + +Ensure that you have a backup of your plugins and the dry-run succeeds before removing the `--dry-run` flag and actually running the migration. If the migration fails without the `--dry-run` flag, then the partially migrated artifacts should be discarded. The migration is idempotent, so running it multiple times and on already-migrated plugins is safe. After the script finishes, you should verify the migration is complete. The migration script is suitable for use in a Continuous Integration environment for automatic migration. + +### Developers: Source Migration + +To make plugins compatible with `service_load`, it is necessary to add [ServiceLoader](https://docs.oracle.com/javase/8/docs/api/java/util/ServiceLoader.html) manifests to your source code, which should then be packaged in the release artifact. Manifests are resource files in `META-INF/services/` named after their superclass type, and contain a list of fully-qualified subclass names, one on each line. + +In order for a plugin to be compatible, it must appear as a line in a manifest corresponding to the plugin superclass it extends. 
If a single plugin implements multiple plugin interfaces, then it should appear in a manifest for each interface it implements. If you have no classes for a certain type of plugin, you do not need to include a manifest file for that type. If you have classes which should not be visible as plugins, they should be marked abstract. The following types are expected to have manifests: + + * `org.apache.kafka.connect.sink.SinkConnector` + * `org.apache.kafka.connect.source.SourceConnector` + * `org.apache.kafka.connect.storage.Converter` + * `org.apache.kafka.connect.storage.HeaderConverter` + * `org.apache.kafka.connect.transforms.Transformation` + * `org.apache.kafka.connect.transforms.predicates.Predicate` + * `org.apache.kafka.common.config.provider.ConfigProvider` + * `org.apache.kafka.connect.rest.ConnectRestExtension` + * `org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy` + + + +For example, if you only have one connector with the fully-qualified name `com.example.MySinkConnector`, then only one manifest file must be added to resources in `META-INF/services/org.apache.kafka.connect.sink.SinkConnector`, and the contents should be similar to the following: + + + # license header or comment + com.example.MySinkConnector + +You should then verify that your manifests are correct by using the verification steps with a pre-release artifact. If the verification succeeds, you can then release the plugin normally, and operators can upgrade to the compatible version. diff --git a/content/en/41/operations/_index.md b/content/en/41/operations/_index.md new file mode 100644 index 000000000..1ae5b53f6 --- /dev/null +++ b/content/en/41/operations/_index.md @@ -0,0 +1,10 @@ +--- +title: Operations +description: +weight: 6 +tags: ['kafka', 'docs', 'ops'] +aliases: +keywords: +type: docs +--- + diff --git a/content/en/41/operations/basic-kafka-operations.md b/content/en/41/operations/basic-kafka-operations.md new file mode 100644 index 000000000..47835f5d4 --- /dev/null +++ b/content/en/41/operations/basic-kafka-operations.md @@ -0,0 +1,614 @@ +--- +title: Basic Kafka Operations +description: Basic Kafka Operations +weight: 1 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Basic Kafka Operations + +This section will review the most common operations you will perform on your Kafka cluster. All of the tools reviewed in this section are available under the `bin/` directory of the Kafka distribution and each tool will print details on all possible commandline options if it is run with no arguments. + +## Adding and removing topics + +You have the option of either adding topics manually or having them be created automatically when data is first published to a non-existent topic. If topics are auto-created then you may want to tune the default topic configurations used for auto-created topics. + +Topics are added and modified using the topic tool: + + + $ bin/kafka-topics.sh --bootstrap-server localhost:9092 --create --topic my_topic_name \ + --partitions 20 --replication-factor 3 --config x=y + +The replication factor controls how many servers will replicate each message that is written. If you have a replication factor of 3 then up to 2 servers can fail before you will lose access to your data. We recommend you use a replication factor of 2 or 3 so that you can transparently bounce machines without interrupting data consumption. + +The partition count controls how many logs the topic will be sharded into. There are several impacts of the partition count. 
First each partition must fit entirely on a single server. So if you have 20 partitions the full data set (and read and write load) will be handled by no more than 20 servers (not counting replicas). Finally the partition count impacts the maximum parallelism of your consumers. This is discussed in greater detail in the concepts section. + +Each sharded partition log is placed into its own folder under the Kafka log directory. The name of such folders consists of the topic name, appended by a dash (-) and the partition id. Since a typical folder name can not be over 255 characters long, there will be a limitation on the length of topic names. We assume the number of partitions will not ever be above 100,000. Therefore, topic names cannot be longer than 249 characters. This leaves just enough room in the folder name for a dash and a potentially 5 digit long partition id. + +The configurations added on the command line override the default settings the server has for things like the length of time data should be retained. The complete set of per-topic configurations is documented here. + +## Modifying topics + +You can change the configuration or partitioning of a topic using the same topic tool. + +To add partitions you can do + + + $ bin/kafka-topics.sh --bootstrap-server localhost:9092 --alter --topic my_topic_name \ + --partitions 40 + +Be aware that one use case for partitions is to semantically partition data, and adding partitions doesn't change the partitioning of existing data so this may disturb consumers if they rely on that partition. That is if data is partitioned by `hash(key) % number_of_partitions` then this partitioning will potentially be shuffled by adding partitions but Kafka will not attempt to automatically redistribute data in any way. + +To add configs: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type topics --entity-name my_topic_name --alter --add-config x=y + +To remove a config: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type topics --entity-name my_topic_name --alter --delete-config x + +And finally deleting a topic: + + + $ bin/kafka-topics.sh --bootstrap-server localhost:9092 --delete --topic my_topic_name + +Kafka does not currently support reducing the number of partitions for a topic. + +Instructions for changing the replication factor of a topic can be found here. + +## Graceful shutdown + +The Kafka cluster will automatically detect any broker shutdown or failure and elect new leaders for the partitions on that machine. This will occur whether a server fails or it is brought down intentionally for maintenance or configuration changes. For the latter cases Kafka supports a more graceful mechanism for stopping a server than just killing it. When a server is stopped gracefully it has two optimizations it will take advantage of: + + 1. It will sync all its logs to disk to avoid needing to do any log recovery when it restarts (i.e. validating the checksum for all messages in the tail of the log). Log recovery takes time so this speeds up intentional restarts. + 2. It will migrate any partitions the server is the leader for to other replicas prior to shutting down. This will make the leadership transfer faster and minimize the time each partition is unavailable to a few milliseconds. 
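+
+A graceful stop is typically initiated by sending SIGTERM to the broker process rather than killing it hard, for example with the bundled script:
+
+    $ bin/kafka-server-stop.sh
+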
+Syncing the logs will happen automatically whenever the server is stopped other than by a hard kill, but the controlled leadership migration requires using a special setting: + + + controlled.shutdown.enable=true + +Note that controlled shutdown will only succeed if _all_ the partitions hosted on the broker have replicas (i.e. the replication factor is greater than 1 _and_ at least one of these replicas is alive). This is generally what you want since shutting down the last replica would make that topic partition unavailable. + +## Balancing leadership + +Whenever a broker stops or crashes, leadership for that broker's partitions transfers to other replicas. When the broker is restarted it will only be a follower for all its partitions, meaning it will not be used for client reads and writes. + +To avoid this imbalance, Kafka has a notion of preferred replicas. If the list of replicas for a partition is 1,5,9 then node 1 is preferred as the leader to either node 5 or 9 because it is earlier in the replica list. By default the Kafka cluster will try to restore leadership to the preferred replicas. This behaviour is configured with: + + + auto.leader.rebalance.enable=true + +You can also set this to false, but you will then need to manually restore leadership to the restored replicas by running the command: + + + $ bin/kafka-leader-election.sh --bootstrap-server localhost:9092 --election-type preferred --all-topic-partitions + +## Balancing replicas across racks + +The rack awareness feature spreads replicas of the same partition across different racks. This extends the guarantees Kafka provides for broker-failure to cover rack-failure, limiting the risk of data loss should all the brokers on a rack fail at once. The feature can also be applied to other broker groupings such as availability zones in EC2. + +You can specify that a broker belongs to a particular rack by adding a property to the broker config: + + + broker.rack=my-rack-id + +When a topic is created, modified or replicas are redistributed, the rack constraint will be honoured, ensuring replicas span as many racks as they can (a partition will span min(#racks, replication-factor) different racks). + +The algorithm used to assign replicas to brokers ensures that the number of leaders per broker will be constant, regardless of how brokers are distributed across racks. This ensures balanced throughput. + +However if racks are assigned different numbers of brokers, the assignment of replicas will not be even. Racks with fewer brokers will get more replicas, meaning they will use more storage and put more resources into replication. Hence it is sensible to configure an equal number of brokers per rack. + +## Mirroring data between clusters & Geo-replication + +Kafka administrators can define data flows that cross the boundaries of individual Kafka clusters, data centers, or geographical regions. Please refer to the section on Geo-Replication for further information. + +## Checking consumer position + +Sometimes it's useful to see the position of your consumers. We have a tool that will show the position of all consumers in a consumer group as well as how far behind the end of the log they are. 
To run this tool on a consumer group named _my-group_ consuming a topic named _my-topic_ would look like this: + + + $ bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group my-group + TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID + my-topic 0 2 4 2 consumer-1-029af89c-873c-4751-a720-cefd41a669d6 /127.0.0.1 consumer-1 + my-topic 1 2 3 1 consumer-1-029af89c-873c-4751-a720-cefd41a669d6 /127.0.0.1 consumer-1 + my-topic 2 2 3 1 consumer-2-42c1abd4-e3b2-425d-a8bb-e1ea49b29bb2 /127.0.0.1 consumer-2 + +## Managing groups + +With the GroupCommand tool, we can list groups of all types, including consumer groups, share groups and streams groups. Each type of group has its own tool for administering groups of that type. For example, to list all groups in the cluster: + + + $ bin/kafka-groups.sh --bootstrap-server localhost:9092 --list + GROUP TYPE PROTOCOL + my-consumer-group Consumer consumer + my-share-group Share share + +## Managing consumer groups + +With the ConsumerGroupCommand tool, we can list, describe, or delete the consumer groups. The consumer group can be deleted manually, or automatically when the last committed offset for that group expires. Manual deletion works only if the group does not have any active members. For example, to list all consumer groups across all topics: + + + $ bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --list + test-consumer-group + +To view offsets, as mentioned earlier, we "describe" the consumer group like this: + + + $ bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group my-group + TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID + topic3 0 241019 395308 154289 consumer2-e76ea8c3-5d30-4299-9005-47eb41f3d3c4 /127.0.0.1 consumer2 + topic2 1 520678 803288 282610 consumer2-e76ea8c3-5d30-4299-9005-47eb41f3d3c4 /127.0.0.1 consumer2 + topic3 1 241018 398817 157799 consumer2-e76ea8c3-5d30-4299-9005-47eb41f3d3c4 /127.0.0.1 consumer2 + topic1 0 854144 855809 1665 consumer1-3fc8d6f1-581a-4472-bdf3-3515b4aee8c1 /127.0.0.1 consumer1 + topic2 0 460537 803290 342753 consumer1-3fc8d6f1-581a-4472-bdf3-3515b4aee8c1 /127.0.0.1 consumer1 + topic3 2 243655 398812 155157 consumer4-117fe4d3-c6c1-4178-8ee9-eb4a3954bee0 /127.0.0.1 consumer4 + +Note that if the consumer group uses the consumer protocol, the admin client needs DESCRIBE access to all the topics used in the group (topics the members are subscribed to). In contrast, the classic protocol does not require all topics DESCRIBE authorization. There are a number of additional "describe" options that can be used to provide more detailed information about a consumer group: + + * \--members: This option provides the list of all active members in the consumer group. + + $ bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group my-group --members + CONSUMER-ID HOST CLIENT-ID #PARTITIONS + consumer1-3fc8d6f1-581a-4472-bdf3-3515b4aee8c1 /127.0.0.1 consumer1 2 + consumer4-117fe4d3-c6c1-4178-8ee9-eb4a3954bee0 /127.0.0.1 consumer4 1 + consumer2-e76ea8c3-5d30-4299-9005-47eb41f3d3c4 /127.0.0.1 consumer2 3 + consumer3-ecea43e4-1f01-479f-8349-f9130b75d8ee /127.0.0.1 consumer3 0 + + * \--members --verbose: On top of the information reported by the "--members" options above, this option also provides the partitions assigned to each member. 
+ + $ bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group my-group --members --verbose + CONSUMER-ID HOST CLIENT-ID #PARTITIONS ASSIGNMENT + consumer1-3fc8d6f1-581a-4472-bdf3-3515b4aee8c1 /127.0.0.1 consumer1 2 topic1(0), topic2(0) + consumer4-117fe4d3-c6c1-4178-8ee9-eb4a3954bee0 /127.0.0.1 consumer4 1 topic3(2) + consumer2-e76ea8c3-5d30-4299-9005-47eb41f3d3c4 /127.0.0.1 consumer2 3 topic2(1), topic3(0,1) + consumer3-ecea43e4-1f01-479f-8349-f9130b75d8ee /127.0.0.1 consumer3 0 - + + * \--offsets: This is the default describe option and provides the same output as the "--describe" option. + * \--state: This option provides useful group-level information. + + $ bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group my-group --state + COORDINATOR (ID) ASSIGNMENT-STRATEGY STATE #MEMBERS + localhost:9092 (0) range Stable 4 + + +To manually delete one or multiple consumer groups, the "--delete" option can be used: + + + $ bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --delete --group my-group --group my-other-group + Deletion of requested consumer groups ('my-group', 'my-other-group') was successful. + +To reset offsets of a consumer group, "--reset-offsets" option can be used. This option supports one consumer group at the time. It requires defining following scopes: --all-topics or --topic. One scope must be selected, unless you use '--from-file' scenario. Also, first make sure that the consumer instances are inactive. See [KIP-122](https://cwiki.apache.org/confluence/x/_iEIB) for more details. + +It has 3 execution options: + + * (default) to display which offsets to reset. + * \--execute : to execute --reset-offsets process. + * \--export : to export the results to a CSV format. + + + +\--reset-offsets also has the following scenarios to choose from (at least one scenario must be selected): + + * \--to-datetime : Reset offsets to offsets from datetime. Format: 'YYYY-MM-DDThh:mm:ss.sss' + * \--to-earliest : Reset offsets to earliest offset. + * \--to-latest : Reset offsets to latest offset. + * \--shift-by : Reset offsets shifting current offset by 'n', where 'n' can be positive or negative. + * \--from-file : Reset offsets to values defined in CSV file. + * \--to-current : Resets offsets to current offset. + * \--by-duration : Reset offsets to offset by duration from current timestamp. Format: 'PnDTnHnMnS' + * \--to-offset : Reset offsets to a specific offset. + +Please note, that out of range offsets will be adjusted to available offset end. For example, if offset end is at 10 and offset shift request is of 15, then, offset at 10 will actually be selected. + +For example, to reset offsets of a consumer group to the latest offset: + + + $ bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --reset-offsets --group my-group --topic topic1 --to-latest + TOPIC PARTITION NEW-OFFSET + topic1 0 0 + +## Managing share groups + +NOTE: Apache Kafka 4.1 ships with a preview of share groups which is not enabled by default. To enable share groups, use the `kafka-features.sh` tool to upgrade to `share.version=1`. For more information, please read the [ release notes](https://cwiki.apache.org/confluence/x/CIq3FQ). + +Use the ShareGroupCommand tool to list, describe, or delete the share groups. Only share groups without any active members can be deleted. 
For example, to list all share groups in a cluster: + + + $ bin/kafka-share-groups.sh --bootstrap-server localhost:9092 --list + my-share-group + +To view the current start offset, use the "--describe" option: + + + $ bin/kafka-share-groups.sh --bootstrap-server localhost:9092 --describe --group my-share-group + GROUP TOPIC PARTITION START-OFFSET + my-share-group topic1 0 4 + +NOTE: The admin client needs DESCRIBE access to all the topics used in the group. There are many --describe options that provide more detailed information about a share group: + + * \--members: Describes active members in the share group. + + bin/kafka-share-groups.sh --bootstrap-server localhost:9092 --describe --group my-share-group --members + GROUP CONSUMER-ID HOST CLIENT-ID #PARTITIONS ASSIGNMENT + my-share-group 94wrSQNmRda9Q6sk6jMO6Q /127.0.0.1 console-share-consumer 1 topic1:0 + my-share-group EfI0sha8QSKSrL_-I_zaTA /127.0.0.1 console-share-consumer 1 topic1:0 + +You can see that both members have been assigned the same partition which they are sharing. + * \--offsets: The default describe option. This provides the same output as the "--describe" option. + * \--state: Describes a summary of the state of the share group. + + bin/kafka-share-groups.sh --bootstrap-server localhost:9092 --describe --group my-share-group --state + GROUP COORDINATOR (ID) STATE #MEMBERS + my-share-group localhost:9092 (1) Stable 2 + + + + +To delete the offsets of individual topics in the share group, use the "--delete-offsets" option: + + + $ bin/kafka-share-groups.sh --bootstrap-server localhost:9092 --delete-offsets --group my-share-group --topic topic1 + TOPIC STATUS + topic1 Successful + +To delete one or more share groups, use "--delete" option: + + + $ bin/kafka-share-groups.sh --bootstrap-server localhost:9092 --delete --group my-share-group + Deletion of requested share groups ('my-share-group') was successful. + +## Expanding your cluster + +Adding servers to a Kafka cluster is easy, just assign them a unique broker id and start up Kafka on your new servers. However these new servers will not automatically be assigned any data partitions, so unless partitions are moved to them they won't be doing any work until new topics are created. So usually when you add machines to your cluster you will want to migrate some existing data to these machines. + +The process of migrating data is manually initiated but fully automated. Under the covers what happens is that Kafka will add the new server as a follower of the partition it is migrating and allow it to fully replicate the existing data in that partition. When the new server has fully replicated the contents of this partition and joined the in-sync replica one of the existing replicas will delete their partition's data. + +The partition reassignment tool can be used to move partitions across brokers. An ideal partition distribution would ensure even data load and partition sizes across all brokers. The partition reassignment tool does not have the capability to automatically study the data distribution in a Kafka cluster and move partitions around to attain an even load distribution. As such, the admin has to figure out which topics or partitions should be moved around. + +The partition reassignment tool can run in 3 mutually exclusive modes: + + * \--generate: In this mode, given a list of topics and a list of brokers, the tool generates a candidate reassignment to move all partitions of the specified topics to the new brokers. 
This option merely provides a convenient way to generate a partition reassignment plan given a list of topics and target brokers. + * \--execute: In this mode, the tool kicks off the reassignment of partitions based on the user provided reassignment plan. (using the --reassignment-json-file option). This can either be a custom reassignment plan hand crafted by the admin or provided by using the --generate option + * \--verify: In this mode, the tool verifies the status of the reassignment for all partitions listed during the last --execute. The status can be either of successfully completed, failed or in progress + + + +### Automatically migrating data to new machines + +The partition reassignment tool can be used to move some topics off of the current set of brokers to the newly added brokers. This is typically useful while expanding an existing cluster since it is easier to move entire topics to the new set of brokers, than moving one partition at a time. When used to do this, the user should provide a list of topics that should be moved to the new set of brokers and a target list of new brokers. The tool then evenly distributes all partitions for the given list of topics across the new set of brokers. During this move, the replication factor of the topic is kept constant. Effectively the replicas for all partitions for the input list of topics are moved from the old set of brokers to the newly added brokers. + +For instance, the following example will move all partitions for topics foo1,foo2 to the new set of brokers 5,6. At the end of this move, all partitions for topics foo1 and foo2 will _only_ exist on brokers 5,6. + +Since the tool accepts the input list of topics as a json file, you first need to identify the topics you want to move and create the json file as follows: + + + $ cat topics-to-move.json + { + "topics": [ + { "topic": "foo1" }, + { "topic": "foo2" } + ], + "version": 1 + } + +Once the json file is ready, use the partition reassignment tool to generate a candidate assignment: + + + $ bin/kafka-reassign-partitions.sh --bootstrap-server localhost:9092 --topics-to-move-json-file topics-to-move.json --broker-list "5,6" --generate + Current partition replica assignment + {"version":1, + "partitions":[{"topic":"foo1","partition":0,"replicas":[2,1],"log_dirs":["any"]}, + {"topic":"foo1","partition":1,"replicas":[1,3],"log_dirs":["any"]}, + {"topic":"foo1","partition":2,"replicas":[3,4],"log_dirs":["any"]}, + {"topic":"foo2","partition":0,"replicas":[4,2],"log_dirs":["any"]}, + {"topic":"foo2","partition":1,"replicas":[2,1],"log_dirs":["any"]}, + {"topic":"foo2","partition":2,"replicas":[1,3],"log_dirs":["any"]}] + } + + Proposed partition reassignment configuration + {"version":1, + "partitions":[{"topic":"foo1","partition":0,"replicas":[6,5],"log_dirs":["any"]}, + {"topic":"foo1","partition":1,"replicas":[5,6],"log_dirs":["any"]}, + {"topic":"foo1","partition":2,"replicas":[6,5],"log_dirs":["any"]}, + {"topic":"foo2","partition":0,"replicas":[5,6],"log_dirs":["any"]}, + {"topic":"foo2","partition":1,"replicas":[6,5],"log_dirs":["any"]}, + {"topic":"foo2","partition":2,"replicas":[5,6],"log_dirs":["any"]}] + } + +The tool generates a candidate assignment that will move all partitions from topics foo1,foo2 to brokers 5,6. Note, however, that at this point, the partition movement has not started, it merely tells you the current assignment and the proposed new assignment. The current assignment should be saved in case you want to rollback to it. 
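+
+For example, if you copy the "Current partition replica assignment" output into a file (the file name here is illustrative), you can roll back later by passing that file back to the tool with the --execute option:
+
+    $ bin/kafka-reassign-partitions.sh --bootstrap-server localhost:9092 --reassignment-json-file rollback.json --execute
+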
The new assignment should be saved in a json file (e.g. expand-cluster-reassignment.json) to be input to the tool with the --execute option as follows: + + + $ bin/kafka-reassign-partitions.sh --bootstrap-server localhost:9092 --reassignment-json-file expand-cluster-reassignment.json --execute + Current partition replica assignment + + {"version":1, + "partitions":[{"topic":"foo1","partition":0,"replicas":[2,1],"log_dirs":["any"]}, + {"topic":"foo1","partition":1,"replicas":[1,3],"log_dirs":["any"]}, + {"topic":"foo1","partition":2,"replicas":[3,4],"log_dirs":["any"]}, + {"topic":"foo2","partition":0,"replicas":[4,2],"log_dirs":["any"]}, + {"topic":"foo2","partition":1,"replicas":[2,1],"log_dirs":["any"]}, + {"topic":"foo2","partition":2,"replicas":[1,3],"log_dirs":["any"]}] + } + + Save this to use as the --reassignment-json-file option during rollback + Successfully started partition reassignments for foo1-0,foo1-1,foo1-2,foo2-0,foo2-1,foo2-2 + +Finally, the --verify option can be used with the tool to check the status of the partition reassignment. Note that the same expand-cluster-reassignment.json (used with the --execute option) should be used with the --verify option: + + + $ bin/kafka-reassign-partitions.sh --bootstrap-server localhost:9092 --reassignment-json-file expand-cluster-reassignment.json --verify + Status of partition reassignment: + Reassignment of partition [foo1,0] is completed + Reassignment of partition [foo1,1] is still in progress + Reassignment of partition [foo1,2] is still in progress + Reassignment of partition [foo2,0] is completed + Reassignment of partition [foo2,1] is completed + Reassignment of partition [foo2,2] is completed + +### Custom partition assignment and migration + +The partition reassignment tool can also be used to selectively move replicas of a partition to a specific set of brokers. When used in this manner, it is assumed that the user knows the reassignment plan and does not require the tool to generate a candidate reassignment, effectively skipping the --generate step and moving straight to the --execute step + +For instance, the following example moves partition 0 of topic foo1 to brokers 5,6 and partition 1 of topic foo2 to brokers 2,3: + +The first step is to hand craft the custom reassignment plan in a json file: + + + $ cat custom-reassignment.json + {"version":1,"partitions":[{"topic":"foo1","partition":0,"replicas":[5,6]},{"topic":"foo2","partition":1,"replicas":[2,3]}]} + +Then, use the json file with the --execute option to start the reassignment process: + + + $ bin/kafka-reassign-partitions.sh --bootstrap-server localhost:9092 --reassignment-json-file custom-reassignment.json --execute + Current partition replica assignment + + {"version":1, + "partitions":[{"topic":"foo1","partition":0,"replicas":[1,2],"log_dirs":["any"]}, + {"topic":"foo2","partition":1,"replicas":[3,4],"log_dirs":["any"]}] + } + + Save this to use as the --reassignment-json-file option during rollback + Successfully started partition reassignments for foo1-0,foo2-1 + +The --verify option can be used with the tool to check the status of the partition reassignment. 
Note that the same custom-reassignment.json (used with the --execute option) should be used with the --verify option: + + + $ bin/kafka-reassign-partitions.sh --bootstrap-server localhost:9092 --reassignment-json-file custom-reassignment.json --verify + Status of partition reassignment: + Reassignment of partition [foo1,0] is completed + Reassignment of partition [foo2,1] is completed + +## Decommissioning brokers + +The partition reassignment tool does not have the ability to automatically generate a reassignment plan for decommissioning brokers yet. As such, the admin has to come up with a reassignment plan to move the replica for all partitions hosted on the broker to be decommissioned, to the rest of the brokers. This can be relatively tedious as the reassignment needs to ensure that all the replicas are not moved from the decommissioned broker to only one other broker. To make this process effortless, we plan to add tooling support for decommissioning brokers in the future. + +## Increasing replication factor + +Increasing the replication factor of an existing partition is easy. Just specify the extra replicas in the custom reassignment json file and use it with the --execute option to increase the replication factor of the specified partitions. + +For instance, the following example increases the replication factor of partition 0 of topic foo from 1 to 3. Before increasing the replication factor, the partition's only replica existed on broker 5. As part of increasing the replication factor, we will add more replicas on brokers 6 and 7. + +The first step is to hand craft the custom reassignment plan in a json file: + + + $ cat increase-replication-factor.json + {"version":1, + "partitions":[{"topic":"foo","partition":0,"replicas":[5,6,7]}]} + +Then, use the json file with the --execute option to start the reassignment process: + + + $ bin/kafka-reassign-partitions.sh --bootstrap-server localhost:9092 --reassignment-json-file increase-replication-factor.json --execute + Current partition replica assignment + + {"version":1, + "partitions":[{"topic":"foo","partition":0,"replicas":[5],"log_dirs":["any"]}]} + + Save this to use as the --reassignment-json-file option during rollback + Successfully started partition reassignment for foo-0 + +The --verify option can be used with the tool to check the status of the partition reassignment. Note that the same increase-replication-factor.json (used with the --execute option) should be used with the --verify option: + + + $ bin/kafka-reassign-partitions.sh --bootstrap-server localhost:9092 --reassignment-json-file increase-replication-factor.json --verify + Status of partition reassignment: + Reassignment of partition [foo,0] is completed + +You can also verify the increase in replication factor with the kafka-topics.sh tool: + + + $ bin/kafka-topics.sh --bootstrap-server localhost:9092 --topic foo --describe + Topic:foo PartitionCount:1 ReplicationFactor:3 Configs: + Topic: foo Partition: 0 Leader: 5 Replicas: 5,6,7 Isr: 5,6,7 + +## Limiting bandwidth usage during data migration + +Kafka lets you apply a throttle to replication traffic, setting an upper bound on the bandwidth used to move replicas from machine to machine and from disk to disk. This is useful when rebalancing a cluster, adding or removing brokers or adding or removing disks, as it limits the impact these data-intensive operations will have on users. + +There are two interfaces that can be used to engage a throttle. 
The simplest, and safest, is to apply a throttle when invoking the kafka-reassign-partitions.sh, but kafka-configs.sh can also be used to view and alter the throttle values directly. + +So for example, if you were to execute a rebalance, with the below command, it would move partitions at no more than 50MB/s between brokers, and at no more than 100MB/s between disks on a broker. + + + $ bin/kafka-reassign-partitions.sh --bootstrap-server localhost:9092 --execute --reassignment-json-file bigger-cluster.json --throttle 50000000 --replica-alter-log-dirs-throttle 100000000 + +When you execute this script you will see the throttle engage: + + + The inter-broker throttle limit was set to 50000000 B/s + The replica-alter-dir throttle limit was set to 100000000 B/s + Successfully started partition reassignment for foo1-0 + +Should you wish to alter the throttle, during a rebalance, say to increase the inter-broker throughput so it completes quicker, you can do this by re-running the execute command with the --additional option passing the same reassignment-json-file: + + + $ bin/kafka-reassign-partitions.sh --bootstrap-server localhost:9092 --additional --execute --reassignment-json-file bigger-cluster.json --throttle 700000000 + The inter-broker throttle limit was set to 700000000 B/s + +Once the rebalance completes the administrator can check the status of the rebalance using the --verify option. If the rebalance has completed, the throttle will be removed via the --verify command. It is important that administrators remove the throttle in a timely manner once rebalancing completes by running the command with the --verify option. Failure to do so could cause regular replication traffic to be throttled. + +When the --verify option is executed, and the reassignment has completed, the script will confirm that the throttle was removed: + + + $ bin/kafka-reassign-partitions.sh --bootstrap-server localhost:9092 --verify --reassignment-json-file bigger-cluster.json + Status of partition reassignment: + Reassignment of partition [my-topic,1] is completed + Reassignment of partition [my-topic,0] is completed + + Clearing broker-level throttles on brokers 1,2,3 + Clearing topic-level throttles on topic my-topic + +The administrator can also validate the assigned configs using the kafka-configs.sh. There are two sets of throttle configuration used to manage the throttling process. First set refers to the throttle value itself. This is configured, at a broker level, using the dynamic properties: + + + leader.replication.throttled.rate + follower.replication.throttled.rate + replica.alter.log.dirs.io.max.bytes.per.second + +Then there is the configuration pair of enumerated sets of throttled replicas: + + + leader.replication.throttled.replicas + follower.replication.throttled.replicas + +Which are configured per topic. + +All five config values are automatically assigned by kafka-reassign-partitions.sh (discussed below). 
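+
+If you ever need to set or clear these values by hand (the --alter switch on kafka-configs.sh is mentioned below), a sketch along the following lines should work; the broker id, topic name, and rates are placeholders:
+
+
+    # Broker-level throttle rates (dynamic configs), e.g. for broker 1
+    $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 1 \
+      --alter --add-config leader.replication.throttled.rate=50000000,follower.replication.throttled.rate=50000000
+
+    # Topic-level throttled replica lists (partitionId:brokerId pairs)
+    $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type topics --entity-name my-topic \
+      --alter --add-config 'leader.replication.throttled.replicas=0:101,follower.replication.throttled.replicas=0:102'
+
+    # Remove the broker-level throttle rates again with --delete-config
+    $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 1 \
+      --alter --delete-config leader.replication.throttled.rate,follower.replication.throttled.rate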
+ +To view the throttle limit configuration: + + + $ bin/kafka-configs.sh --describe --bootstrap-server localhost:9092 --entity-type brokers + Configs for brokers '2' are leader.replication.throttled.rate=700000000,follower.replication.throttled.rate=700000000,replica.alter.log.dirs.io.max.bytes.per.second=1000000000 + Configs for brokers '1' are leader.replication.throttled.rate=700000000,follower.replication.throttled.rate=700000000,replica.alter.log.dirs.io.max.bytes.per.second=1000000000 + +This shows the throttle applied to both leader and follower side of the replication protocol (by default both sides are assigned the same throttled throughput value), as well as the disk throttle. + +To view the list of throttled replicas: + + + $ bin/kafka-configs.sh --describe --bootstrap-server localhost:9092 --entity-type topics + Configs for topic 'my-topic' are leader.replication.throttled.replicas=1:102,0:101, + follower.replication.throttled.replicas=1:101,0:102 + +Here we see the leader throttle is applied to partition 1 on broker 102 and partition 0 on broker 101. Likewise the follower throttle is applied to partition 1 on broker 101 and partition 0 on broker 102. + +By default kafka-reassign-partitions.sh will apply the leader throttle to all replicas that exist before the rebalance, any one of which might be leader. It will apply the follower throttle to all move destinations. So if there is a partition with replicas on brokers 101,102, being reassigned to 102,103, a leader throttle, for that partition, would be applied to 101,102 and a follower throttle would be applied to 103 only. + +If required, you can also use the --alter switch on kafka-configs.sh to alter the throttle configurations manually. + +### Safe usage of throttled replication + +Some care should be taken when using throttled replication. In particular: + +_(1) Throttle Removal:_ + +The throttle should be removed in a timely manner once reassignment completes (by running `bin/kafka-reassign-partitions.sh --verify`). + +_(2) Ensuring Progress:_ + +If the throttle is set too low, in comparison to the incoming write rate, it is possible for replication to not make progress. This occurs when: + + + max(BytesInPerSec) > throttle + +Where BytesInPerSec is the metric that monitors the write throughput of producers into each broker. + +The administrator can monitor whether replication is making progress, during the rebalance, using the metric: + + + kafka.server:type=FetcherLagMetrics,name=ConsumerLag,clientId=([-.\w]+),topic=([-.\w]+),partition=([0-9]+) + +The lag should constantly decrease during replication. If the metric does not decrease the administrator should increase the throttle throughput as described above. + +## Setting quotas + +Quotas overrides and defaults may be configured at (user, client-id), user or client-id levels as described here. By default, clients receive an unlimited quota. It is possible to set custom quotas for each (user, client-id), user or client-id group. + +Configure custom quota for (user=user1, client-id=clientA): + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --alter --add-config 'producer_byte_rate=1024,consumer_byte_rate=2048,request_percentage=200' --entity-type users --entity-name user1 --entity-type clients --entity-name clientA + Updated config for entity: user-principal 'user1', client-id 'clientA'. 
+ +Configure custom quota for user=user1: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --alter --add-config 'producer_byte_rate=1024,consumer_byte_rate=2048,request_percentage=200' --entity-type users --entity-name user1 + Updated config for entity: user-principal 'user1'. + +Configure custom quota for client-id=clientA: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --alter --add-config 'producer_byte_rate=1024,consumer_byte_rate=2048,request_percentage=200' --entity-type clients --entity-name clientA + Updated config for entity: client-id 'clientA'. + +It is possible to set default quotas for each (user, client-id), user or client-id group by specifying _\--entity-default_ option instead of _\--entity-name_. + +Configure default client-id quota for user=user1: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --alter --add-config 'producer_byte_rate=1024,consumer_byte_rate=2048,request_percentage=200' --entity-type users --entity-name user1 --entity-type clients --entity-default + Updated config for entity: user-principal 'user1', default client-id. + +Configure default quota for user: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --alter --add-config 'producer_byte_rate=1024,consumer_byte_rate=2048,request_percentage=200' --entity-type users --entity-default + Updated config for entity: default user-principal. + +Configure default quota for client-id: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --alter --add-config 'producer_byte_rate=1024,consumer_byte_rate=2048,request_percentage=200' --entity-type clients --entity-default + Updated config for entity: default client-id. + +Here's how to describe the quota for a given (user, client-id): + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type users --entity-name user1 --entity-type clients --entity-name clientA + Configs for user-principal 'user1', client-id 'clientA' are producer_byte_rate=1024,consumer_byte_rate=2048,request_percentage=200 + +Describe quota for a given user: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type users --entity-name user1 + Configs for user-principal 'user1' are producer_byte_rate=1024,consumer_byte_rate=2048,request_percentage=200 + +Describe quota for a given client-id: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type clients --entity-name clientA + Configs for client-id 'clientA' are producer_byte_rate=1024,consumer_byte_rate=2048,request_percentage=200 + +Describe default quota for user: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type users --entity-default + Quota configs for the default user-principal are consumer_byte_rate=2048.0, request_percentage=200.0, producer_byte_rate=1024.0 + +Describe default quota for client-id: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type clients --entity-default + Quota configs for the default client-id are consumer_byte_rate=2048.0, request_percentage=200.0, producer_byte_rate=1024.0 + +If entity name is not specified, all entities of the specified type are described. 
For example, describe all users: + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type users + Configs for user-principal 'user1' are producer_byte_rate=1024,consumer_byte_rate=2048,request_percentage=200 + Configs for default user-principal are producer_byte_rate=1024,consumer_byte_rate=2048,request_percentage=200 + +Similarly for (user, client): + + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type users --entity-type clients + Configs for user-principal 'user1', default client-id are producer_byte_rate=1024,consumer_byte_rate=2048,request_percentage=200 + Configs for user-principal 'user1', client-id 'clientA' are producer_byte_rate=1024,consumer_byte_rate=2048,request_percentage=200 diff --git a/content/en/41/operations/consumer-rebalance-protocol.md b/content/en/41/operations/consumer-rebalance-protocol.md new file mode 100644 index 000000000..592e05f31 --- /dev/null +++ b/content/en/41/operations/consumer-rebalance-protocol.md @@ -0,0 +1,70 @@ +--- +title: Consumer Rebalance Protocol +description: Consumer Rebalance Protocol +weight: 10 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Consumer Rebalance Protocol + +## Overview + +Starting from Apache Kafka 4.0, the Next Generation of the Consumer Rebalance Protocol ([KIP-848](https://cwiki.apache.org/confluence/x/HhD1D)) is Generally Available (GA). It improves the scalability of consumer groups while simplifying consumers. It also decreases rebalance times, thanks to its fully incremental design, which no longer relies on a global synchronization barrier. + +Consumer Groups using the new protocol are now referred to as `Consumer` groups, while groups using the old protocol are referred to as `Classic` groups. Note that Classic groups can still be used to form consumer groups using the old protocol. + +## Server + +The new consumer protocol is automatically enabled on the server since Apache Kafka 4.0. Enabling and disabling the protocol is controlled by the `group.version` feature flag. + +The consumer heartbeat interval and the session timeout are controlled by the server now with the following configs: + + * `group.consumer.heartbeat.interval.ms` + * `group.consumer.session.timeout.ms` + + + +The assignment strategy is also controlled by the server. The `group.consumer.assignors` configuration can be used to specify the list of available assignors for `Consumer` groups. By default, the `uniform` assignor and the `range` assignor are configured. The first assignor in the list is used by default unless the Consumer selects a different one. It is also possible to implement custom assignment strategies on the server side by implementing the `ConsumerGroupPartitionAssignor` interface and specifying the full class name in the configuration. + +## Consumer + +Since Apache Kafka 4.0, the Consumer supports the new consumer rebalance protocol. However, the protocol is not enabled by default. The `group.protocol` configuration must be set to `consumer` to enable it. When enabled, the new consumer protocol is used alongside an improved threading model. + +The `group.remote.assignor` configuration is introduced as an optional configuration to overwrite the default assignment strategy configured on the server side. + +The `subscribe(SubscriptionPattern)` and `subscribe(SubscriptionPattern, ConsumerRebalanceListener)` methods have been added to subscribe to a regular expression with the new consumer rebalance protocol. 
With these methods, the regular expression uses the RE2J format and is now evaluated on the server side. + +New metrics have been added to the Consumer when using the new rebalance protocol, mainly providing visibility over the improved threading model. See [New Consumer Metrics](https://cwiki.apache.org/confluence/x/lQ_TEg). + +When the new rebalance protocol is enabled, the following configurations and APIs are no longer usable: + + * `heartbeat.interval.ms` + * `session.timeout.ms` + * `partition.assignment.strategy` + * `enforceRebalance(String)` and `enforceRebalance()` + + + +## Upgrade & Downgrade + +### Offline + +Consumer groups are automatically converted from `Classic` to `Consumer` and vice versa when they are empty. Hence, it is possible to change the protocol used by the group by shutting down all the consumers and bringing them back up with the `group.protocol=consumer` configuration. The downside is that it requires taking the consumer group down. + +### Online + +Consumer groups can be upgraded without downtime by rolling out the consumer with the `group.protocol=consumer` configuration. When the first consumer using the new consumer rebalance protocol joins the group, the group is converted from `Classic` to `Consumer`, and the classic rebalance protocol is interoperated to work with the new consumer rebalance protocol. This is only possible when the classic group uses an assignor that does not embed custom metadata. + +Consumer groups can be downgraded using the opposite process. In this case, the group is converted from `Consumer` to `Classic` when the last consumer using the new consumer rebalance protocol leaves the group. + +## Limitations + +While the new consumer rebalance protocol works for most use cases, it is still important to be aware of the following limitations: + + * Client-side assignors are not supported. (see [KAFKA-18327](https://issues.apache.org/jira/browse/KAFKA-18327)) + * Rack-aware assignment strategies are not fully supported. (see [KAFKA-17747](https://issues.apache.org/jira/browse/KAFKA-17747)) + + diff --git a/content/en/41/operations/datacenters.md b/content/en/41/operations/datacenters.md new file mode 100644 index 000000000..4d78f77db --- /dev/null +++ b/content/en/41/operations/datacenters.md @@ -0,0 +1,23 @@ +--- +title: Datacenters +description: Datacenters +weight: 2 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Datacenters + +Some deployments will need to manage a data pipeline that spans multiple datacenters. Our recommended approach to this is to deploy a local Kafka cluster in each datacenter, with application instances in each datacenter interacting only with their local cluster and mirroring data between clusters (see the documentation on Geo-Replication for how to do this). + +This deployment pattern allows datacenters to act as independent entities and allows us to manage and tune inter-datacenter replication centrally. This allows each facility to stand alone and operate even if the inter-datacenter links are unavailable: when this occurs the mirroring falls behind until the link is restored at which time it catches up. + +For applications that need a global view of all data you can use mirroring to provide clusters which have aggregate data mirrored from the local clusters in _all_ datacenters. These aggregate clusters are used for reads by applications that require the full data set. + +This is not the only possible deployment pattern. 
It is possible to read from or write to a remote Kafka cluster over the WAN, though obviously this will add whatever latency is required to get to the cluster.
+
+Kafka naturally batches data in both the producer and consumer so it can achieve high-throughput even over a high-latency connection. To allow this though it may be necessary to increase the TCP socket buffer sizes for the producer, consumer, and broker using the `socket.send.buffer.bytes` and `socket.receive.buffer.bytes` configurations. The appropriate way to set this is documented [here](https://en.wikipedia.org/wiki/Bandwidth-delay_product).
+
+It is generally _not_ advisable to run a _single_ Kafka cluster that spans multiple datacenters over a high-latency link. This will incur very high replication latency for Kafka writes, and Kafka will not remain available in all locations if the network between locations is unavailable.
diff --git a/content/en/41/operations/eligible-leader-replicas.md b/content/en/41/operations/eligible-leader-replicas.md
new file mode 100644
index 000000000..76fc8444c
--- /dev/null
+++ b/content/en/41/operations/eligible-leader-replicas.md
@@ -0,0 +1,42 @@
+---
+title: Eligible Leader Replicas
+description: Eligible Leader Replicas
+weight: 12
+tags: ['kafka', 'docs']
+aliases:
+keywords:
+type: docs
+---
+
+# Eligible Leader Replicas
+
+## Overview
+
+Starting from Apache Kafka 4.0, Eligible Leader Replicas ([KIP-966 Part 1](https://cwiki.apache.org/confluence/x/mpOzDw)) is available to users as an improvement to Kafka replication (ELR is enabled by default on new clusters starting 4.1). Because the "strict min ISR" rule is now generally applied, which means the high watermark for the data partition can't advance if the size of the ISR is smaller than the min ISR (`min.insync.replicas`), some replicas that are not in the ISR are still safe to become the leader. The KRaft controller stores such replicas in the PartitionRecord field called `Eligible Leader Replicas`. During the leader election, the controller will select the leader in the following order:
+
+ * If ISR is not empty, select one of them.
+ * If ELR is not empty, select one that is not fenced.
+ * Select the last known leader if it is unfenced. This is similar to the behavior prior to 4.0 when all the replicas are offline.
+
+
+
+## Upgrade & Downgrade
+
+The ELR is not enabled by default for 4.0. To enable the new protocol on the server, set `eligible.leader.replicas.version=1`. After the upgrade, the KRaft controller will start tracking the ELR.
+
+Downgrades are safe to perform by setting `eligible.leader.replicas.version=0`.
+
+## Tool
+
+The ELR fields can be checked through the DescribeTopicPartitions API. The admin client can fetch the ELR info by describing the topics.
+
+Note that when the ELR feature is enabled:
+
+ * The cluster-level `min.insync.replicas` config will be added if there is not any. The value is the same as the static config in the active controller.
+ * The removal of the `min.insync.replicas` config at the cluster level is not allowed.
+ * If the cluster-level `min.insync.replicas` is updated, even if the value is unchanged, all the ELR state will be cleaned.
+ * Any previously set `min.insync.replicas` value in the broker-level config will be removed. Please set it at the cluster level if necessary.
+ * The alteration of the `min.insync.replicas` config at the broker level is not allowed.
+ * If `min.insync.replicas` is updated for a topic, the ELR state will be cleaned.
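+
+For reference, a rough sketch of inspecting and changing this feature level with the features tool; the exact `kafka-features.sh` arguments shown here are an assumption, so check the tool's `--help` for your release:
+
+
+    # Show the current feature levels, including eligible.leader.replicas.version
+    $ bin/kafka-features.sh --bootstrap-server localhost:9092 describe
+
+    # Enable ELR (feature level 1); use downgrade with level 0 to disable it again
+    $ bin/kafka-features.sh --bootstrap-server localhost:9092 upgrade --feature eligible.leader.replicas.version=1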
+ + diff --git a/content/en/41/operations/geo-replication-(cross-cluster-data-mirroring).md b/content/en/41/operations/geo-replication-(cross-cluster-data-mirroring).md new file mode 100644 index 000000000..9cc529a82 --- /dev/null +++ b/content/en/41/operations/geo-replication-(cross-cluster-data-mirroring).md @@ -0,0 +1,449 @@ +--- +title: Geo-Replication (Cross-Cluster Data Mirroring) +description: Geo-Replication (Cross-Cluster Data Mirroring) +weight: 3 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Geo-Replication (Cross-Cluster Data Mirroring) + +## Geo-Replication Overview + +Kafka administrators can define data flows that cross the boundaries of individual Kafka clusters, data centers, or geo-regions. Such event streaming setups are often needed for organizational, technical, or legal requirements. Common scenarios include: + + * Geo-replication + * Disaster recovery + * Feeding edge clusters into a central, aggregate cluster + * Physical isolation of clusters (such as production vs. testing) + * Cloud migration or hybrid cloud deployments + * Legal and compliance requirements + + + +Administrators can set up such inter-cluster data flows with Kafka's MirrorMaker (version 2), a tool to replicate data between different Kafka environments in a streaming manner. MirrorMaker is built on top of the Kafka Connect framework and supports features such as: + + * Replicates topics (data plus configurations) + * Replicates consumer groups including offsets to migrate applications between clusters + * Replicates ACLs + * Preserves partitioning + * Automatically detects new topics and partitions + * Provides a wide range of metrics, such as end-to-end replication latency across multiple data centers/clusters + * Fault-tolerant and horizontally scalable operations + + + +_Note: Geo-replication with MirrorMaker replicates data across Kafka clusters. This inter-cluster replication is different from Kafka'sintra-cluster replication, which replicates data within the same Kafka cluster._ + +## What Are Replication Flows + +With MirrorMaker, Kafka administrators can replicate topics, topic configurations, consumer groups and their offsets, and ACLs from one or more source Kafka clusters to one or more target Kafka clusters, i.e., across cluster environments. In a nutshell, MirrorMaker uses Connectors to consume from source clusters and produce to target clusters. + +These directional flows from source to target clusters are called replication flows. They are defined with the format `{source_cluster}->{target_cluster}` in the MirrorMaker configuration file as described later. Administrators can create complex replication topologies based on these flows. + +Here are some example patterns: + + * Active/Active high availability deployments: `A->B, B->A` + * Active/Passive or Active/Standby high availability deployments: `A->B` + * Aggregation (e.g., from many clusters to one): `A->K, B->K, C->K` + * Fan-out (e.g., from one to many clusters): `K->A, K->B, K->C` + * Forwarding: `A->B, B->C, C->D` + + + +By default, a flow replicates all topics and consumer groups (except excluded ones). However, each replication flow can be configured independently. For instance, you can define that only specific topics or consumer groups are replicated from the source cluster to the target cluster. 
+
+Here is a first example of how to configure data replication from a `primary` cluster to a `secondary` cluster (an active/passive setup):
+
+
+    # Basic settings
+    clusters = primary, secondary
+    primary.bootstrap.servers = broker3-primary:9092
+    secondary.bootstrap.servers = broker5-secondary:9092
+
+    # Define replication flows
+    primary->secondary.enabled = true
+    primary->secondary.topics = foobar-topic, quux-.*
+
+## Configuring Geo-Replication
+
+The following sections describe how to configure and run a dedicated MirrorMaker cluster. If you want to run MirrorMaker within an existing Kafka Connect cluster or other supported deployment setups, please refer to [KIP-382: MirrorMaker 2.0](https://cwiki.apache.org/confluence/x/ooOzBQ) and be aware that the names of configuration settings may vary between deployment modes.
+
+Beyond what's covered in the following sections, further examples and information on configuration settings are available at:
+
+ * [MirrorMakerConfig](https://github.com/apache/kafka/blob/trunk/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMakerConfig.java), [MirrorConnectorConfig](https://github.com/apache/kafka/blob/trunk/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorConnectorConfig.java)
+ * [DefaultTopicFilter](https://github.com/apache/kafka/blob/trunk/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/DefaultTopicFilter.java) for topics, [DefaultGroupFilter](https://github.com/apache/kafka/blob/trunk/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/DefaultGroupFilter.java) for consumer groups
+ * Example configuration settings in [connect-mirror-maker.properties](https://github.com/apache/kafka/blob/trunk/config/connect-mirror-maker.properties), [KIP-382: MirrorMaker 2.0](https://cwiki.apache.org/confluence/x/ooOzBQ)
+
+
+
+### Configuration File Syntax
+
+The MirrorMaker configuration file is typically named `connect-mirror-maker.properties`. You can configure a variety of components in this file:
+
+ * MirrorMaker settings: global settings including cluster definitions (aliases), plus custom settings per replication flow
+ * Kafka Connect and connector settings
+ * Kafka producer, consumer, and admin client settings
+
+
+
+Example: Define MirrorMaker settings (explained in more detail later).
+
+
+    # Global settings
+    clusters = us-west, us-east # defines cluster aliases
+    us-west.bootstrap.servers = broker3-west:9092
+    us-east.bootstrap.servers = broker5-east:9092
+
+    topics = .* # all topics to be replicated by default
+
+    # Specific replication flow settings (here: flow from us-west to us-east)
+    us-west->us-east.enabled = true
+    us-west->us-east.topics = foo.*, bar.* # override the default above
+
+MirrorMaker is based on the Kafka Connect framework. Any Kafka Connect, source connector, and sink connector settings as described in the documentation chapter on Kafka Connect can be used directly in the MirrorMaker configuration, without having to change or prefix the name of the configuration setting.
+
+Example: Define custom Kafka Connect settings to be used by MirrorMaker.
+
+
+    # Setting Kafka Connect defaults for MirrorMaker
+    tasks.max = 5
+
+Most of the default Kafka Connect settings work well for MirrorMaker out-of-the-box, with the exception of `tasks.max`.
In order to evenly distribute the workload across more than one MirrorMaker process, it is recommended to set `tasks.max` to at least `2` (preferably higher) depending on the available hardware resources and the total number of topic-partitions to be replicated. + +You can further customize MirrorMaker's Kafka Connect settings _per source or target cluster_ (more precisely, you can specify Kafka Connect worker-level configuration settings "per connector"). Use the format of `{cluster}.{config_name}` in the MirrorMaker configuration file. + +Example: Define custom connector settings for the `us-west` cluster. + + + # us-west custom settings + us-west.offset.storage.topic = my-mirrormaker-offsets + +MirrorMaker internally uses the Kafka producer, consumer, and admin clients. Custom settings for these clients are often needed. To override the defaults, use the following format in the MirrorMaker configuration file: + + * `{source}.consumer.{consumer_config_name}` + * `{target}.producer.{producer_config_name}` + * `{source_or_target}.admin.{admin_config_name}` + + + +Example: Define custom producer, consumer, admin client settings. + + + # us-west cluster (from which to consume) + us-west.consumer.isolation.level = read_committed + us-west.admin.bootstrap.servers = broker57-primary:9092 + + # us-east cluster (to which to produce) + us-east.producer.compression.type = gzip + us-east.producer.buffer.memory = 32768 + us-east.admin.bootstrap.servers = broker8-secondary:9092 + +### Exactly once + +Exactly-once semantics are supported for dedicated MirrorMaker clusters as of version 3.5.0. + +For new MirrorMaker clusters, set the `exactly.once.source.support` property to enabled for all targeted Kafka clusters that should be written to with exactly-once semantics. For example, to enable exactly-once for writes to cluster `us-east`, the following configuration can be used: + + + us-east.exactly.once.source.support = enabled + +For existing MirrorMaker clusters, a two-step upgrade is necessary. Instead of immediately setting the `exactly.once.source.support` property to enabled, first set it to `preparing` on all nodes in the cluster. Once this is complete, it can be set to `enabled` on all nodes in the cluster, in a second round of restarts. + +In either case, it is also necessary to enable intra-cluster communication between the MirrorMaker nodes, as described in [KIP-710](https://cwiki.apache.org/confluence/x/4g5RCg). To do this, the `dedicated.mode.enable.internal.rest` property must be set to `true`. In addition, many of the REST-related [configuration properties available for Kafka Connect](https://kafka.apache.org/#connectconfigs) can be specified the MirrorMaker config. For example, to enable intra-cluster communication in MirrorMaker cluster with each node listening on port 8080 of their local machine, the following should be added to the MirrorMaker config file: + + + dedicated.mode.enable.internal.rest = true + listeners = http://localhost:8080 + +**Note that, if intra-cluster communication is enabled in production environments, it is highly recommended to secure the REST servers brought up by each MirrorMaker node. See the[configuration properties for Kafka Connect](https://kafka.apache.org/#connectconfigs) for information on how this can be accomplished. ** + +It is also recommended to filter records from aborted transactions out from replicated data when running MirrorMaker. 
To do this, ensure that the consumer used to read from source clusters is configured with `isolation.level` set to `read_committed`. If replicating data from cluster `us-west`, this can be done for all replication flows that read from that cluster by adding the following to the MirrorMaker config file: + + + us-west.consumer.isolation.level = read_committed + +As a final note, under the hood, MirrorMaker uses Kafka Connect source connectors to replicate data. For more information on exactly-once support for these kinds of connectors, see the [relevant docs page](https://kafka.apache.org/#connect_exactlyoncesource). + +### Creating and Enabling Replication Flows + +To define a replication flow, you must first define the respective source and target Kafka clusters in the MirrorMaker configuration file. + + * `clusters` (required): comma-separated list of Kafka cluster "aliases" + * `{clusterAlias}.bootstrap.servers` (required): connection information for the specific cluster; comma-separated list of "bootstrap" Kafka brokers + + +Example: Define two cluster aliases `primary` and `secondary`, including their connection information. + + + clusters = primary, secondary + primary.bootstrap.servers = broker10-primary:9092,broker-11-primary:9092 + secondary.bootstrap.servers = broker5-secondary:9092,broker6-secondary:9092 + +Secondly, you must explicitly enable individual replication flows with `{source}->{target}.enabled = true` as needed. Remember that flows are directional: if you need two-way (bidirectional) replication, you must enable flows in both directions. + + + # Enable replication from primary to secondary + primary->secondary.enabled = true + +By default, a replication flow will replicate all but a few special topics and consumer groups from the source cluster to the target cluster, and automatically detect any newly created topics and groups. The names of replicated topics in the target cluster will be prefixed with the name of the source cluster (see section further below). For example, the topic `foo` in the source cluster `us-west` would be replicated to a topic named `us-west.foo` in the target cluster `us-east`. + +The subsequent sections explain how to customize this basic setup according to your needs. + +### Configuring Replication Flows + +The configuration of a replication flow is a combination of top-level default settings (e.g., `topics`), on top of which flow-specific settings, if any, are applied (e.g., `us-west->us-east.topics`). To change the top-level defaults, add the respective top-level setting to the MirrorMaker configuration file. To override the defaults for a specific replication flow only, use the syntax format `{source}->{target}.{config.name}`. 
+ +The most important settings are: + + * `topics`: list of topics or a regular expression that defines which topics in the source cluster to replicate (default: `topics = .*`) + * `topics.exclude`: list of topics or a regular expression to subsequently exclude topics that were matched by the `topics` setting (default: `topics.exclude = .*[\-\.]internal, .*\.replica, __.*`) + * `groups`: list of topics or regular expression that defines which consumer groups in the source cluster to replicate (default: `groups = .*`) + * `groups.exclude`: list of topics or a regular expression to subsequently exclude consumer groups that were matched by the `groups` setting (default: `groups.exclude = console-consumer-.*, connect-.*, __.*`) + * `{source}->{target}.enable`: set to `true` to enable the replication flow (default: `false`) + + +Example: + + + # Custom top-level defaults that apply to all replication flows + topics = .* + groups = consumer-group1, consumer-group2 + + # Don't forget to enable a flow! + us-west->us-east.enabled = true + + # Custom settings for specific replication flows + us-west->us-east.topics = foo.* + us-west->us-east.groups = bar.* + us-west->us-east.emit.heartbeats = false + +Additional configuration settings are supported which can be left with their default values in most cases. See [MirrorMaker Configs](/#mirrormakerconfigs). + +### Securing Replication Flows + +MirrorMaker supports the same security settings as Kafka Connect, so please refer to the linked section for further information. + +Example: Encrypt communication between MirrorMaker and the `us-east` cluster. + + + us-east.security.protocol=SSL + us-east.ssl.truststore.location=/path/to/truststore.jks + us-east.ssl.truststore.password=my-secret-password + us-east.ssl.keystore.location=/path/to/keystore.jks + us-east.ssl.keystore.password=my-secret-password + us-east.ssl.key.password=my-secret-password + +### Custom Naming of Replicated Topics in Target Clusters + +Replicated topics in a target cluster—sometimes called _remote_ topics—are renamed according to a replication policy. MirrorMaker uses this policy to ensure that events (aka records, messages) from different clusters are not written to the same topic-partition. By default as per [DefaultReplicationPolicy](https://github.com/apache/kafka/blob/trunk/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/DefaultReplicationPolicy.java), the names of replicated topics in the target clusters have the format `{source}.{source_topic_name}`: + + + us-west us-east + ========= ================= + bar-topic + foo-topic --> us-west.foo-topic + +You can customize the separator (default: `.`) with the `replication.policy.separator` setting: + + + # Defining a custom separator + us-west->us-east.replication.policy.separator = _ + +If you need further control over how replicated topics are named, you can implement a custom `ReplicationPolicy` and override `replication.policy.class` (default is `DefaultReplicationPolicy`) in the MirrorMaker configuration. + +### Preventing Configuration Conflicts + +MirrorMaker processes share configuration via their target Kafka clusters. This behavior may cause conflicts when configurations differ among MirrorMaker processes that operate against the same target cluster. 
+
+For example, the following two MirrorMaker processes would be racy:
+
+
+    # Configuration of process 1
+    A->B.enabled = true
+    A->B.topics = foo
+
+    # Configuration of process 2
+    A->B.enabled = true
+    A->B.topics = bar
+
+In this case, the two processes will share configuration via cluster `B`, which causes a conflict. Depending on which of the two processes is the elected "leader", the result will be that either the topic `foo` or the topic `bar` is replicated, but not both.
+
+It is therefore important to keep the MirrorMaker configuration consistent across replication flows to the same target cluster. This can be achieved, for example, through automation tooling or by using a single, shared MirrorMaker configuration file for your entire organization.
+
+### Best Practice: Consume from Remote, Produce to Local
+
+To minimize latency ("producer lag"), it is recommended to locate MirrorMaker processes as close as possible to their target clusters, i.e., the clusters that they produce data to. That's because Kafka producers typically struggle more with unreliable or high-latency network connections than Kafka consumers.
+
+
+    First DC                          Second DC
+    ==========                =========================
+    primary --------- MirrorMaker --> secondary
+    (remote)                           (local)
+
+To run such a "consume from remote, produce to local" setup, run the MirrorMaker processes close to and preferably in the same location as the target clusters, and explicitly set these "local" clusters in the `--clusters` command line parameter (blank-separated list of cluster aliases):
+
+
+    # Run in secondary's data center, reading from the remote `primary` cluster
+    $ bin/connect-mirror-maker.sh connect-mirror-maker.properties --clusters secondary
+
+The `--clusters secondary` tells the MirrorMaker process that the given cluster(s) are nearby, and prevents it from replicating data or sending configuration to clusters at other, remote locations.
+
+### Example: Active/Passive High Availability Deployment
+
+The following example shows the basic settings to replicate topics from a primary to a secondary Kafka environment, but not from the secondary back to the primary. Please be aware that most production setups will need further configuration, such as security settings.
+
+
+    # Unidirectional flow (one-way) from primary to secondary cluster
+    primary.bootstrap.servers = broker1-primary:9092
+    secondary.bootstrap.servers = broker2-secondary:9092
+
+    primary->secondary.enabled = true
+    secondary->primary.enabled = false
+
+    primary->secondary.topics = foo.* # only replicate some topics
+
+### Example: Active/Active High Availability Deployment
+
+The following example shows the basic settings to replicate topics between two clusters in both directions. Please be aware that most production setups will need further configuration, such as security settings.
+
+
+    # Bidirectional flow (two-way) between us-west and us-east clusters
+    clusters = us-west, us-east
+    us-west.bootstrap.servers = broker1-west:9092,broker2-west:9092
+    us-east.bootstrap.servers = broker3-east:9092,broker4-east:9092
+
+    us-west->us-east.enabled = true
+    us-east->us-west.enabled = true
+
+_Note on preventing replication "loops" (where topics will be originally replicated from A to B, then the replicated topics will be replicated yet again from B to A, and so forth)_ : As long as you define the above flows in the same MirrorMaker configuration file, you do not need to explicitly add `topics.exclude` settings to prevent replication loops between the two clusters.
+ +### Example: Multi-Cluster Geo-Replication + +Let's put all the information from the previous sections together in a larger example. Imagine there are three data centers (west, east, north), with two Kafka clusters in each data center (e.g., `west-1`, `west-2`). The example in this section shows how to configure MirrorMaker (1) for Active/Active replication within each data center, as well as (2) for Cross Data Center Replication (XDCR). + +First, define the source and target clusters along with their replication flows in the configuration: + + + # Basic settings + clusters: west-1, west-2, east-1, east-2, north-1, north-2 + west-1.bootstrap.servers = ... + west-2.bootstrap.servers = ... + east-1.bootstrap.servers = ... + east-2.bootstrap.servers = ... + north-1.bootstrap.servers = ... + north-2.bootstrap.servers = ... + + # Replication flows for Active/Active in West DC + west-1->west-2.enabled = true + west-2->west-1.enabled = true + + # Replication flows for Active/Active in East DC + east-1->east-2.enabled = true + east-2->east-1.enabled = true + + # Replication flows for Active/Active in North DC + north-1->north-2.enabled = true + north-2->north-1.enabled = true + + # Replication flows for XDCR via west-1, east-1, north-1 + west-1->east-1.enabled = true + west-1->north-1.enabled = true + east-1->west-1.enabled = true + east-1->north-1.enabled = true + north-1->west-1.enabled = true + north-1->east-1.enabled = true + +Then, in each data center, launch one or more MirrorMaker as follows: + + + # In West DC: + $ bin/connect-mirror-maker.sh connect-mirror-maker.properties --clusters west-1 west-2 + + # In East DC: + $ bin/connect-mirror-maker.sh connect-mirror-maker.properties --clusters east-1 east-2 + + # In North DC: + $ bin/connect-mirror-maker.sh connect-mirror-maker.properties --clusters north-1 north-2 + +With this configuration, records produced to any cluster will be replicated within the data center, as well as across to other data centers. By providing the `--clusters` parameter, we ensure that each MirrorMaker process produces data to nearby clusters only. + +_Note:_ The `--clusters` parameter is, technically, not required here. MirrorMaker will work fine without it. However, throughput may suffer from "producer lag" between data centers, and you may incur unnecessary data transfer costs. + +## Starting Geo-Replication + +You can run as few or as many MirrorMaker processes (think: nodes, servers) as needed. Because MirrorMaker is based on Kafka Connect, MirrorMaker processes that are configured to replicate the same Kafka clusters run in a distributed setup: They will find each other, share configuration (see section below), load balance their work, and so on. If, for example, you want to increase the throughput of replication flows, one option is to run additional MirrorMaker processes in parallel. + +To start a MirrorMaker process, run the command: + + + $ bin/connect-mirror-maker.sh connect-mirror-maker.properties + +After startup, it may take a few minutes until a MirrorMaker process first begins to replicate data. + +Optionally, as described previously, you can set the parameter `--clusters` to ensure that the MirrorMaker process produces data to nearby clusters only. 
+
+
+    # Note: The cluster alias us-west must be defined in the configuration file
+    $ bin/connect-mirror-maker.sh connect-mirror-maker.properties \
+        --clusters us-west
+
+
+_Note when testing replication of consumer groups:_ By default, MirrorMaker does not replicate consumer groups created by the kafka-console-consumer.sh tool, which you might use to test your MirrorMaker setup on the command line. If you do want to replicate these consumer groups as well, set the `groups.exclude` configuration accordingly (default: `groups.exclude = console-consumer-.*, connect-.*, __.*`). Remember to update the configuration again once you have completed your testing.
+
+## Stopping Geo-Replication
+
+You can stop a running MirrorMaker process by sending a SIGTERM signal with the command:
+
+
+    $ kill <MirrorMaker pid>
+
+## Applying Configuration Changes
+
+To make configuration changes take effect, the MirrorMaker process(es) must be restarted.
+
+## Monitoring Geo-Replication
+
+It is recommended to monitor MirrorMaker processes to ensure all defined replication flows are up and running correctly. MirrorMaker is built on the Connect framework and inherits all of Connect's metrics, such as `source-record-poll-rate`. In addition, MirrorMaker produces its own metrics under the `kafka.connect.mirror` metric group. Metrics are tagged with the following properties:
+
+ * `source`: alias of source cluster (e.g., `primary`)
+ * `target`: alias of target cluster (e.g., `secondary`)
+ * `topic`: replicated topic on target cluster
+ * `partition`: partition being replicated
+
+
+
+Metrics are tracked for each replicated topic. The source cluster can be inferred from the topic name. For example, replicating `topic1` from `primary->secondary` will yield metrics like:
+
+ * `target=secondary`
+ * `topic=primary.topic1`
+ * `partition=1`
+
+
+The following metrics are emitted:
+
+
+    # MBean: kafka.connect.mirror:type=MirrorSourceConnector,target=([-.\w]+),topic=([-.\w]+),partition=([0-9]+)
+    record-count # number of records replicated source -> target
+    record-rate # average number of records/sec in replicated records
+    record-age-ms # age of records when they are replicated
+    record-age-ms-min
+    record-age-ms-max
+    record-age-ms-avg
+    replication-latency-ms # time it takes records to propagate source->target
+    replication-latency-ms-min
+    replication-latency-ms-max
+    replication-latency-ms-avg
+    byte-rate # average number of bytes/sec in replicated records
+    byte-count # number of bytes replicated source -> target
+
+    # MBean: kafka.connect.mirror:type=MirrorCheckpointConnector,source=([-.\w]+),target=([-.\w]+),group=([-.\w]+),topic=([-.\w]+),partition=([0-9]+)
+
+    checkpoint-latency-ms # time it takes to replicate consumer offsets
+    checkpoint-latency-ms-min
+    checkpoint-latency-ms-max
+    checkpoint-latency-ms-avg
+
+These metrics do not differentiate between created-at and log-append timestamps.
diff --git a/content/en/41/operations/hardware-and-os.md b/content/en/41/operations/hardware-and-os.md
new file mode 100644
index 000000000..781dfc2d2
--- /dev/null
+++ b/content/en/41/operations/hardware-and-os.md
@@ -0,0 +1,134 @@
+---
+title: Hardware and OS
+description: Hardware and OS
+weight: 6
+tags: ['kafka', 'docs']
+aliases:
+keywords:
+type: docs
+---
+
+# Hardware and OS
+
+We are using dual quad-core Intel Xeon machines with 24GB of memory.
+
+You need sufficient memory to buffer active readers and writers.
You can do a back-of-the-envelope estimate of memory needs by assuming you want to be able to buffer for 30 seconds and compute your memory need as write_throughput*30. + +The disk throughput is important. We have 8x7200 rpm SATA drives. In general disk throughput is the performance bottleneck, and more disks is better. Depending on how you configure flush behavior you may or may not benefit from more expensive disks (if you force flush often then higher RPM SAS drives may be better). + +## OS + +Kafka should run well on any unix system and has been tested on Linux and Solaris. + +We have seen a few issues running on Windows and Windows is not currently a well supported platform though we would be happy to change that. + +It is unlikely to require much OS-level tuning, but there are three potentially important OS-level configurations: + + * File descriptor limits: Kafka uses file descriptors for log segments and open connections. If a broker hosts many partitions, consider that the broker needs at least (number_of_partitions)*(partition_size/segment_size) to track all log segments in addition to the number of connections the broker makes. We recommend at least 100000 allowed file descriptors for the broker processes as a starting point. Note: The mmap() function adds an extra reference to the file associated with the file descriptor fildes which is not removed by a subsequent close() on that file descriptor. This reference is removed when there are no more mappings to the file. + * Max socket buffer size: can be increased to enable high-performance data transfer between data centers as [described here](https://www.psc.edu/index.php/networking/641-tcp-tune). + * Maximum number of memory map areas a process may have (aka vm.max_map_count). [See the Linux kernel documentation](https://kernel.org/doc/Documentation/sysctl/vm.txt). You should keep an eye at this OS-level property when considering the maximum number of partitions a broker may have. By default, on a number of Linux systems, the value of vm.max_map_count is somewhere around 65535. Each log segment, allocated per partition, requires a pair of index/timeindex files, and each of these files consumes 1 map area. In other words, each log segment uses 2 map areas. Thus, each partition requires minimum 2 map areas, as long as it hosts a single log segment. That is to say, creating 50000 partitions on a broker will result allocation of 100000 map areas and likely cause broker crash with OutOfMemoryError (Map failed) on a system with default vm.max_map_count. Keep in mind that the number of log segments per partition varies depending on the segment size, load intensity, retention policy and, generally, tends to be more than one. + + +## Disks and Filesystem + +We recommend using multiple drives to get good throughput and not sharing the same drives used for Kafka data with application logs or other OS filesystem activity to ensure good latency. You can either RAID these drives together into a single volume or format and mount each drive as its own directory. Since Kafka has replication the redundancy provided by RAID can also be provided at the application level. This choice has several tradeoffs. + +If you configure multiple data directories partitions will be assigned round-robin to data directories. Each partition will be entirely in one of the data directories. If data is not well balanced among partitions this can lead to load imbalance between disks. 
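+
+As a minimal sketch, multiple data directories are configured with the `log.dirs` broker property; the paths below are placeholders:
+
+
+    # One directory per drive; each partition lives entirely in one of these directories
+    log.dirs=/data/kafka-1,/data/kafka-2,/data/kafka-3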
+ +RAID can potentially do better at balancing load between disks (although it doesn't always seem to) because it balances load at a lower level. The primary downside of RAID is that it is usually a big performance hit for write throughput and reduces the available disk space. + +Another potential benefit of RAID is the ability to tolerate disk failures. However our experience has been that rebuilding the RAID array is so I/O intensive that it effectively disables the server, so this does not provide much real availability improvement. + +## Application vs. OS Flush Management + +Kafka always immediately writes all data to the filesystem and supports the ability to configure the flush policy that controls when data is forced out of the OS cache and onto disk using the flush. This flush policy can be controlled to force data to disk after a period of time or after a certain number of messages has been written. There are several choices in this configuration. + +Kafka must eventually call fsync to know that data was flushed. When recovering from a crash for any log segment not known to be fsync'd Kafka will check the integrity of each message by checking its CRC and also rebuild the accompanying offset index file as part of the recovery process executed on startup. + +Note that durability in Kafka does not require syncing data to disk, as a failed node will always recover from its replicas. + +We recommend using the default flush settings which disable application fsync entirely. This means relying on the background flush done by the OS and Kafka's own background flush. This provides the best of all worlds for most uses: no knobs to tune, great throughput and latency, and full recovery guarantees. We generally feel that the guarantees provided by replication are stronger than sync to local disk, however the paranoid still may prefer having both and application level fsync policies are still supported. + +The drawback of using application level flush settings is that it is less efficient in its disk usage pattern (it gives the OS less leeway to re-order writes) and it can introduce latency as fsync in most Linux filesystems blocks writes to the file whereas the background flushing does much more granular page-level locking. + +In general you don't need to do any low-level tuning of the filesystem, but in the next few sections we will go over some of this in case it is useful. + +## Understanding Linux OS Flush Behavior + +In Linux, data written to the filesystem is maintained in [pagecache](https://en.wikipedia.org/wiki/Page_cache) until it must be written out to disk (due to an application-level fsync or the OS's own flush policy). The flushing of data is done by a set of background threads called pdflush (or in post 2.6.32 kernels "flusher threads"). + +Pdflush has a configurable policy that controls how much dirty data can be maintained in cache and for how long before it must be written back to disk. This policy is described [here](https://web.archive.org/web/20160518040713/http://www.westnet.com/~gsmith/content/linux-pdflush.htm). When Pdflush cannot keep up with the rate of data being written it will eventually cause the writing process to block incurring latency in the writes to slow down the accumulation of data. + +You can see the current state of OS memory usage by doing + + + $ cat /proc/meminfo + +The meaning of these values are described in the link above. 
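+
+To get a rough view of how much dirty data is outstanding and which flusher thresholds are in effect, a sketch along these lines (the sysctl names are standard Linux, but defaults vary by distribution and kernel):
+
+
+    # Dirty/writeback pages currently held in the pagecache
+    $ grep -E 'Dirty|Writeback' /proc/meminfo
+
+    # Flusher thresholds and expiry currently in effect
+    $ sysctl vm.dirty_background_ratio vm.dirty_ratio vm.dirty_expire_centisecs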
+ +Using pagecache has several advantages over an in-process cache for storing data that will be written out to disk: + + * The I/O scheduler will batch together consecutive small writes into bigger physical writes which improves throughput. + * The I/O scheduler will attempt to re-sequence writes to minimize movement of the disk head which improves throughput. + * It automatically uses all the free memory on the machine + + +## Filesystem Selection + +Kafka uses regular files on disk, and as such it has no hard dependency on a specific filesystem. The two filesystems which have the most usage, however, are EXT4 and XFS. Historically, EXT4 has had more usage, but recent improvements to the XFS filesystem have shown it to have better performance characteristics for Kafka's workload with no compromise in stability. + +Comparison testing was performed on a cluster with significant message loads, using a variety of filesystem creation and mount options. The primary metric in Kafka that was monitored was the "Request Local Time", indicating the amount of time append operations were taking. XFS resulted in much better local times (160ms vs. 250ms+ for the best EXT4 configuration), as well as lower average wait times. The XFS performance also showed less variability in disk performance. + +### General Filesystem Notes + +For any filesystem used for data directories, on Linux systems, the following options are recommended to be used at mount time: + + * noatime: This option disables updating of a file's atime (last access time) attribute when the file is read. This can eliminate a significant number of filesystem writes, especially in the case of bootstrapping consumers. Kafka does not rely on the atime attributes at all, so it is safe to disable this. + + + +### XFS Notes + +The XFS filesystem has a significant amount of auto-tuning in place, so it does not require any change in the default settings, either at filesystem creation time or at mount. The only tuning parameters worth considering are: + + * largeio: This affects the preferred I/O size reported by the stat call. While this can allow for higher performance on larger disk writes, in practice it had minimal or no effect on performance. + * nobarrier: For underlying devices that have battery-backed cache, this option can provide a little more performance by disabling periodic write flushes. However, if the underlying device is well-behaved, it will report to the filesystem that it does not require flushes, and this option will have no effect. + + + +### EXT4 Notes + +EXT4 is a serviceable choice of filesystem for the Kafka data directories, however getting the most performance out of it will require adjusting several mount options. In addition, these options are generally unsafe in a failure scenario, and will result in much more data loss and corruption. For a single broker failure, this is not much of a concern as the disk can be wiped and the replicas rebuilt from the cluster. In a multiple-failure scenario, such as a power outage, this can mean underlying filesystem (and therefore data) corruption that is not easily recoverable. The following options can be adjusted: + + * data=writeback: Ext4 defaults to data=ordered which puts a strong order on some writes. Kafka does not require this ordering as it does very paranoid data recovery on all unflushed log. This setting removes the ordering constraint and seems to significantly reduce latency. 
+ * Disabling journaling: Journaling is a tradeoff: it makes reboots faster after server crashes but it introduces a great deal of additional locking which adds variance to write performance. Those who don't care about reboot time and want to reduce a major source of write latency spikes can turn off journaling entirely. + * commit=num_secs: This tunes the frequency with which ext4 commits to its metadata journal. Setting this to a lower value reduces the loss of unflushed data during a crash. Setting this to a higher value will improve throughput. + * nobh: This setting controls additional ordering guarantees when using data=writeback mode. This should be safe with Kafka as we do not depend on write ordering and improves throughput and latency. + * delalloc: Delayed allocation means that the filesystem avoid allocating any blocks until the physical write occurs. This allows ext4 to allocate a large extent instead of smaller pages and helps ensure the data is written sequentially. This feature is great for throughput. It does seem to involve some locking in the filesystem which adds a bit of latency variance. + * fast_commit: Added in Linux 5.10, [fast_commit](https://lwn.net/Articles/842385/) is a lighter-weight journaling method which can be used with data=ordered journaling mode. Enabling it seems to significantly reduce latency. + + +## Replace KRaft Controller Disk + +When Kafka is configured to use KRaft, the controllers store the cluster metadata in the directory specified in `metadata.log.dir` \-- or the first log directory, if `metadata.log.dir` is not configured. See the documentation for `metadata.log.dir` for details. + +If the data in the cluster metadata directory is lost either because of hardware failure or the hardware needs to be replaced, care should be taken when provisioning the new controller node. The new controller node should not be formatted and started until the majority of the controllers have all of the committed data. To determine if the majority of the controllers have the committed data, run the kafka-metadata-quorum.sh tool to describe the replication status: + + + $ bin/kafka-metadata-quorum.sh --bootstrap-server localhost:9092 describe --replication + NodeId DirectoryId LogEndOffset Lag LastFetchTimestamp LastCaughtUpTimestamp Status + 1 dDo1k_pRSD-VmReEpu383g 966 0 1732367153528 1732367153528 Leader + 2 wQWaQMJYpcifUPMBGeRHqg 966 0 1732367153304 1732367153304 Observer + ... ... ... ... ... ... + +Check and wait until the `Lag` is small for a majority of the controllers. If the leader's end offset is not increasing, you can wait until the lag is 0 for a majority; otherwise, you can pick the latest leader end offset and wait until all replicas have reached it. Check and wait until the `LastFetchTimestamp` and `LastCaughtUpTimestamp` are close to each other for the majority of the controllers. At this point it is safer to format the controller's metadata log directory. This can be done by running the kafka-storage.sh command. + + + $ bin/kafka-storage.sh format --cluster-id uuid --config config/server.properties + +It is possible for the `bin/kafka-storage.sh format` command above to fail with a message like `Log directory ... is already formatted`. This can happen when combined mode is used and only the metadata log directory was lost but not the others. In that case and only in that case, can you run the `bin/kafka-storage.sh format` command with the `--ignore-formatted` option. + +Start the KRaft controller after formatting the log directories. 
+ + + $ bin/kafka-server-start.sh config/server.properties diff --git a/content/en/41/operations/java-version.md b/content/en/41/operations/java-version.md new file mode 100644 index 000000000..0b1cd2249 --- /dev/null +++ b/content/en/41/operations/java-version.md @@ -0,0 +1,31 @@ +--- +title: Java Version +description: Java Version +weight: 5 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Java Version + +Java 17 and Java 21 are fully supported while Java 11 is supported for a subset of modules (clients, streams and related). Support for versions newer than the most recent LTS version are best-effort and the project typically only tests with the most recent non LTS version. + +We generally recommend running Apache Kafka with the most recent LTS release (Java 21 at the time of writing) for performance, efficiency and support reasons. From a security perspective, we recommend the latest released patch version as older versions typically have disclosed security vulnerabilities. + +Typical arguments for running Kafka with OpenJDK-based Java implementations (including Oracle JDK) are: + + + -Xmx6g -Xms6g -XX:MetaspaceSize=96m -XX:+UseG1GC + -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:G1HeapRegionSize=16M + -XX:MinMetaspaceFreeRatio=50 -XX:MaxMetaspaceFreeRatio=80 -XX:+ExplicitGCInvokesConcurrent + +For reference, here are the stats for one of LinkedIn's busiest clusters (at peak) that uses said Java arguments: + + * 60 brokers + * 50k partitions (replication factor 2) + * 800k messages/sec in + * 300 MB/sec inbound, 1 GB/sec+ outbound + +All of the brokers in that cluster have a 90% GC pause time of about 21ms with less than 1 young GC per second. diff --git a/content/en/41/operations/kraft.md b/content/en/41/operations/kraft.md new file mode 100644 index 000000000..7083d9ac5 --- /dev/null +++ b/content/en/41/operations/kraft.md @@ -0,0 +1,267 @@ +--- +title: KRaft +description: KRaft +weight: 8 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# KRaft + +## Configuration + +### Process Roles + +In KRaft mode each Kafka server can be configured as a controller, a broker, or both using the `process.roles` property. This property can have the following values: + + * If `process.roles` is set to `broker`, the server acts as a broker. + * If `process.roles` is set to `controller`, the server acts as a controller. + * If `process.roles` is set to `broker,controller`, the server acts as both a broker and a controller. + + + +Kafka servers that act as both brokers and controllers are referred to as "combined" servers. Combined servers are simpler to operate for small use cases like a development environment. The key disadvantage is that the controller will be less isolated from the rest of the system. For example, it is not possible to roll or scale the controllers separately from the brokers in combined mode. Combined mode is not recommended in critical deployment environments. + +### Controllers + +In KRaft mode, specific Kafka servers are selected to be controllers. The servers selected to be controllers will participate in the metadata quorum. Each controller is either an active or a hot standby for the current active controller. + +A Kafka admin will typically select 3 or 5 servers for this role, depending on factors like cost and the number of concurrent failures your system should withstand without availability impact. A majority of the controllers must be alive in order to maintain availability. 
With 3 controllers, the cluster can tolerate 1 controller failure; with 5 controllers, the cluster can tolerate 2 controller failures. + +All of the servers in a Kafka cluster discover the active controller using the `controller.quorum.bootstrap.servers` property. All the controllers should be enumerated in this property. Each controller is identified by its `host` and `port` information. For example: + + + controller.quorum.bootstrap.servers=host1:port1,host2:port2,host3:port3 + +If a Kafka cluster has 3 controllers named controller1, controller2 and controller3, then controller1 may have the following configuration: + + + process.roles=controller + node.id=1 + listeners=CONTROLLER://controller1.example.com:9093 + controller.quorum.bootstrap.servers=controller1.example.com:9093,controller2.example.com:9093,controller3.example.com:9093 + controller.listener.names=CONTROLLER + +Every broker and controller must set the `controller.quorum.bootstrap.servers` property. + +## Upgrade + +Apache Kafka 4.1 added support for upgrading a cluster from a static controller configuration to a dynamic controller configuration. Dynamic controller configuration allows users to add controllers to and remove controllers from the cluster. See the Controller membership changes section for more details. + +This feature upgrade is done by upgrading the KRaft feature version and updating the nodes' configuration. + +### Describe KRaft Version + +Support for dynamic controller clusters was added in `kraft.version=1` or `release-version 4.1`. To determine which kraft feature version the cluster is using, you can execute the following CLI command: + + + $ bin/kafka-features.sh --bootstrap-controller localhost:9093 describe + ... + Feature: kraft.version SupportedMinVersion: 0 SupportedMaxVersion: 1 FinalizedVersionLevel: 0 Epoch: 7 + Feature: metadata.version SupportedMinVersion: 3.3-IV3 SupportedMaxVersion: 4.0-IV3 FinalizedVersionLevel: 4.0-IV3 Epoch: 7 + +If the `FinalizedVersionLevel` for `Feature: kraft.version` is `0`, the version needs to be upgraded to at least `1` to support a dynamic controller cluster. + +### Upgrade KRaft Version + +The KRaft feature version can be upgraded to support dynamic controller clusters by using the `kafka-features.sh` CLI command. To upgrade all of the feature versions to the latest version: + + + $ bin/kafka-features.sh --bootstrap-server localhost:9092 upgrade --release-version 4.1 + +To upgrade just the KRaft feature version: + + + $ bin/kafka-features.sh --bootstrap-server localhost:9092 upgrade --feature kraft.version=1 + +### Update KRaft Config + +KRaft version 1 deprecated the `controller.quorum.voters` property and added the `controller.quorum.bootstrap.servers` property. After checking that the KRaft version has been successfully upgraded to at least version `1`, remove the `controller.quorum.voters` property and add the `controller.quorum.bootstrap.servers` property to all of the nodes (controllers and brokers) in the cluster. + + + process.roles=... + node.id=... + controller.quorum.bootstrap.servers=controller1.example.com:9093,controller2.example.com:9093,controller3.example.com:9093 + controller.listener.names=CONTROLLER + +## Provisioning Nodes + +The `bin/kafka-storage.sh random-uuid` command can be used to generate a cluster ID for your new cluster. This cluster ID must be used when formatting each server in the cluster with the `bin/kafka-storage.sh format` command. + +This is different from how Kafka has operated in the past.
Previously, Kafka would format blank storage directories automatically, and also generate a new cluster ID automatically. One reason for the change is that auto-formatting can sometimes obscure an error condition. This is particularly important for the metadata log maintained by the controller and broker servers. If a majority of the controllers were able to start with an empty log directory, a leader might be able to be elected with missing committed data. + +### Bootstrap a Standalone Controller + +The recommended method for creating a new KRaft controller cluster is to bootstrap it with one voter and dynamically add the rest of the controllers. Bootstrapping the first controller can be done with the following CLI command: + + + $ bin/kafka-storage.sh format --cluster-id --standalone --config config/controller.properties + +This command will 1) create a meta.properties file in metadata.log.dir with a randomly generated directory.id, 2) create a snapshot at 00000000000000000000-0000000000.checkpoint with the necessary control records (KRaftVersionRecord and VotersRecord) to make this Kafka node the only voter for the quorum. + +### Bootstrap with Multiple Controllers + +The KRaft cluster metadata partition can also be bootstrapped with more than one voter. This can be done by using the --initial-controllers flag: + + + CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)" + CONTROLLER_0_UUID="$(bin/kafka-storage.sh random-uuid)" + CONTROLLER_1_UUID="$(bin/kafka-storage.sh random-uuid)" + CONTROLLER_2_UUID="$(bin/kafka-storage.sh random-uuid)" + + # In each controller execute + bin/kafka-storage.sh format --cluster-id ${CLUSTER_ID} \ + --initial-controllers "0@controller-0:1234:${CONTROLLER_0_UUID},1@controller-1:1234:${CONTROLLER_1_UUID},2@controller-2:1234:${CONTROLLER_2_UUID}" \ + --config config/controller.properties + +This command is similar to the standalone version but the snapshot at 00000000000000000000-0000000000.checkpoint will instead contain a VotersRecord that includes information for all of the controllers specified in --initial-controllers. It is important that the value of this flag is the same in all of the controllers with the same cluster id. In the replica description 0@controller-0:1234:3Db5QLSqSZieL3rJBUUegA, 0 is the replica id, 3Db5QLSqSZieL3rJBUUegA is the replica directory id, controller-0 is the replica's host and 1234 is the replica's port. + +### Formatting Brokers and New Controllers + +When provisioning new broker and controller nodes that we want to add to an existing Kafka cluster, use the `kafka-storage.sh format` command with the --no-initial-controllers flag. + + + $ bin/kafka-storage.sh format --cluster-id --config config/server.properties --no-initial-controllers + +## Controller membership changes + +### Static versus Dynamic KRaft Quorums + +There are two ways to run KRaft: the old way using static controller quorums, and the new way using KIP-853 dynamic controller quorums. + +When using a static quorum, the configuration file for each broker and controller must specify the IDs, hostnames, and ports of all controllers in `controller.quorum.voters`. + +In contrast, when using a dynamic quorum, you should set `controller.quorum.bootstrap.servers` instead. This configuration key need not contain all the controllers, but it should contain as many as possible so that all the servers can locate the quorum. In other words, its function is much like the `bootstrap.servers` configuration used by Kafka clients. 
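+ +For illustration, here is a minimal sketch of the two styles side by side, reusing the controller hostnames from the earlier configuration example (the ids, hostnames and ports are placeholders for your own controllers): + + + # Static quorum: every node enumerates the id, host and port of every controller + controller.quorum.voters=1@controller1.example.com:9093,2@controller2.example.com:9093,3@controller3.example.com:9093 + # Dynamic quorum (KIP-853): nodes only list bootstrap endpoints; voter membership is managed at runtime + controller.quorum.bootstrap.servers=controller1.example.com:9093,controller2.example.com:9093,controller3.example.com:9093 + +As described above, a given node sets one of these two properties or the other, not both.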
+ +If you are not sure whether you are using static or dynamic quorums, you can determine this by running something like the following: + + + $ bin/kafka-features.sh --bootstrap-controller localhost:9093 describe + + +If the `kraft.version` field is level 0 or absent, you are using a static quorum. If it is 1 or above, you are using a dynamic quorum. For example, here is an example of a static quorum: + + + Feature: kraft.version SupportedMinVersion: 0 SupportedMaxVersion: 1 FinalizedVersionLevel: 0 Epoch: 5 + Feature: metadata.version SupportedMinVersion: 3.3-IV3 SupportedMaxVersion: 3.9-IV0 FinalizedVersionLevel: 3.9-IV0 Epoch: 5 + + +Here is another example of a static quorum: + + + Feature: metadata.version SupportedMinVersion: 3.3-IV3 SupportedMaxVersion: 3.8-IV0 FinalizedVersionLevel: 3.8-IV0 Epoch: 5 + + +Here is an example of a dynamic quorum: + + + Feature: kraft.version SupportedMinVersion: 0 SupportedMaxVersion: 1 FinalizedVersionLevel: 1 Epoch: 5 + Feature: metadata.version SupportedMinVersion: 3.3-IV3 SupportedMaxVersion: 3.9-IV0 FinalizedVersionLevel: 3.9-IV0 Epoch: 5 + + +The static versus dynamic nature of the quorum is determined at the time of formatting. Specifically, the quorum will be formatted as dynamic if `controller.quorum.voters` is **not** present, and if the software version is Apache Kafka 3.9 or newer. If you have followed the instructions earlier in this document, you will get a dynamic quorum. + +If you would like the formatting process to fail if a dynamic quorum cannot be achieved, format your controllers using the `--feature kraft.version=1`. (Note that you should not supply this flag when formatting brokers -- only when formatting controllers.) + + + $ bin/kafka-storage.sh format -t KAFKA_CLUSTER_ID --feature kraft.version=1 -c controller.properties + + +Note: To migrate from static voter set to dynamic voter set, please refer to the Upgrade section. + +### Add New Controller + +If a dynamic controller cluster already exists, it can be expanded by first provisioning a new controller using the kafka-storage.sh tool and starting the controller. After starting the controller, the replication to the new controller can be monitored using the `bin/kafka-metadata-quorum.sh describe --replication` command. Once the new controller has caught up to the active controller, it can be added to the cluster using the `bin/kafka-metadata-quorum.sh add-controller` command. When using broker endpoints use the --bootstrap-server flag: + + + $ bin/kafka-metadata-quorum.sh --command-config config/controller.properties --bootstrap-server localhost:9092 add-controller + +When using controller endpoints use the --bootstrap-controller flag: + + + $ bin/kafka-metadata-quorum.sh --command-config config/controller.properties --bootstrap-controller localhost:9093 add-controller + +### Remove Controller + +If the dynamic controller cluster already exists, it can be shrunk using the `bin/kafka-metadata-quorum.sh remove-controller` command. Until KIP-996: Pre-vote has been implemented and released, it is recommended to shutdown the controller that will be removed before running the remove-controller command. 
When using broker endpoints use the --bootstrap-server flag: + + + $ bin/kafka-metadata-quorum.sh --bootstrap-server localhost:9092 remove-controller --controller-id CONTROLLER_ID --controller-directory-id DIRECTORY_ID + +When using controller endpoints use the --bootstrap-controller flag: + + + $ bin/kafka-metadata-quorum.sh --bootstrap-controller localhost:9093 remove-controller --controller-id CONTROLLER_ID --controller-directory-id DIRECTORY_ID + +## Debugging + +### Metadata Quorum Tool + +The kafka-metadata-quorum.sh tool can be used to describe the runtime state of the cluster metadata partition. For example, the following command displays a summary of the metadata quorum: + + + $ bin/kafka-metadata-quorum.sh --bootstrap-server localhost:9092 describe --status + ClusterId: fMCL8kv1SWm87L_Md-I2hg + LeaderId: 3002 + LeaderEpoch: 2 + HighWatermark: 10 + MaxFollowerLag: 0 + MaxFollowerLagTimeMs: -1 + CurrentVoters: [{"id": 3000, "directoryId": "ILZ5MPTeRWakmJu99uBJCA", "endpoints": ["CONTROLLER://localhost:9093"]}, + {"id": 3001, "directoryId": "b-DwmhtOheTqZzPoh52kfA", "endpoints": ["CONTROLLER://localhost:9094"]}, + {"id": 3002, "directoryId": "g42deArWBTRM5A1yuVpMCg", "endpoints": ["CONTROLLER://localhost:9095"]}] + CurrentObservers: [{"id": 0, "directoryId": "3Db5QLSqSZieL3rJBUUegA"}, + {"id": 1, "directoryId": "UegA3Db5QLSqSZieL3rJBU"}, + {"id": 2, "directoryId": "L3rJBUUegA3Db5QLSqSZie"}] + +### Dump Log Tool + +The kafka-dump-log.sh tool can be used to debug the log segments and snapshots for the cluster metadata directory. The tool will scan the provided files and decode the metadata records. For example, this command decodes and prints the records in the first log segment: + + + $ bin/kafka-dump-log.sh --cluster-metadata-decoder --files metadata_log_dir/__cluster_metadata-0/00000000000000000000.log + +This command decodes and prints the records in a cluster metadata snapshot: + + + $ bin/kafka-dump-log.sh --cluster-metadata-decoder --files metadata_log_dir/__cluster_metadata-0/00000000000000000100-0000000001.checkpoint + +### Metadata Shell + +The kafka-metadata-shell.sh tool can be used to interactively inspect the state of the cluster metadata partition: + + + $ bin/kafka-metadata-shell.sh --snapshot metadata_log_dir/__cluster_metadata-0/00000000000000007228-0000000001.checkpoint + >> ls / + brokers local metadataQuorum topicIds topics + >> ls /topics + foo + >> cat /topics/foo/0/data + { + "partitionId" : 0, + "topicId" : "5zoAlv-xEh9xRANKXt1Lbg", + "replicas" : [ 1 ], + "isr" : [ 1 ], + "removingReplicas" : null, + "addingReplicas" : null, + "leader" : 1, + "leaderEpoch" : 0, + "partitionEpoch" : 0 + } + >> exit + +Note: `00000000000000000000-0000000000.checkpoint` does not contain cluster metadata. Use a valid snapshot file when examining metadata with the `kafka-metadata-shell.sh` tool. + +## Deploying Considerations + + * Kafka server's `process.roles` should be set to either `broker` or `controller` but not both. Combined mode can be used in development environments, but it should be avoided in critical deployment environments. + * For redundancy, a Kafka cluster should use 3 or more controllers, depending on factors like cost and the number of concurrent failures your system should withstand without availability impact. For the KRaft controller cluster to withstand `N` concurrent failures the controller cluster must include `2N + 1` controllers. + * The Kafka controllers store all the metadata for the cluster in memory and on disk.
We believe that for a typical Kafka cluster, 5GB of main memory and 5GB of disk space on the metadata log directory is sufficient. + + + +## ZooKeeper to KRaft Migration + +In order to migrate from ZooKeeper to KRaft, you need to use a bridge release. The last bridge release is Kafka 3.9. See the [ZooKeeper to KRaft Migration steps](/39/#kraft_zk_migration) in the 3.9 documentation. diff --git a/content/en/41/operations/monitoring.md b/content/en/41/operations/monitoring.md new file mode 100644 index 000000000..4c02ebd64 --- /dev/null +++ b/content/en/41/operations/monitoring.md @@ -0,0 +1,598 @@ +--- +title: Monitoring +description: Monitoring +weight: 7 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Monitoring + +Kafka uses Yammer Metrics for metrics reporting in the server. The Java clients use Kafka Metrics, a built-in metrics registry that minimizes transitive dependencies pulled into client applications. Both expose metrics via JMX and can be configured to report stats using pluggable stats reporters to hook up to your monitoring system. + +All Kafka rate metrics have a corresponding cumulative count metric with suffix `-total`. For example, `records-consumed-rate` has a corresponding metric named `records-consumed-total`. + +The easiest way to see the available metrics is to fire up jconsole and point it at a running kafka client or server; this will allow browsing all metrics with JMX. + +## Security Considerations for Remote Monitoring using JMX + +Apache Kafka disables remote JMX by default. You can enable remote monitoring using JMX by setting the environment variable `JMX_PORT` for processes started using the CLI or standard Java system properties to enable remote JMX programmatically. You must enable security when enabling remote JMX in production scenarios to ensure that unauthorized users cannot monitor or control your broker or application as well as the platform on which these are running. Note that authentication is disabled for JMX by default in Kafka and security configs must be overridden for production deployments by setting the environment variable `KAFKA_JMX_OPTS` for processes started using the CLI or by setting appropriate Java system properties. See [Monitoring and Management Using JMX Technology](https://docs.oracle.com/javase/8/docs/technotes/guides/management/agent.html) for details on securing JMX. + +We do graphing and alerting on the following metrics: Description | Mbean name | Normal value +---|---|--- +Message in rate | kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,topic=([-.\w]+) | Incoming message rate per topic. Omitting 'topic=(...)' will yield the all-topic rate. +Byte in rate from clients | kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec,topic=([-.\w]+) | Byte in (from the clients) rate per topic. Omitting 'topic=(...)' will yield the all-topic rate. +Byte in rate from other brokers | kafka.server:type=BrokerTopicMetrics,name=ReplicationBytesInPerSec | Byte in (from the other brokers) rate across all topics. +Controller Request rate from Broker | kafka.controller:type=ControllerChannelManager,name=RequestRateAndQueueTimeMs,brokerId=([0-9]+) | The rate (requests per second) at which the ControllerChannelManager takes requests from the queue of the given broker, and the time it takes for a request to stay in this queue before it is taken from the queue. +Controller Event queue size | kafka.controller:type=ControllerEventManager,name=EventQueueSize | Size of the ControllerEventManager's queue.
+Controller Event queue time | kafka.controller:type=ControllerEventManager,name=EventQueueTimeMs | Time that takes for any event (except the Idle event) to wait in the ControllerEventManager's queue before being processed +Request rate | kafka.network:type=RequestMetrics,name=RequestsPerSec,request={Produce|FetchConsumer|FetchFollower},version=([0-9]+) | +Error rate | kafka.network:type=RequestMetrics,name=ErrorsPerSec,request=([-.\w]+),error=([-.\w]+) | Number of errors in responses counted per-request-type, per-error-code. If a response contains multiple errors, all are counted. error=NONE indicates successful responses. +Produce request rate | kafka.server:type=BrokerTopicMetrics,name=TotalProduceRequestsPerSec,topic=([-.\w]+) | Produce request rate per topic. Omitting 'topic=(...)' will yield the all-topic rate. +Fetch request rate | kafka.server:type=BrokerTopicMetrics,name=TotalFetchRequestsPerSec,topic=([-.\w]+) | Fetch request (from clients or followers) rate per topic. Omitting 'topic=(...)' will yield the all-topic rate. +Failed produce request rate | kafka.server:type=BrokerTopicMetrics,name=FailedProduceRequestsPerSec,topic=([-.\w]+) | Failed Produce request rate per topic. Omitting 'topic=(...)' will yield the all-topic rate. +Failed fetch request rate | kafka.server:type=BrokerTopicMetrics,name=FailedFetchRequestsPerSec,topic=([-.\w]+) | Failed Fetch request (from clients or followers) rate per topic. Omitting 'topic=(...)' will yield the all-topic rate. +Request size in bytes | kafka.network:type=RequestMetrics,name=RequestBytes,request=([-.\w]+) | Size of requests for each request type. +Temporary memory size in bytes | kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request={Produce|Fetch} | Temporary memory used for message format conversions and decompression. +Message conversion time | kafka.network:type=RequestMetrics,name=MessageConversionsTimeMs,request={Produce|Fetch} | Time in milliseconds spent on message format conversions. +Message conversion rate | kafka.server:type=BrokerTopicMetrics,name={Produce|Fetch}MessageConversionsPerSec,topic=([-.\w]+) | Message format conversion rate, for Produce or Fetch requests, per topic. Omitting 'topic=(...)' will yield the all-topic rate. +Request Queue Size | kafka.network:type=RequestChannel,name=RequestQueueSize | Size of the request queue. +Byte out rate to clients | kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec,topic=([-.\w]+) | Byte out (to the clients) rate per topic. Omitting 'topic=(...)' will yield the all-topic rate. +Byte out rate to other brokers | kafka.server:type=BrokerTopicMetrics,name=ReplicationBytesOutPerSec | Byte out (to the other brokers) rate across all topics +Rejected byte rate | kafka.server:type=BrokerTopicMetrics,name=BytesRejectedPerSec,topic=([-.\w]+) | Rejected byte rate per topic, due to the record batch size being greater than max.message.bytes configuration. Omitting 'topic=(...)' will yield the all-topic rate. 
+Message validation failure rate due to no key specified for compacted topic | kafka.server:type=BrokerTopicMetrics,name=NoKeyCompactedTopicRecordsPerSec | 0 +Message validation failure rate due to invalid magic number | kafka.server:type=BrokerTopicMetrics,name=InvalidMagicNumberRecordsPerSec | 0 +Message validation failure rate due to incorrect crc checksum | kafka.server:type=BrokerTopicMetrics,name=InvalidMessageCrcRecordsPerSec | 0 +Message validation failure rate due to non-continuous offset or sequence number in batch | kafka.server:type=BrokerTopicMetrics,name=InvalidOffsetOrSequenceRecordsPerSec | 0 +Log flush rate and time | kafka.log:type=LogFlushStats,name=LogFlushRateAndTimeMs | +# of offline log directories | kafka.log:type=LogManager,name=OfflineLogDirectoryCount | 0 +Leader election rate | kafka.controller:type=ControllerStats,name=LeaderElectionRateAndTimeMs | non-zero when there are broker failures +Unclean leader election rate | kafka.controller:type=ControllerStats,name=UncleanLeaderElectionsPerSec | 0 +Election from Eligible leader replicas rate | kafka.controller:type=ControllerStats,name=ElectionFromEligibleLeaderReplicasPerSec | 0 +Is controller active on broker | kafka.controller:type=KafkaController,name=ActiveControllerCount | only one broker in the cluster should have 1 +Pending topic deletes | kafka.controller:type=KafkaController,name=TopicsToDeleteCount | +Pending replica deletes | kafka.controller:type=KafkaController,name=ReplicasToDeleteCount | +Ineligible pending topic deletes | kafka.controller:type=KafkaController,name=TopicsIneligibleToDeleteCount | +Ineligible pending replica deletes | kafka.controller:type=KafkaController,name=ReplicasIneligibleToDeleteCount | +# of under replicated partitions (|ISR| < |all replicas|) | kafka.server:type=ReplicaManager,name=UnderReplicatedPartitions | 0 +# of under minIsr partitions (|ISR| < min.insync.replicas) | kafka.server:type=ReplicaManager,name=UnderMinIsrPartitionCount | 0 +# of at minIsr partitions (|ISR| = min.insync.replicas) | kafka.server:type=ReplicaManager,name=AtMinIsrPartitionCount | 0 +Producer Id counts | kafka.server:type=ReplicaManager,name=ProducerIdCount | Count of all producer ids created by transactional and idempotent producers in each replica on the broker +Partition counts | kafka.server:type=ReplicaManager,name=PartitionCount | mostly even across brokers +Offline Replica counts | kafka.server:type=ReplicaManager,name=OfflineReplicaCount | 0 +Leader replica counts | kafka.server:type=ReplicaManager,name=LeaderCount | mostly even across brokers +ISR shrink rate | kafka.server:type=ReplicaManager,name=IsrShrinksPerSec | If a broker goes down, ISR for some of the partitions will shrink. When that broker is up again, ISR will be expanded once the replicas are fully caught up. Other than that, the expected value for both ISR shrink rate and expansion rate is 0. +ISR expansion rate | kafka.server:type=ReplicaManager,name=IsrExpandsPerSec | See above +Failed ISR update rate | kafka.server:type=ReplicaManager,name=FailedIsrUpdatesPerSec | 0 +Max lag in messages btw follower and leader replicas | kafka.server:type=ReplicaFetcherManager,name=MaxLag,clientId=Replica | lag should be proportional to the maximum batch size of a produce request. +Lag in messages per follower replica | kafka.server:type=FetcherLagMetrics,name=ConsumerLag,clientId=([-.\w]+),topic=([-.\w]+),partition=([0-9]+) | lag should be proportional to the maximum batch size of a produce request. 
+Requests waiting in the producer purgatory | kafka.server:type=DelayedOperationPurgatory,name=PurgatorySize,delayedOperation=Produce | non-zero if ack=-1 is used +Requests waiting in the fetch purgatory | kafka.server:type=DelayedOperationPurgatory,name=PurgatorySize,delayedOperation=Fetch | size depends on fetch.wait.max.ms in the consumer +Request total time | kafka.network:type=RequestMetrics,name=TotalTimeMs,request={Produce|FetchConsumer|FetchFollower} | broken into queue, local, remote and response send time +Time the request waits in the request queue | kafka.network:type=RequestMetrics,name=RequestQueueTimeMs,request={Produce|FetchConsumer|FetchFollower} | +Time the request is processed at the leader | kafka.network:type=RequestMetrics,name=LocalTimeMs,request={Produce|FetchConsumer|FetchFollower} | +Time the request waits for the follower | kafka.network:type=RequestMetrics,name=RemoteTimeMs,request={Produce|FetchConsumer|FetchFollower} | non-zero for produce requests when ack=-1 +Time the request waits in the response queue | kafka.network:type=RequestMetrics,name=ResponseQueueTimeMs,request={Produce|FetchConsumer|FetchFollower} | +Time to send the response | kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request={Produce|FetchConsumer|FetchFollower} | +Number of messages the consumer lags behind the producer by. Published by the consumer, not broker. | kafka.consumer:type=consumer-fetch-manager-metrics,client-id={client-id} Attribute: records-lag-max | +The average fraction of time the network processors are idle | kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent | between 0 and 1, ideally > 0.3 +The number of connections disconnected on a processor due to a client not re-authenticating and then using the connection beyond its expiration time for anything other than re-authentication | kafka.server:type=socket-server-metrics,listener=[SASL_PLAINTEXT|SASL_SSL],networkProcessor=<#>,name=expired-connections-killed-count | ideally 0 when re-authentication is enabled, implying there are no longer any older, pre-2.2.0 clients connecting to this (listener, processor) combination +The total number of connections disconnected, across all processors, due to a client not re-authenticating and then using the connection beyond its expiration time for anything other than re-authentication | kafka.network:type=SocketServer,name=ExpiredConnectionsKilledCount | ideally 0 when re-authentication is enabled, implying there are no longer any older, pre-2.2.0 clients connecting to this broker +The average fraction of time the request handler threads are idle | kafka.server:type=KafkaRequestHandlerPool,name=RequestHandlerAvgIdlePercent | between 0 and 1, ideally > 0.3 +Bandwidth quota metrics per (user, client-id), user or client-id | kafka.server:type={Produce|Fetch},user=([-.\w]+),client-id=([-.\w]+) | Two attributes. throttle-time indicates the amount of time in ms the client was throttled. Ideally = 0. byte-rate indicates the data produce/consume rate of the client in bytes/sec. For (user, client-id) quotas, both user and client-id are specified. If per-client-id quota is applied to the client, user is not specified. If per-user quota is applied, client-id is not specified. +Request quota metrics per (user, client-id), user or client-id | kafka.server:type=Request,user=([-.\w]+),client-id=([-.\w]+) | Two attributes. throttle-time indicates the amount of time in ms the client was throttled. Ideally = 0. 
request-time indicates the percentage of time spent in broker network and I/O threads to process requests from client group. For (user, client-id) quotas, both user and client-id are specified. If per-client-id quota is applied to the client, user is not specified. If per-user quota is applied, client-id is not specified. +Requests exempt from throttling | kafka.server:type=Request | exempt-throttle-time indicates the percentage of time spent in broker network and I/O threads to process requests that are exempt from throttling. +Max time to load group metadata | kafka.server:type=group-coordinator-metrics,name=partition-load-time-max | maximum time, in milliseconds, it took to load offsets and group metadata from the consumer offset partitions loaded in the last 30 seconds (including time spent waiting for the loading task to be scheduled) +Avg time to load group metadata | kafka.server:type=group-coordinator-metrics,name=partition-load-time-avg | average time, in milliseconds, it took to load offsets and group metadata from the consumer offset partitions loaded in the last 30 seconds (including time spent waiting for the loading task to be scheduled) +Max time to load transaction metadata | kafka.server:type=transaction-coordinator-metrics,name=partition-load-time-max | maximum time, in milliseconds, it took to load transaction metadata from the consumer offset partitions loaded in the last 30 seconds (including time spent waiting for the loading task to be scheduled) +Avg time to load transaction metadata | kafka.server:type=transaction-coordinator-metrics,name=partition-load-time-avg | average time, in milliseconds, it took to load transaction metadata from the consumer offset partitions loaded in the last 30 seconds (including time spent waiting for the loading task to be scheduled) +Rate of transactional verification errors | kafka.server:type=AddPartitionsToTxnManager,name=VerificationFailureRate | Rate of verifications that returned in failure either from the AddPartitionsToTxn API response or through errors in the AddPartitionsToTxnManager. In steady state 0, but transient errors are expected during rolls and reassignments of the transactional state partition. +Time to verify a transactional request | kafka.server:type=AddPartitionsToTxnManager,name=VerificationTimeMs | The amount of time queueing while a possible previous request is in-flight plus the round trip to the transaction coordinator to verify (or not verify) +Number of reassigning partitions | kafka.server:type=ReplicaManager,name=ReassigningPartitions | The number of reassigning leader partitions on a broker. +Outgoing byte rate of reassignment traffic | kafka.server:type=BrokerTopicMetrics,name=ReassignmentBytesOutPerSec | 0; non-zero when a partition reassignment is in progress. +Incoming byte rate of reassignment traffic | kafka.server:type=BrokerTopicMetrics,name=ReassignmentBytesInPerSec | 0; non-zero when a partition reassignment is in progress. +Size of a partition on disk (in bytes) | kafka.log:type=Log,name=Size,topic=([-.\w]+),partition=([0-9]+) | The size of a partition on disk, measured in bytes. +Number of log segments in a partition | kafka.log:type=Log,name=NumLogSegments,topic=([-.\w]+),partition=([0-9]+) | The number of log segments in a partition. +First offset in a partition | kafka.log:type=Log,name=LogStartOffset,topic=([-.\w]+),partition=([0-9]+) | The first offset in a partition. 
+Last offset in a partition | kafka.log:type=Log,name=LogEndOffset,topic=([-.\w]+),partition=([0-9]+) | The last offset in a partition. +Remaining logs to recover | kafka.log:type=LogManager,name=remainingLogsToRecover | The number of remaining logs for each log.dir to be recovered.This metric provides an overview of the recovery progress for a given log directory. +Remaining segments to recover for the current recovery thread | kafka.log:type=LogManager,name=remainingSegmentsToRecover | The number of remaining segments assigned to the currently active recovery thread. +Log directory offline status | kafka.log:type=LogManager,name=LogDirectoryOffline | Indicates if a log directory is offline (1) or online (0). + +## Group Coordinator Monitoring + +The following set of metrics are available for monitoring the group coordinator: + +The Partition Count, per State | kafka.server:type=group-coordinator-metrics,name=num-partitions,state={loading|active|failed} | The number of `__consumer_offsets` partitions hosted by the broker, broken down by state +---|---|--- +Partition Maximum Loading Time | kafka.server:type=group-coordinator-metrics,name=partition-load-time-max | The maximum loading time needed to read the state from the `__consumer_offsets` partitions +Partition Average Loading Time | kafka.server:type=group-coordinator-metrics,name=partition-load-time-avg | The average loading time needed to read the state from the `__consumer_offsets` partitions +Average Thread Idle Ratio | kafka.server:type=group-coordinator-metrics,name=thread-idle-ratio-avg | The average idle ratio of the coordinator threads +Event Queue Size | kafka.server:type=group-coordinator-metrics,name=event-queue-size | The number of events waiting to be processed in the queue +Event Queue Time (Ms) | kafka.server:type=group-coordinator-metrics,name=event-queue-time-ms-[max|p50|p95|p99|p999] | The time that an event spent waiting in the queue to be processed +Event Processing Time (Ms) | kafka.server:type=group-coordinator-metrics,name=event-processing-time-ms-[max|p50|p95|p99|p999] | The time that an event took to be processed +Event Purgatory Time (Ms) | kafka.server:type=group-coordinator-metrics,name=event-purgatory-time-ms-[max|p50|p95|p99|p999] | The time that an event waited in the purgatory before being completed +Batch Flush Time (Ms) | kafka.server:type=group-coordinator-metrics,name=batch-flush-time-ms-[max|p50|p95|p99|p999] | The time that a batch took to be flushed to the local partition +Group Count, per group type | kafka.server:type=group-coordinator-metrics,name=group-count,protocol={consumer|classic} | Total number of group per group type: Classic or Consumer +Consumer Group Count, per state | kafka.server:type=group-coordinator-metrics,name=consumer-group-count,state=[empty|assigning|reconciling|stable|dead] | Total number of Consumer Groups in each state: Empty, Assigning, Reconciling, Stable, Dead +Consumer Group Rebalance Rate | kafka.server:type=group-coordinator-metrics,name=consumer-group-rebalance-rate | The rebalance rate of consumer groups +Consumer Group Rebalance Count | kafka.server:type=group-coordinator-metrics,name=consumer-group-rebalance-count | Total number of Consumer Group Rebalances +Classic Group Count | kafka.server:type=GroupMetadataManager,name=NumGroups | Total number of Classic Groups +Classic Group Count, per State | kafka.server:type=GroupMetadataManager,name=NumGroups[PreparingRebalance,CompletingRebalance,Empty,Stable,Dead] | The number of Classic Groups in each state: 
PreparingRebalance, CompletingRebalance, Empty, Stable, Dead +Classic Group Completed Rebalance Rate | kafka.server:type=group-coordinator-metrics,name=group-completed-rebalance-rate | The rate of classic group completed rebalances +Classic Group Completed Rebalance Count | kafka.server:type=group-coordinator-metrics,name=group-completed-rebalance-count | The total number of classic group completed rebalances +Group Offset Count | kafka.server:type=GroupMetadataManager,name=NumOffsets | Total number of committed offsets for Classic and Consumer Groups +Offset Commit Rate | kafka.server:type=group-coordinator-metrics,name=offset-commit-rate | The rate of committed offsets +Offset Commit Count | kafka.server:type=group-coordinator-metrics,name=offset-commit-count | The total number of committed offsets +Offset Expiration Rate | kafka.server:type=group-coordinator-metrics,name=offset-expiration-rate | The rate of expired offsets +Offset Expiration Count | kafka.server:type=group-coordinator-metrics,name=offset-expiration-count | The total number of expired offsets +Offset Deletion Rate | kafka.server:type=group-coordinator-metrics,name=offset-deletion-rate | The rate of administrative deleted offsets +Offset Deletion Count | kafka.server:type=group-coordinator-metrics,name=offset-deletion-count | The total number of administrative deleted offsets + +## Tiered Storage Monitoring + +The following set of metrics are available for monitoring of the tiered storage feature: + +Metric/Attribute name | Description | Mbean name +---|---|--- +Remote Fetch Bytes Per Sec | Rate of bytes read from remote storage per topic. Omitting 'topic=(...)' will yield the all-topic rate | kafka.server:type=BrokerTopicMetrics,name=RemoteFetchBytesPerSec,topic=([-.\w]+) +Remote Fetch Requests Per Sec | Rate of read requests from remote storage per topic. Omitting 'topic=(...)' will yield the all-topic rate | kafka.server:type=BrokerTopicMetrics,name=RemoteFetchRequestsPerSec,topic=([-.\w]+) +Remote Fetch Errors Per Sec | Rate of read errors from remote storage per topic. Omitting 'topic=(...)' will yield the all-topic rate | kafka.server:type=BrokerTopicMetrics,name=RemoteFetchErrorsPerSec,topic=([-.\w]+) +Remote Copy Bytes Per Sec | Rate of bytes copied to remote storage per topic. Omitting 'topic=(...)' will yield the all-topic rate | kafka.server:type=BrokerTopicMetrics,name=RemoteCopyBytesPerSec,topic=([-.\w]+) +Remote Copy Requests Per Sec | Rate of write requests to remote storage per topic. Omitting 'topic=(...)' will yield the all-topic rate | kafka.server:type=BrokerTopicMetrics,name=RemoteCopyRequestsPerSec,topic=([-.\w]+) +Remote Copy Errors Per Sec | Rate of write errors from remote storage per topic. Omitting 'topic=(...)' will yield the all-topic rate | kafka.server:type=BrokerTopicMetrics,name=RemoteCopyErrorsPerSec,topic=([-.\w]+) +Remote Copy Lag Bytes | Bytes which are eligible for tiering, but are not in remote storage yet. Omitting 'topic=(...)' will yield the all-topic sum | kafka.server:type=BrokerTopicMetrics,name=RemoteCopyLagBytes,topic=([-.\w]+) +Remote Copy Lag Segments | Segments which are eligible for tiering, but are not in remote storage yet. Omitting 'topic=(...)' will yield the all-topic count | kafka.server:type=BrokerTopicMetrics,name=RemoteCopyLagSegments,topic=([-.\w]+) +Remote Delete Requests Per Sec | Rate of delete requests to remote storage per topic. 
Omitting 'topic=(...)' will yield the all-topic rate | kafka.server:type=BrokerTopicMetrics,name=RemoteDeleteRequestsPerSec,topic=([-.\w]+) +Remote Delete Errors Per Sec | Rate of delete errors from remote storage per topic. Omitting 'topic=(...)' will yield the all-topic rate | kafka.server:type=BrokerTopicMetrics,name=RemoteDeleteErrorsPerSec,topic=([-.\w]+) +Remote Delete Lag Bytes | Tiered bytes which are eligible for deletion, but have not been deleted yet. Omitting 'topic=(...)' will yield the all-topic sum | kafka.server:type=BrokerTopicMetrics,name=RemoteDeleteLagBytes,topic=([-.\w]+) +Remote Delete Lag Segments | Tiered segments which are eligible for deletion, but have not been deleted yet. Omitting 'topic=(...)' will yield the all-topic count | kafka.server:type=BrokerTopicMetrics,name=RemoteDeleteLagSegments,topic=([-.\w]+) +Build Remote Log Aux State Requests Per Sec | Rate of requests for rebuilding the auxiliary state from remote storage per topic. Omitting 'topic=(...)' will yield the all-topic rate | kafka.server:type=BrokerTopicMetrics,name=BuildRemoteLogAuxStateRequestsPerSec,topic=([-.\w]+) +Build Remote Log Aux State Errors Per Sec | Rate of errors for rebuilding the auxiliary state from remote storage per topic. Omitting 'topic=(...)' will yield the all-topic rate | kafka.server:type=BrokerTopicMetrics,name=BuildRemoteLogAuxStateErrorsPerSec,topic=([-.\w]+) +Remote Log Size Computation Time | The amount of time needed to compute the size of the remote log. Omitting 'topic=(...)' will yield the all-topic time | kafka.server:type=BrokerTopicMetrics,name=RemoteLogSizeComputationTime,topic=([-.\w]+) +Remote Log Size Bytes | The total size of a remote log in bytes. Omitting 'topic=(...)' will yield the all-topic sum | kafka.server:type=BrokerTopicMetrics,name=RemoteLogSizeBytes,topic=([-.\w]+) +Remote Log Metadata Count | The total number of metadata entries for remote storage. Omitting 'topic=(...)' will yield the all-topic count | kafka.server:type=BrokerTopicMetrics,name=RemoteLogMetadataCount,topic=([-.\w]+) +Delayed Remote Fetch Expires Per Sec | The number of expired remote fetches per second. 
Omitting 'topic=(...)' will yield the all-topic rate | kafka.server:type=DelayedRemoteFetchMetrics,name=ExpiresPerSec,topic=([-.\w]+) +RemoteLogReader Task Queue Size | Size of the queue holding remote storage read tasks | org.apache.kafka.storage.internals.log:type=RemoteStorageThreadPool,name=RemoteLogReaderTaskQueueSize +RemoteLogReader Avg Idle Percent | Average idle percent of thread pool for processing remote storage read tasks | org.apache.kafka.storage.internals.log:type=RemoteStorageThreadPool,name=RemoteLogReaderAvgIdlePercent +RemoteLogManager Tasks Avg Idle Percent | Average idle percent of thread pool for copying data to remote storage | kafka.log.remote:type=RemoteLogManager,name=RemoteLogManagerTasksAvgIdlePercent +RemoteLogManager Avg Broker Fetch Throttle Time | The average time in millis remote fetches was throttled by a broker | kafka.server:type=RemoteLogManager, name=remote-fetch-throttle-time-avg +RemoteLogManager Max Broker Fetch Throttle Time | The max time in millis remote fetches was throttled by a broker | kafka.server:type=RemoteLogManager, name=remote-fetch-throttle-time-max +RemoteLogManager Avg Broker Copy Throttle Time | The average time in millis remote copies was throttled by a broker | kafka.server:type=RemoteLogManager, name=remote-copy-throttle-time-avg +RemoteLogManager Max Broker Copy Throttle Time | The max time in millis remote copies was throttled by a broker | kafka.server:type=RemoteLogManager, name=remote-copy-throttle-time-max + +## KRaft Monitoring Metrics + +The set of metrics that allow monitoring of the KRaft quorum and the metadata log. +Note that some exposed metrics depend on the role of the node as defined by `process.roles` + +### KRaft Quorum Monitoring Metrics + +These metrics are reported on both Controllers and Brokers in a KRaft Cluster Metric/Attribute name | Description | Mbean name +---|---|--- +Current State | The current state of this member; possible values are leader, candidate, voted, follower, unattached, observer. | kafka.server:type=raft-metrics +Current Leader | The current quorum leader's id; -1 indicates unknown. | kafka.server:type=raft-metrics +Current Voted | The current voted leader's id; -1 indicates not voted for anyone. | kafka.server:type=raft-metrics +Current Epoch | The current quorum epoch. | kafka.server:type=raft-metrics +High Watermark | The high watermark maintained on this member; -1 if it is unknown. | kafka.server:type=raft-metrics +Log End Offset | The current raft log end offset. | kafka.server:type=raft-metrics +Number of Unknown Voter Connections | Number of unknown voters whose connection information is not cached. This value of this metric is always 0. | kafka.server:type=raft-metrics +Average Commit Latency | The average time in milliseconds to commit an entry in the raft log. | kafka.server:type=raft-metrics +Maximum Commit Latency | The maximum time in milliseconds to commit an entry in the raft log. | kafka.server:type=raft-metrics +Average Election Latency | The average time in milliseconds spent on electing a new leader. | kafka.server:type=raft-metrics +Maximum Election Latency | The maximum time in milliseconds spent on electing a new leader. | kafka.server:type=raft-metrics +Fetch Records Rate | The average number of records fetched from the leader of the raft quorum. | kafka.server:type=raft-metrics +Append Records Rate | The average number of records appended per sec by the leader of the raft quorum. 
| kafka.server:type=raft-metrics +Average Poll Idle Ratio | The average fraction of time the client's poll() is idle as opposed to waiting for the user code to process records. | kafka.server:type=raft-metrics +Current Metadata Version | Outputs the feature level of the current effective metadata version. | kafka.server:type=MetadataLoader,name=CurrentMetadataVersion +Metadata Snapshot Load Count | The total number of times we have loaded a KRaft snapshot since the process was started. | kafka.server:type=MetadataLoader,name=HandleLoadSnapshotCount +Latest Metadata Snapshot Size | The total size in bytes of the latest snapshot that the node has generated. If none have been generated yet, this is the size of the latest snapshot that was loaded. If no snapshots have been generated or loaded, this is 0. | kafka.server:type=SnapshotEmitter,name=LatestSnapshotGeneratedBytes +Latest Metadata Snapshot Age | The interval in milliseconds since the latest snapshot that the node has generated. If none have been generated yet, this is approximately the time delta since the process was started. | kafka.server:type=SnapshotEmitter,name=LatestSnapshotGeneratedAgeMs + +### KRaft Controller Monitoring Metrics + +Metric/Attribute name | Description | Mbean name +---|---|--- +Active Controller Count | The number of Active Controllers on this node. Valid values are '0' or '1'. | kafka.controller:type=KafkaController,name=ActiveControllerCount +Event Queue Time Ms | A Histogram of the time in milliseconds that requests spent waiting in the Controller Event Queue. | kafka.controller:type=ControllerEventManager,name=EventQueueTimeMs +Event Queue Processing Time Ms | A Histogram of the time in milliseconds that requests spent being processed in the Controller Event Queue. | kafka.controller:type=ControllerEventManager,name=EventQueueProcessingTimeMs +Fenced Broker Count | The number of fenced brokers as observed by this Controller. | kafka.controller:type=KafkaController,name=FencedBrokerCount +Active Broker Count | The number of active brokers as observed by this Controller. | kafka.controller:type=KafkaController,name=ActiveBrokerCount +Global Topic Count | The number of global topics as observed by this Controller. | kafka.controller:type=KafkaController,name=GlobalTopicCount +Global Partition Count | The number of global partitions as observed by this Controller. | kafka.controller:type=KafkaController,name=GlobalPartitionCount +Offline Partition Count | The number of offline topic partitions (non-internal) as observed by this Controller. | kafka.controller:type=KafkaController,name=OfflinePartitionsCount +Preferred Replica Imbalance Count | The count of topic partitions for which the leader is not the preferred leader. | kafka.controller:type=KafkaController,name=PreferredReplicaImbalanceCount +Metadata Error Count | The number of times this controller node has encountered an error during metadata log processing. | kafka.controller:type=KafkaController,name=MetadataErrorCount +Last Applied Record Offset | The offset of the last record from the cluster metadata partition that was applied by the Controller. | kafka.controller:type=KafkaController,name=LastAppliedRecordOffset +Last Committed Record Offset | The offset of the last record committed to this Controller. | kafka.controller:type=KafkaController,name=LastCommittedRecordOffset +Last Applied Record Timestamp | The timestamp of the last record from the cluster metadata partition that was applied by the Controller. 
| kafka.controller:type=KafkaController,name=LastAppliedRecordTimestamp +Last Applied Record Lag Ms | The difference between now and the timestamp of the last record from the cluster metadata partition that was applied by the controller. For active Controllers the value of this lag is always zero. | kafka.controller:type=KafkaController,name=LastAppliedRecordLagMs +Timed-out Broker Heartbeat Count | The number of broker heartbeats that timed out on this controller since the process was started. Note that only active controllers handle heartbeats, so only they will see increases in this metric. | kafka.controller:type=KafkaController,name=TimedOutBrokerHeartbeatCount +Number Of Operations Started In Event Queue | The total number of controller event queue operations that were started. This includes deferred operations. | kafka.controller:type=KafkaController,name=EventQueueOperationsStartedCount +Number of Operations Timed Out In Event Queue | The total number of controller event queue operations that timed out before they could be performed. | kafka.controller:type=KafkaController,name=EventQueueOperationsTimedOutCount +Number Of New Controller Elections | Counts the number of times this node has seen a new controller elected. A transition to the "no leader" state is not counted here. If the same controller as before becomes active, that still counts. | kafka.controller:type=KafkaController,name=NewActiveControllersCount + +### KRaft Broker Monitoring Metrics + +Metric/Attribute name | Description | Mbean name +---|---|--- +Last Applied Record Offset | The offset of the last record from the cluster metadata partition that was applied by the broker | kafka.server:type=broker-metadata-metrics +Last Applied Record Timestamp | The timestamp of the last record from the cluster metadata partition that was applied by the broker. | kafka.server:type=broker-metadata-metrics +Last Applied Record Lag Ms | The difference between now and the timestamp of the last record from the cluster metadata partition that was applied by the broker | kafka.server:type=broker-metadata-metrics +Metadata Load Error Count | The number of errors encountered by the BrokerMetadataListener while loading the metadata log and generating a new MetadataDelta based on it. | kafka.server:type=broker-metadata-metrics +Metadata Apply Error Count | The number of errors encountered by the BrokerMetadataPublisher while applying a new MetadataImage based on the latest MetadataDelta. | kafka.server:type=broker-metadata-metrics + +## Common monitoring metrics for producer/consumer/connect/streams + +The following metrics are available on producer/consumer/connector/streams instances. For specific metrics, please see following sections. Metric/Attribute name | Description | Mbean name +---|---|--- +connection-close-rate | Connections closed per second in the window. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +connection-close-total | Total connections closed in the window. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +connection-creation-rate | New connections established per second in the window. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +connection-creation-total | Total new connections established in the window. 
| kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +network-io-rate | The average number of network operations (reads or writes) on all connections per second. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +network-io-total | The total number of network operations (reads or writes) on all connections. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +outgoing-byte-rate | The average number of outgoing bytes sent per second to all servers. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +outgoing-byte-total | The total number of outgoing bytes sent to all servers. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +request-rate | The average number of requests sent per second. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +request-total | The total number of requests sent. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +request-size-avg | The average size of all requests in the window. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +request-size-max | The maximum size of any request sent in the window. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +incoming-byte-rate | Bytes/second read off all sockets. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +incoming-byte-total | Total bytes read off all sockets. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +response-rate | Responses received per second. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +response-total | Total responses received. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +select-rate | Number of times the I/O layer checked for new I/O to perform per second. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +select-total | Total number of times the I/O layer checked for new I/O to perform. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +io-wait-time-ns-avg | The average length of time the I/O thread spent waiting for a socket ready for reads or writes in nanoseconds. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +io-wait-time-ns-total | The total time the I/O thread spent waiting in nanoseconds. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +io-wait-ratio | The fraction of time the I/O thread spent waiting. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +io-time-ns-avg | The average length of time for I/O per select call in nanoseconds. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +io-time-ns-total | The total time the I/O thread spent doing I/O in nanoseconds. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +io-ratio | The fraction of time the I/O thread spent doing I/O. 
| kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +connection-count | The current number of active connections. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +successful-authentication-rate | Connections per second that were successfully authenticated using SASL or SSL. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +successful-authentication-total | Total connections that were successfully authenticated using SASL or SSL. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +failed-authentication-rate | Connections per second that failed authentication. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +failed-authentication-total | Total connections that failed authentication. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +successful-reauthentication-rate | Connections per second that were successfully re-authenticated using SASL. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +successful-reauthentication-total | Total connections that were successfully re-authenticated using SASL. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +reauthentication-latency-max | The maximum latency in ms observed due to re-authentication. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +reauthentication-latency-avg | The average latency in ms observed due to re-authentication. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +failed-reauthentication-rate | Connections per second that failed re-authentication. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +failed-reauthentication-total | Total connections that failed re-authentication. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) +successful-authentication-no-reauth-total | Total connections that were successfully authenticated by older, pre-2.2.0 SASL clients that do not support re-authentication. May only be non-zero. | kafka.[producer|consumer|connect]:type=[producer|consumer|connect]-metrics,client-id=([-.\w]+) + +## Common Per-broker metrics for producer/consumer/connect/streams + +The following metrics are available on producer/consumer/connector/streams instances. For specific metrics, please see following sections. Metric/Attribute name | Description | Mbean name +---|---|--- +outgoing-byte-rate | The average number of outgoing bytes sent per second for a node. | kafka.[producer|consumer|connect]:type=[consumer|producer|connect]-node-metrics,client-id=([-.\w]+),node-id=([0-9]+) +outgoing-byte-total | The total number of outgoing bytes sent for a node. | kafka.[producer|consumer|connect]:type=[consumer|producer|connect]-node-metrics,client-id=([-.\w]+),node-id=([0-9]+) +request-rate | The average number of requests sent per second for a node. | kafka.[producer|consumer|connect]:type=[consumer|producer|connect]-node-metrics,client-id=([-.\w]+),node-id=([0-9]+) +request-total | The total number of requests sent for a node. 
| kafka.[producer|consumer|connect]:type=[consumer|producer|connect]-node-metrics,client-id=([-.\w]+),node-id=([0-9]+) +request-size-avg | The average size of all requests in the window for a node. | kafka.[producer|consumer|connect]:type=[consumer|producer|connect]-node-metrics,client-id=([-.\w]+),node-id=([0-9]+) +request-size-max | The maximum size of any request sent in the window for a node. | kafka.[producer|consumer|connect]:type=[consumer|producer|connect]-node-metrics,client-id=([-.\w]+),node-id=([0-9]+) +incoming-byte-rate | The average number of bytes received per second for a node. | kafka.[producer|consumer|connect]:type=[consumer|producer|connect]-node-metrics,client-id=([-.\w]+),node-id=([0-9]+) +incoming-byte-total | The total number of bytes received for a node. | kafka.[producer|consumer|connect]:type=[consumer|producer|connect]-node-metrics,client-id=([-.\w]+),node-id=([0-9]+) +request-latency-avg | The average request latency in ms for a node. | kafka.[producer|consumer|connect]:type=[consumer|producer|connect]-node-metrics,client-id=([-.\w]+),node-id=([0-9]+) +request-latency-max | The maximum request latency in ms for a node. | kafka.[producer|consumer|connect]:type=[consumer|producer|connect]-node-metrics,client-id=([-.\w]+),node-id=([0-9]+) +response-rate | Responses received per second for a node. | kafka.[producer|consumer|connect]:type=[consumer|producer|connect]-node-metrics,client-id=([-.\w]+),node-id=([0-9]+) +response-total | Total responses received for a node. | kafka.[producer|consumer|connect]:type=[consumer|producer|connect]-node-metrics,client-id=([-.\w]+),node-id=([0-9]+) + +## Producer monitoring + +The following metrics are available on producer instances. Metric/Attribute name | Description | Mbean name +---|---|--- +waiting-threads | The number of user threads blocked waiting for buffer memory to enqueue their records. | kafka.producer:type=producer-metrics,client-id=([-.\w]+) +buffer-total-bytes | The maximum amount of buffer memory the client can use (whether or not it is currently used). | kafka.producer:type=producer-metrics,client-id=([-.\w]+) +buffer-available-bytes | The total amount of buffer memory that is not being used (either unallocated or in the free list). | kafka.producer:type=producer-metrics,client-id=([-.\w]+) +buffer-exhausted-rate | The average per-second number of record sends that are dropped due to buffer exhaustion | kafka.producer:type=producer-metrics,client-id=([-.\w]+) +buffer-exhausted-total | The total number of record sends that are dropped due to buffer exhaustion | kafka.producer:type=producer-metrics,client-id=([-.\w]+) +bufferpool-wait-time | The fraction of time an appender waits for space allocation. | kafka.producer:type=producer-metrics,client-id=([-.\w]+) +bufferpool-wait-ratio | The fraction of time an appender waits for space allocation. | kafka.producer:type=producer-metrics,client-id=([-.\w]+) +bufferpool-wait-time-ns-total | The total time an appender waits for space allocation in nanoseconds. | kafka.producer:type=producer-metrics,client-id=([-.\w]+) +flush-time-ns-total | The total time the Producer spent in Producer.flush in nanoseconds. | kafka.producer:type=producer-metrics,client-id=([-.\w]+) +txn-init-time-ns-total | The total time the Producer spent initializing transactions in nanoseconds (for EOS). | kafka.producer:type=producer-metrics,client-id=([-.\w]+) +txn-begin-time-ns-total | The total time the Producer spent in beginTransaction in nanoseconds (for EOS). 
| kafka.producer:type=producer-metrics,client-id=([-.\w]+) +txn-send-offsets-time-ns-total | The total time the Producer spent sending offsets to transactions in nanoseconds (for EOS). | kafka.producer:type=producer-metrics,client-id=([-.\w]+) +txn-commit-time-ns-total | The total time the Producer spent committing transactions in nanoseconds (for EOS). | kafka.producer:type=producer-metrics,client-id=([-.\w]+) +txn-abort-time-ns-total | The total time the Producer spent aborting transactions in nanoseconds (for EOS). | kafka.producer:type=producer-metrics,client-id=([-.\w]+) +metadata-wait-time-ns-total | the total time in nanoseconds that has spent waiting for metadata from the Kafka broker | kafka.producer:type=producer-metrics,client-id=([-.\w]+) + +### Producer Sender Metrics + +{{< include-html file="/static/41/generated/producer_metrics.html" >}} + +## Consumer monitoring + +The following metrics are available on consumer instances. Metric/Attribute name | Description | Mbean name +---|---|--- +time-between-poll-avg | The average delay between invocations of poll(). | kafka.consumer:type=consumer-metrics,client-id=([-.\w]+) +time-between-poll-max | The max delay between invocations of poll(). | kafka.consumer:type=consumer-metrics,client-id=([-.\w]+) +last-poll-seconds-ago | The number of seconds since the last poll() invocation. | kafka.consumer:type=consumer-metrics,client-id=([-.\w]+) +poll-idle-ratio-avg | The average fraction of time the consumer's poll() is idle as opposed to waiting for the user code to process records. | kafka.consumer:type=consumer-metrics,client-id=([-.\w]+) +committed-time-ns-total | The total time the Consumer spent in committed in nanoseconds. | kafka.consumer:type=consumer-metrics,client-id=([-.\w]+) +commit-sync-time-ns-total | The total time the Consumer spent committing offsets in nanoseconds (for AOS). 
| kafka.consumer:type=consumer-metrics,client-id=([-.\w]+) + +### Consumer Group Metrics + +Metric/Attribute name | Description | Mbean name +---|---|--- +commit-latency-avg | The average time taken for a commit request | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +commit-latency-max | The max time taken for a commit request | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +commit-rate | The number of commit calls per second | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +commit-total | The total number of commit calls | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +assigned-partitions | The number of partitions currently assigned to this consumer | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +heartbeat-response-time-max | The max time taken to receive a response to a heartbeat request | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +heartbeat-rate | The average number of heartbeats per second | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +heartbeat-total | The total number of heartbeats | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +join-time-avg | The average time taken for a group rejoin | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +join-time-max | The max time taken for a group rejoin | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +join-rate | The number of group joins per second | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +join-total | The total number of group joins | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +sync-time-avg | The average time taken for a group sync | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +sync-time-max | The max time taken for a group sync | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +sync-rate | The number of group syncs per second | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +sync-total | The total number of group syncs | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +rebalance-latency-avg | The average time taken for a group rebalance | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +rebalance-latency-max | The max time taken for a group rebalance | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +rebalance-latency-total | The total time taken for group rebalances so far | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +rebalance-total | The total number of group rebalances participated | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +rebalance-rate-per-hour | The number of group rebalance participated per hour | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +failed-rebalance-total | The total number of failed group rebalances | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +failed-rebalance-rate-per-hour | The number of failed group rebalance event per hour | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +last-rebalance-seconds-ago | The number of seconds since the last rebalance event | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +last-heartbeat-seconds-ago | The number of seconds since the last controller heartbeat | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) 
+partitions-revoked-latency-avg | The average time taken by the on-partitions-revoked rebalance listener callback | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +partitions-revoked-latency-max | The max time taken by the on-partitions-revoked rebalance listener callback | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +partitions-assigned-latency-avg | The average time taken by the on-partitions-assigned rebalance listener callback | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +partitions-assigned-latency-max | The max time taken by the on-partitions-assigned rebalance listener callback | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +partitions-lost-latency-avg | The average time taken by the on-partitions-lost rebalance listener callback | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) +partitions-lost-latency-max | The max time taken by the on-partitions-lost rebalance listener callback | kafka.consumer:type=consumer-coordinator-metrics,client-id=([-.\w]+) + +### Consumer Fetch Metrics + +{{< include-html file="/static/41/generated/consumer_metrics.html" >}} + +## Connect Monitoring + +A Connect worker process contains all the producer and consumer metrics as well as metrics specific to Connect. The worker process itself has a number of metrics, while each connector and task have additional metrics. {{< include-html file="/static/41/generated/connect_metrics.html" >}} + +## Streams Monitoring + +A Kafka Streams instance contains all the producer and consumer metrics as well as additional metrics specific to Streams. The metrics have three recording levels: `info`, `debug`, and `trace`. + +Note that the metrics have a 4-layer hierarchy. At the top level there are client-level metrics for each started Kafka Streams client. Each client has stream threads, with their own metrics. Each stream thread has tasks, with their own metrics. Each task has a number of processor nodes, with their own metrics. Each task also has a number of state stores and record caches, all with their own metrics. + +Use the following configuration option to specify which metrics you want collected: + + + metrics.recording.level="info" + +### Client Metrics + +All the following metrics have a recording level of `info`: Metric/Attribute name | Description | Mbean name +---|---|--- +version | The version of the Kafka Streams client. | kafka.streams:type=stream-metrics,client-id=([-.\w]+) +commit-id | The version control commit ID of the Kafka Streams client. | kafka.streams:type=stream-metrics,client-id=([-.\w]+) +application-id | The application ID of the Kafka Streams client. | kafka.streams:type=stream-metrics,client-id=([-.\w]+) +topology-description | The description of the topology executed in the Kafka Streams client. | kafka.streams:type=stream-metrics,client-id=([-.\w]+) +state | The state of the Kafka Streams client as a string. | kafka.streams:type=stream-metrics,client-id=([-.\w]+) +client-state | The state of the Kafka Streams client as a number (`ordinal()` of the corresponding enum). | kafka.streams:type=stream-metrics,client-id=([-.\w]+),process-id=([-.\w]+) +alive-stream-threads | The current number of alive stream threads that are running or participating in rebalance. | kafka.streams:type=stream-metrics,client-id=([-.\w]+) +failed-stream-threads | The number of failed stream threads since the start of the Kafka Streams client. 
| kafka.streams:type=stream-metrics,client-id=([-.\w]+) +recording-level | The metric recording level as a number (0 = INFO, 1 = DEBUG, 2 = TRACE). | kafka.streams:type=stream-metrics,client-id=([-.\w]+),process-id=([-.\w]+) + +### Thread Metrics + +All the following metrics have a recording level of `info`: Metric/Attribute name | Description | Mbean name +---|---|--- +state | The state of the thread as a string. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +thread-state | The state of the thread as a number (`ordinal()` of the corresponding enum). | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+),process-id=([-.\w]+) +commit-latency-avg | The average execution time in ms, for committing, across all running tasks of this thread. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +commit-latency-max | The maximum execution time in ms, for committing, across all running tasks of this thread. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +poll-latency-avg | The average execution time in ms, for consumer polling. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +poll-latency-max | The maximum execution time in ms, for consumer polling. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +process-latency-avg | The average execution time in ms, for processing. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +process-latency-max | The maximum execution time in ms, for processing. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +punctuate-latency-avg | The average execution time in ms, for punctuating. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +punctuate-latency-max | The maximum execution time in ms, for punctuating. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +commit-ratio | The fraction of time the thread spent on committing all tasks. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +commit-rate | The average number of commits per sec. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +commit-total | The total number of commit calls. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +poll-ratio | The fraction of time the thread spent on polling records from consumer. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +poll-rate | The average number of consumer poll calls per sec. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +poll-total | The total number of consumer poll calls. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +poll-records-avg | The average number of records polled from consumer within an iteration. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +poll-records-max | The maximum number of records polled from consumer within an iteration. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +process-ratio | The fraction of time the thread spent on processing active tasks. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +process-rate | The average number of processed records per sec. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +process-total | The total number of processed records. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +process-records-avg | The average number of records processed within an iteration (total count of processed records over total number of iterations).
| kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +process-records-max | The maximum number of records processed within an iteration. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +punctuate-ratio | The fraction of time the thread spends performing punctuating actions on active tasks | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +punctuate-rate | The average number of punctuate calls per sec. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +punctuate-total | The total number of punctuate calls. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +task-created-rate | The average number of tasks created per sec. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +task-created-total | The total number of tasks created. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +task-closed-rate | The average number of tasks closed per sec. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +task-closed-total | The total number of tasks closed. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +blocked-time-ns-total | The total time in ns the thread spent blocked on Kafka brokers. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) +thread-start-time | The system timestamp in ms that the thread was started. | kafka.streams:type=stream-thread-metrics,thread-id=([-.\w]+) + +### Task Metrics + +All the following metrics have a recording level of `debug`, except for the dropped-records-* and active-process-ratio metrics which have a recording level of `info`: Metric/Attribute name | Description | Mbean name +---|---|--- +process-latency-avg | The average execution time in ns, for processing. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +process-latency-max | The maximum execution time in ns, for processing. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +process-rate | The average number of processed records per sec across all source processor nodes of this task. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +process-total | The total number of processed records across all source processor nodes of this task. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +punctuate-latency-avg | The average amount of time taken to execute periodic tasks per call to punctuate | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +punctuate-latency-max | The maximum amount of time taken for any single call to punctuate to complete. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +punctuate-total | The total number of times the punctuate method was called to trigger periodic actions during task processing. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +punctuate-rate | The average number of calls to punctuate per second. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +record-lateness-avg | The average observed lateness in ms of records (stream time - record timestamp). | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +record-lateness-max | The max observed lateness in ms of records (stream time - record timestamp). | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +enforced-processing-rate | The average number of enforced processings per sec. 
| kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +enforced-processing-total | The total number enforced processings. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +dropped-records-rate | The average number of records dropped per sec within this task. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +dropped-records-total | The total number of records dropped within this task. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +active-process-ratio | The fraction of time the stream thread spent on processing this task among all assigned active tasks. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +active-buffer-count | The count of buffered records that are polled from consumer and not yet processed for this active task. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +input-buffer-bytes-total | The total number of bytes accumulated by this task, | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +cache-size-bytes-total | The cache size in bytes accumulated by this task. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +record-rate | The average number of records restored per second. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +record-total | The total number of records restored | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +update-rate | The average number of records updated per second. | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) +update-total | The total number of records updated | kafka.streams:type=stream-task-metrics,thread-id=([-.\w]+),task-id=([-.\w]+) + +### Processor Node Metrics + +The following metrics are only available on certain types of nodes, i.e., the process-* metrics are only available for source processor nodes, the `suppression-emit-*` metrics are only available for suppression operation nodes, `emit-final-*` metrics are only available for windowed aggregations nodes, and the `record-e2e-latency-*` metrics are only available for source processor nodes and terminal nodes (nodes without successor nodes). All the metrics have a recording level of `debug`, except for the `record-e2e-latency-*` metrics which have a recording level of `info`: Metric/Attribute name | Description | Mbean name +---|---|--- +bytes-consumed-total | The total number of bytes consumed by a source processor node. | kafka.streams:type=stream-topic-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+),topic=([-.\w]+) +bytes-produced-total | The total number of bytes produced by a sink processor node. | kafka.streams:type=stream-topic-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+),topic=([-.\w]+) +process-rate | The average number of records processed by a source processor node per sec. | kafka.streams:type=stream-processor-node-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+) +process-total | The total number of records processed by a source processor node per sec. | kafka.streams:type=stream-processor-node-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+) +suppression-emit-rate | The rate of records emitted per sec that have been emitted downstream from suppression operation nodes. 
| kafka.streams:type=stream-processor-node-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+) +suppression-emit-total | The total number of records that have been emitted downstream from suppression operation nodes. | kafka.streams:type=stream-processor-node-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+) +emit-final-latency-max | The max latency in ms to emit final records when a record could be emitted. | kafka.streams:type=stream-processor-node-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+) +emit-final-latency-avg | The avg latency in ms to emit final records when a record could be emitted. | kafka.streams:type=stream-processor-node-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+) +emit-final-records-rate | The rate of records emitted per sec when records could be emitted. | kafka.streams:type=stream-processor-node-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+) +emit-final-records-total | The total number of records emitted. | kafka.streams:type=stream-processor-node-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+) +record-e2e-latency-avg | The average end-to-end latency in ms of a record, measured by comparing the record timestamp with the system time when it has been fully processed by the node. | kafka.streams:type=stream-processor-node-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+) +record-e2e-latency-max | The maximum end-to-end latency in ms of a record, measured by comparing the record timestamp with the system time when it has been fully processed by the node. | kafka.streams:type=stream-processor-node-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+) +record-e2e-latency-min | The minimum end-to-end latency in ms of a record, measured by comparing the record timestamp with the system time when it has been fully processed by the node. | kafka.streams:type=stream-processor-node-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+) +records-consumed-total | The total number of records consumed by a source processor node. | kafka.streams:type=stream-topic-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+),topic=([-.\w]+) +records-produced-total | The total number of records produced by a sink processor node. | kafka.streams:type=stream-topic-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+),topic=([-.\w]+) +Idempotent-update-skip-rate | The average number of skipped idempotent updates per second. | kafka.streams:type=stream-topic-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+),topic=([-.\w]+) +Idempotent-update-skip-total | The total number of skipped updates. | kafka.streams:type=stream-topic-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),processor-node-id=([-.\w]+),topic=([-.\w]+) + +### State Store Metrics + +All the following metrics have a recording level of `debug`, except for the `record-e2e-latency-*` metrics which have a recording level `trace` and `num-open-iterators` which has recording level `info`. 
Note that the `store-scope` value is specified in `StoreSupplier#metricsScope()` for users' customized state stores; for built-in state stores, currently we have: + + * `in-memory-state` + * `in-memory-lru-state` + * `in-memory-window-state` + * `in-memory-suppression` (for suppression buffers) + * `rocksdb-state` (for RocksDB backed key-value store) + * `rocksdb-window-state` (for RocksDB backed window store) + * `rocksdb-session-state` (for RocksDB backed session store) + +Metrics suppression-buffer-size-avg, suppression-buffer-size-max, suppression-buffer-count-avg, and suppression-buffer-count-max are only available for suppression buffers. All other metrics are not available for suppression buffers. Metric/Attribute name | Description | Mbean name +---|---|--- +put-latency-avg | The average put execution time in ns. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +put-latency-max | The maximum put execution time in ns. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +put-if-absent-latency-avg | The average put-if-absent execution time in ns. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +put-if-absent-latency-max | The maximum put-if-absent execution time in ns. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +get-latency-avg | The average get execution time in ns. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +get-latency-max | The maximum get execution time in ns. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +delete-latency-avg | The average delete execution time in ns. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +delete-latency-max | The maximum delete execution time in ns. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +put-all-latency-avg | The average put-all execution time in ns. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +put-all-latency-max | The maximum put-all execution time in ns. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +all-latency-avg | The average all operation execution time in ns, from iterator create to close time. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +all-latency-max | The maximum all operation execution time in ns, from iterator create to close time. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +range-latency-avg | The average range execution time in ns, from iterator create to close time. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +range-latency-max | The maximum range execution time in ns, from iterator create to close time. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +prefix-scan-latency-avg | The average prefix-scan execution time in ns, from iterator create to close time.
| kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +prefix-scan-latency-max | The maximum prefix-scan execution time in ns, from iterator create to close time. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +flush-latency-avg | The average flush execution time in ns. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +flush-latency-max | The maximum flush execution time in ns. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +restore-latency-avg | The average restore execution time in ns. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +restore-latency-max | The maximum restore execution time in ns. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +put-rate | The average put rate per sec for this store. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +put-if-absent-rate | The average put-if-absent rate per sec for this store. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +get-rate | The average get rate per sec for this store. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +delete-rate | The average delete rate per sec for this store. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +put-all-rate | The average put-all rate per sec for this store. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +all-rate | The average all operation rate per sec for this store. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +range-rate | The average range rate per sec for this store. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +prefix-scan-rate | The average prefix-scan rate per sec for this store. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +flush-rate | The average flush rate for this store. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +restore-rate | The average restore rate for this store. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +suppression-buffer-size-avg | The average total size in bytes of the buffered data over the sampling window. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),in-memory-suppression-id=([-.\w]+) +suppression-buffer-size-max | The maximum total size, in bytes, of the buffered data over the sampling window. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),in-memory-suppression-id=([-.\w]+) +suppression-buffer-count-avg | The average number of records buffered over the sampling window. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),in-memory-suppression-id=([-.\w]+) +suppression-buffer-count-max | The maximum number of records buffered over the sampling window. 
| kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),in-memory-suppression-id=([-.\w]+) +record-e2e-latency-avg | The average end-to-end latency in ms of a record, measured by comparing the record timestamp with the system time when it has been fully processed by the node. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +record-e2e-latency-max | The maximum end-to-end latency in ms of a record, measured by comparing the record timestamp with the system time when it has been fully processed by the node. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +record-e2e-latency-min | The minimum end-to-end latency in ms of a record, measured by comparing the record timestamp with the system time when it has been fully processed by the node. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +num-open-iterators | The current number of iterators on the store that have been created, but not yet closed. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +iterator-duration-avg | The average time in ns spent between creating an iterator and closing it. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +iterator-duration-max | The maximum time in ns spent between creating an iterator and closing it. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +oldest-iterator-open-since-ms | The system timestamp in ms the oldest still open iterator was created. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) + +### RocksDB Metrics + +RocksDB metrics are grouped into statistics-based metrics and properties-based metrics. The former are recorded from statistics that a RocksDB state store collects whereas the latter are recorded from properties that RocksDB exposes. Statistics collected by RocksDB provide cumulative measurements over time, e.g. bytes written to the state store. Properties exposed by RocksDB provide current measurements, e.g., the amount of memory currently used. Note that the `store-scope` for built-in RocksDB state stores are currently the following: + + * `rocksdb-state` (for RocksDB backed key-value store) + * `rocksdb-window-state` (for RocksDB backed window store) + * `rocksdb-session-state` (for RocksDB backed session store) + +**RocksDB Statistics-based Metrics:** All the following statistics-based metrics have a recording level of `debug` because collecting statistics in [RocksDB may have an impact on performance](https://github.com/facebook/rocksdb/wiki/Statistics#stats-level-and-performance-costs). Statistics-based metrics are collected every minute from the RocksDB state stores. If a state store consists of multiple RocksDB instances, as is the case for WindowStores and SessionStores, each metric reports an aggregation over the RocksDB instances of the state store. Metric/Attribute name | Description | Mbean name +---|---|--- +bytes-written-rate | The average number of bytes written per sec to the RocksDB state store. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +bytes-written-total | The total number of bytes written to the RocksDB state store. 
| kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +bytes-read-rate | The average number of bytes read per second from the RocksDB state store. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +bytes-read-total | The total number of bytes read from the RocksDB state store. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +memtable-bytes-flushed-rate | The average number of bytes flushed per sec from the memtable to disk. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +memtable-bytes-flushed-total | The total number of bytes flushed from the memtable to disk. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +memtable-hit-ratio | The ratio of memtable hits relative to all lookups to the memtable. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +memtable-flush-time-avg | The average duration in ms of memtable flushes to disc. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +memtable-flush-time-min | The minimum duration of memtable flushes to disc in ms. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +memtable-flush-time-max | The maximum duration in ms of memtable flushes to disc. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +block-cache-data-hit-ratio | The ratio of block cache hits for data blocks relative to all lookups for data blocks to the block cache. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +block-cache-index-hit-ratio | The ratio of block cache hits for index blocks relative to all lookups for index blocks to the block cache. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +block-cache-filter-hit-ratio | The ratio of block cache hits for filter blocks relative to all lookups for filter blocks to the block cache. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +write-stall-duration-avg | The average duration in ms of write stalls. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +write-stall-duration-total | The total duration in ms of write stalls. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +bytes-read-compaction-rate | The average number of bytes read per sec during compaction. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +bytes-written-compaction-rate | The average number of bytes written per sec during compaction. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +compaction-time-avg | The average duration in ms of disc compactions. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +compaction-time-min | The minimum duration of disc compactions in ms. 
| kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +compaction-time-max | The maximum duration in ms of disc compactions. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +number-open-files | This metric will return constant -1 because the RocksDB's counter NO_FILE_CLOSES has been removed in RocksDB 9.7.3 | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +number-file-errors-total | The total number of file errors occurred. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +**RocksDB Properties-based Metrics:** All the following properties-based metrics have a recording level of `info` and are recorded when the metrics are accessed. If a state store consists of multiple RocksDB instances, as is the case for WindowStores and SessionStores, each metric reports the sum over all the RocksDB instances of the state store, except for the block cache metrics `block-cache-*`. The block cache metrics report the sum over all RocksDB instances if each instance uses its own block cache, and they report the recorded value from only one instance if a single block cache is shared among all instances. Metric/Attribute name | Description | Mbean name +---|---|--- +num-immutable-mem-table | The number of immutable memtables that have not yet been flushed. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +cur-size-active-mem-table | The approximate size in bytes of the active memtable. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +cur-size-all-mem-tables | The approximate size in bytes of active and unflushed immutable memtables. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +size-all-mem-tables | The approximate size in bytes of active, unflushed immutable, and pinned immutable memtables. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +num-entries-active-mem-table | The number of entries in the active memtable. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +num-entries-imm-mem-tables | The number of entries in the unflushed immutable memtables. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +num-deletes-active-mem-table | The number of delete entries in the active memtable. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +num-deletes-imm-mem-tables | The number of delete entries in the unflushed immutable memtables. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +mem-table-flush-pending | This metric reports 1 if a memtable flush is pending, otherwise it reports 0. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +num-running-flushes | The number of currently running flushes. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +compaction-pending | This metric reports 1 if at least one compaction is pending, otherwise it reports 0. 
| kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +num-running-compactions | The number of currently running compactions. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +estimate-pending-compaction-bytes | The estimated total number of bytes a compaction needs to rewrite on disk to get all levels down to under target size (only valid for level compaction). | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +total-sst-files-size | The total size in bytes of all SST files. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +live-sst-files-size | The total size in bytes of all SST files that belong to the latest LSM tree. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +num-live-versions | Number of live versions of the LSM tree. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +block-cache-capacity | The capacity in bytes of the block cache. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +block-cache-usage | The memory size in bytes of the entries residing in block cache. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +block-cache-pinned-usage | The memory size in bytes for the entries being pinned in the block cache. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +estimate-num-keys | The estimated number of keys in the active and unflushed immutable memtables and storage. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +estimate-table-readers-mem | The estimated memory in bytes used for reading SST tables, excluding memory used in block cache. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) +background-errors | The total number of background errors. | kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+) + +### Record Cache Metrics + +All the following metrics have a recording level of `debug`: Metric/Attribute name | Description | Mbean name +---|---|--- +hit-ratio-avg | The average cache hit ratio defined as the ratio of cache read hits over the total cache read requests. | kafka.streams:type=stream-record-cache-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),record-cache-id=([-.\w]+) +hit-ratio-min | The minimum cache hit ratio. | kafka.streams:type=stream-record-cache-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),record-cache-id=([-.\w]+) +hit-ratio-max | The maximum cache hit ratio. | kafka.streams:type=stream-record-cache-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),record-cache-id=([-.\w]+) + +## Others + +We recommend monitoring GC time and other stats and various server stats such as CPU utilization, I/O service time, etc. On the client side, we recommend monitoring the message/byte rate (global and per topic), request rate/size/time, and on the consumer side, max lag in messages among all partitions and min fetch request rate. For a consumer to keep up, max lag needs to be less than a threshold and min fetch rate needs to be larger than 0. 
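+
+For a quick spot check of the consumer lag mentioned above, the `kafka-consumer-groups.sh` tool that ships with Kafka can describe a consumer group and report its per-partition lag (the group name `my-group` below is only a placeholder):
+
+    # Show current offset, log-end offset, and lag for each partition of the group
+    $ bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 \
+      --describe --group my-group
+
+The reported LAG column is the difference between the log-end offset and the group's committed offset for each partition; a lag that keeps growing over time is the usual signal that a consumer is not keeping up.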
diff --git a/content/en/41/operations/multi-tenancy.md b/content/en/41/operations/multi-tenancy.md new file mode 100644 index 000000000..6de13375c --- /dev/null +++ b/content/en/41/operations/multi-tenancy.md @@ -0,0 +1,117 @@ +--- +title: Multi-Tenancy +description: Multi-Tenancy +weight: 4 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Multi-Tenancy + +## Multi-Tenancy Overview + +As a highly scalable event streaming platform, Kafka is used by many users as their central nervous system, connecting in real-time a wide range of different systems and applications from various teams and lines of businesses. Such multi-tenant cluster environments command proper control and management to ensure the peaceful coexistence of these different needs. This section highlights features and best practices to set up such shared environments, which should help you operate clusters that meet SLAs/OLAs and that minimize potential collateral damage caused by "noisy neighbors". + +Multi-tenancy is a many-sided subject, including but not limited to: + + * Creating user spaces for tenants (sometimes called namespaces) + * Configuring topics with data retention policies and more + * Securing topics and clusters with encryption, authentication, and authorization + * Isolating tenants with quotas and rate limits + * Monitoring and metering + * Inter-cluster data sharing (cf. geo-replication) + + + +## Creating User Spaces (Namespaces) For Tenants With Topic Naming + +Kafka administrators operating a multi-tenant cluster typically need to define user spaces for each tenant. For the purpose of this section, "user spaces" are a collection of topics, which are grouped together under the management of a single entity or user. + +In Kafka, the main unit of data is the topic. Users can create and name each topic. They can also delete them, but it is not possible to rename a topic directly. Instead, to rename a topic, the user must create a new topic, move the messages from the original topic to the new, and then delete the original. With this in mind, it is recommended to define logical spaces, based on a hierarchical topic naming structure. This setup can then be combined with security features, such as prefixed ACLs, to isolate different spaces and tenants, while also minimizing the administrative overhead for securing the data in the cluster. + +These logical user spaces can be grouped in different ways, and the concrete choice depends on how your organization prefers to use your Kafka clusters. The most common groupings are as follows. + +_By team or organizational unit:_ Here, the team is the main aggregator. In an organization where teams are the main user of the Kafka infrastructure, this might be the best grouping. + +Example topic naming structure: + + * `<organization>.<team>.<dataset>.<event-name>` +(e.g., "acme.infosec.telemetry.logins") + + + +_By project or product:_ Here, a team manages more than one project. Their credentials will be different for each project, so all the controls and settings will always be project related. + +Example topic naming structure: + + * `<project>.<product>.<event-name>` +(e.g., "mobility.payments.suspicious") + + + +Certain information should normally not be put in a topic name, such as information that is likely to change over time (e.g., the name of the intended consumer) or that is a technical detail or metadata that is available elsewhere (e.g., the topic's partition count and other configuration settings). + +To enforce a topic naming structure, several options are available: + + * Use prefix ACLs (cf.
[KIP-290](https://cwiki.apache.org/confluence/x/QpvLB)) to enforce a common prefix for topic names. For example, team A may only be permitted to create topics whose names start with `payments.teamA.`. + * Define a custom `CreateTopicPolicy` (cf. [KIP-108](https://cwiki.apache.org/confluence/x/Iw8IB) and the setting create.topic.policy.class.name) to enforce strict naming patterns. These policies provide the most flexibility and can cover complex patterns and rules to match an organization's needs. + * Disable topic creation for normal users by denying it with an ACL, and then rely on an external process to create topics on behalf of users (e.g., scripting or your favorite automation toolkit). + * It may also be useful to disable the Kafka feature to auto-create topics on demand by setting `auto.create.topics.enable=false` in the broker configuration. Note that you should not rely solely on this option. + + + +## Configuring Topics: Data Retention And More + +Kafka's configuration is very flexible due to its fine granularity, and it supports a plethora of per-topic configuration settings to help administrators set up multi-tenant clusters. For example, administrators often need to define data retention policies to control how much and/or for how long data will be stored in a topic, with settings such as retention.bytes (size) and retention.ms (time). This limits storage consumption within the cluster, and helps complying with legal requirements such as GDPR. + +## Securing Clusters and Topics: Authentication, Authorization, Encryption + +Because the documentation has a dedicated chapter on security that applies to any Kafka deployment, this section focuses on additional considerations for multi-tenant environments. + +Security settings for Kafka fall into three main categories, which are similar to how administrators would secure other client-server data systems, like relational databases and traditional messaging systems. + + 1. **Encryption** of data transferred between Kafka brokers and Kafka clients, between brokers, and between brokers and other optional tools. + 2. **Authentication** of connections from Kafka clients and applications to Kafka brokers, as well as connections between Kafka brokers. + 3. **Authorization** of client operations such as creating, deleting, and altering the configuration of topics; writing events to or reading events from a topic; creating and deleting ACLs. Administrators can also define custom policies to put in place additional restrictions, such as a `CreateTopicPolicy` and `AlterConfigPolicy` (see [KIP-108](https://cwiki.apache.org/confluence/x/Iw8IB) and the settings create.topic.policy.class.name, alter.config.policy.class.name). + + + +When securing a multi-tenant Kafka environment, the most common administrative task is the third category (authorization), i.e., managing the user/client permissions that grant or deny access to certain topics and thus to the data stored by users within a cluster. This task is performed predominantly through the setting of access control lists (ACLs). Here, administrators of multi-tenant environments in particular benefit from putting a hierarchical topic naming structure in place as described in a previous section, because they can conveniently control access to topics through prefixed ACLs (`--resource-pattern-type Prefixed`). 
This significantly minimizes the administrative overhead of securing topics in multi-tenant environments: administrators can make their own trade-offs between higher developer convenience (more lenient permissions, using fewer and broader ACLs) vs. tighter security (more stringent permissions, using more and narrower ACLs). + +In the following example, user Alice—a new member of ACME corporation's InfoSec team—is granted write permissions to all topics whose names start with "acme.infosec.", such as "acme.infosec.telemetry.logins" and "acme.infosec.syslogs.events". + + + # Grant permissions to user Alice + $ bin/kafka-acls.sh \ + --bootstrap-server localhost:9092 \ + --add --allow-principal User:Alice \ + --producer \ + --resource-pattern-type prefixed --topic acme.infosec. + +You can similarly use this approach to isolate different customers on the same shared cluster. + +## Isolating Tenants: Quotas, Rate Limiting, Throttling + +Multi-tenant clusters should generally be configured with quotas, which protect against users (tenants) eating up too many cluster resources, such as when they attempt to write or read very high volumes of data, or create requests to brokers at an excessively high rate. This may cause network saturation, monopolize broker resources, and impact other clients—all of which you want to avoid in a shared environment. + +**Client quotas:** Kafka supports different types of (per-user principal) client quotas. Because a client's quotas apply irrespective of which topics the client is writing to or reading from, they are a convenient and effective tool to allocate resources in a multi-tenant cluster. Request rate quotas, for example, help to limit a user's impact on broker CPU usage by limiting the time a broker spends on the [request handling path](/protocol.html) for that user, after which throttling kicks in. In many situations, isolating users with request rate quotas has a bigger impact in multi-tenant clusters than setting incoming/outgoing network bandwidth quotas, because excessive broker CPU usage for processing requests reduces the effective bandwidth the broker can serve. Furthermore, administrators can also define quotas on topic operations—such as create, delete, and alter—to prevent Kafka clusters from being overwhelmed by highly concurrent topic operations (see [KIP-599](https://cwiki.apache.org/confluence/x/6DLcC) and the quota type `controller_mutation_rate`). + +**Server quotas:** Kafka also supports different types of broker-side quotas. For example, administrators can set a limit on the rate with which the broker accepts new connections, set the maximum number of connections per broker, or set the maximum number of connections allowed from a specific IP address. + +For more information, please refer to the quota overview and how to set quotas. + +## Monitoring and Metering + +Monitoring is a broader subject that is covered elsewhere in the documentation. Administrators of any Kafka environment, but especially multi-tenant ones, should set up monitoring according to these instructions. Kafka supports a wide range of metrics, such as the rate of failed authentication attempts, request latency, consumer lag, total number of consumer groups, metrics on the quotas described in the previous section, and many more. + +For example, monitoring can be configured to track the size of topic-partitions (with the JMX metric `kafka.log.Log.Size.`), and thus the total size of data stored in a topic. 
You can then define alerts when tenants on shared clusters are getting close to using too much storage space. + +## Multi-Tenancy and Geo-Replication + +Kafka lets you share data across different clusters, which may be located in different geographical regions, data centers, and so on. Apart from use cases such as disaster recovery, this functionality is useful when a multi-tenant setup requires inter-cluster data sharing. See the section Geo-Replication (Cross-Cluster Data Mirroring) for more information. + +## Further considerations + +**Data contracts:** You may need to define data contracts between the producers and the consumers of data in a cluster, using event schemas. This ensures that events written to Kafka can always be read properly again, and prevents malformed or corrupt events being written. The best way to achieve this is to deploy a so-called schema registry alongside the cluster. (Kafka does not include a schema registry, but there are third-party implementations available.) A schema registry manages the event schemas and maps the schemas to topics, so that producers know which topics are accepting which types (schemas) of events, and consumers know how to read and parse events in a topic. Some registry implementations provide further functionality, such as schema evolution, storing a history of all schemas, and schema compatibility settings. diff --git a/content/en/41/operations/tiered-storage.md b/content/en/41/operations/tiered-storage.md new file mode 100644 index 000000000..3489f877c --- /dev/null +++ b/content/en/41/operations/tiered-storage.md @@ -0,0 +1,153 @@ +--- +title: Tiered Storage +description: Tiered Storage +weight: 9 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Tiered Storage + +## Tiered Storage Overview + +Kafka data is mostly consumed in a streaming fashion using tail reads. Tail reads leverage OS's page cache to serve the data instead of disk reads. Older data is typically read from the disk for backfill or failure recovery purposes and is infrequent. + +In the tiered storage approach, Kafka cluster is configured with two tiers of storage - local and remote. The local tier is the same as the current Kafka that uses the local disks on the Kafka brokers to store the log segments. The new remote tier uses external storage systems, such as HDFS or S3, to store the completed log segments. Please check [KIP-405](https://cwiki.apache.org/confluence/x/KJDQBQ) for more information. + +## Configuration + +### Broker Configurations + +By default, the Kafka server will not enable the tiered storage feature. `remote.log.storage.system.enable` is the property to control whether to enable tiered storage functionality in a broker or not. Setting it to "true" enables this feature. + +`RemoteStorageManager` is an interface to provide the lifecycle of remote log segments and indexes. Kafka server doesn't provide out-of-the-box implementation of RemoteStorageManager. Users must configure `remote.log.storage.manager.class.name` and `remote.log.storage.manager.class.path` to specify the implementation of RemoteStorageManager. + +`RemoteLogMetadataManager` is an interface to provide the lifecycle of metadata about remote log segments with strongly consistent semantics. By default, Kafka provides an implementation with storage as an internal topic. This implementation can be changed by configuring `remote.log.metadata.manager.class.name` and `remote.log.metadata.manager.class.path`. 
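+
+As a minimal sketch of overriding that default (the class name and jar path below are hypothetical placeholders, not an implementation shipped with Kafka), the broker configuration would look like:
+
+    # placeholder class and jar path -- replace with your own RemoteLogMetadataManager implementation
+    remote.log.metadata.manager.class.name=com.example.MyRemoteLogMetadataManager
+    remote.log.metadata.manager.class.path=/opt/kafka/plugins/my-rlmm.jar
+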
When adopting the default Kafka internal-topic-based implementation, `remote.log.metadata.manager.listener.name` is a mandatory property that specifies which listener the clients created by the default RemoteLogMetadataManager implementation use to talk to the brokers.
+
+### Topic Configurations
+
+After correctly configuring the broker-side configurations for the tiered storage feature, there are still topic-level configurations that need to be set. `remote.storage.enable` is the switch that determines whether a topic uses tiered storage. By default it is set to false. After enabling the `remote.storage.enable` property, the next thing to consider is the log retention. When tiered storage is enabled for a topic, the following log retention configurations come into play (the `local.*` settings are the two additional ones introduced by tiered storage):
+
+  * `local.retention.ms`
+  * `retention.ms`
+  * `local.retention.bytes`
+  * `retention.bytes`
+
+The configurations prefixed with `local` specify the time/size the "local" log can grow to before segments are moved to remote storage and then deleted. If unset, the values of `retention.ms` and `retention.bytes` will be used.
+
+## Quick Start Example
+
+Apache Kafka doesn't provide an out-of-the-box RemoteStorageManager implementation. To preview the tiered storage feature, the [LocalTieredStorage](https://github.com/apache/kafka/blob/trunk/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorage.java) implemented for integration tests can be used; it creates a temporary directory in local storage to simulate the remote storage.
+
+To adopt the `LocalTieredStorage`, the test library needs to be built locally:
+
+    # please check out the specific version tag you're using before building it
+    # ex: `git checkout 4.1.0`
+    $ ./gradlew clean :storage:testJar
+
+After the build succeeds, there should be a `kafka-storage-x.x.x-test.jar` file under `storage/build/libs`. Next, set the broker-side configurations to enable the tiered storage feature.
+
+    # Sample KRaft broker server.properties listening on PLAINTEXT://:9092
+    remote.log.storage.system.enable=true
+
+    # Set the listener that the clients in RemoteLogMetadataManager use to talk to the brokers.
+    remote.log.metadata.manager.listener.name=PLAINTEXT
+
+    # Please provide the implementation info for remoteStorageManager.
+    # This is the mandatory configuration for tiered storage.
+    # Here, we use the `LocalTieredStorage` built above.
+    remote.log.storage.manager.class.name=org.apache.kafka.server.log.remote.storage.LocalTieredStorage
+    remote.log.storage.manager.class.path=/PATH/TO/kafka-storage-4.1.0-test.jar
+
+    # These 2 prefixes are default values, but customizable
+    remote.log.storage.manager.impl.prefix=rsm.config.
+    remote.log.metadata.manager.impl.prefix=rlmm.config.
+
+    # Configure the directory used for `LocalTieredStorage`
+    # Note: make sure the brokers have access to this directory
+    rsm.config.dir=/tmp/kafka-remote-storage
+
+    # This needs to be changed if the number of brokers in the cluster is more than 1
+    rlmm.config.remote.log.metadata.topic.replication.factor=1
+
+    # Try to speed up the log retention check interval for testing
+    log.retention.check.interval.ms=1000
+
+Follow the quick start guide to start up the Kafka environment. Then, create a topic with tiered storage enabled with the following configs:
+
+    # remote.storage.enable=true -> enables tiered storage on the topic
+    # local.retention.ms=1000 -> The number of milliseconds to keep the local log segment before it gets deleted.
+    # Note that a local log segment is eligible for deletion only after it gets uploaded to remote.
+    # retention.ms=3600000 -> when segments exceed this time, the segments in remote storage will be deleted
+    # segment.bytes=1048576 -> for test only, to speed up the log segment rolling interval
+    # file.delete.delay.ms=1000 -> for test only, to speed up the local-log segment file delete delay
+
+    $ bin/kafka-topics.sh --create --topic tieredTopic --bootstrap-server localhost:9092 \
+      --config remote.storage.enable=true --config local.retention.ms=1000 --config retention.ms=3600000 \
+      --config segment.bytes=1048576 --config file.delete.delay.ms=1000
+
+Try to send messages to the `tieredTopic` topic to roll the log segment:
+
+    $ bin/kafka-producer-perf-test.sh --topic tieredTopic --num-records 1200 --record-size 1024 --throughput -1 --producer-props bootstrap.servers=localhost:9092
+
+Then, after the active segment is rolled, the old segment should be moved to remote storage and then deleted locally. This can be verified by checking the remote log directory configured above. For example:
+
+    $ ls /tmp/kafka-remote-storage/kafka-tiered-storage/tieredTopic-0-jF8s79t9SrG_PNqlwv7bAA
+    00000000000000000000-knnxbs3FSRyKdPcSAOQC-w.index
+    00000000000000000000-knnxbs3FSRyKdPcSAOQC-w.snapshot
+    00000000000000000000-knnxbs3FSRyKdPcSAOQC-w.leader_epoch_checkpoint
+    00000000000000000000-knnxbs3FSRyKdPcSAOQC-w.timeindex
+    00000000000000000000-knnxbs3FSRyKdPcSAOQC-w.log
+
+Lastly, we can try to consume some data from the beginning and print the offset numbers, to make sure it successfully fetches offset 0 from the remote storage:
+
+    $ bin/kafka-console-consumer.sh --topic tieredTopic --from-beginning --max-messages 1 --bootstrap-server localhost:9092 --property print.offset=true
+
+In KRaft mode, you can disable tiered storage at the topic level, either keeping the remote logs as read-only logs or deleting all remote logs completely.
+
+If you want the remote logs to become read-only, with no more local log segments copied to remote storage, set `remote.storage.enable=true,remote.log.copy.disable=true` on the topic.
+
+Note: You also need to set `local.retention.ms` and `local.retention.bytes` to the same values as `retention.ms` and `retention.bytes`, or to "-2". This is because after disabling remote log copy, the local retention policies will not be applied anymore, which might confuse users and lead to unexpectedly full disks.
+
+    $ bin/kafka-configs.sh --bootstrap-server localhost:9092 \
+      --alter --entity-type topics --entity-name tieredTopic \
+      --add-config 'remote.storage.enable=true,remote.log.copy.disable=true,local.retention.ms=-2,local.retention.bytes=-2'
+
+If you want to completely disable tiered storage at the topic level with all remote logs deleted, set `remote.storage.enable=false,remote.log.delete.on.disable=true` on the topic.
+
+    $ bin/kafka-configs.sh --bootstrap-server localhost:9092 \
+      --alter --entity-type topics --entity-name tieredTopic \
+      --add-config 'remote.storage.enable=false,remote.log.delete.on.disable=true'
+
+You can also re-enable the tiered storage feature at the topic level. Please note that if you want to disable tiered storage at the cluster level, you should delete the tiered-storage-enabled topics explicitly. Attempting to disable tiered storage at the cluster level without deleting the topics that use it will result in an exception during startup.
+ + + $ bin/kafka-topics.sh --delete --topic tieredTopic --bootstrap-server localhost:9092 + +After topics are deleted, you're safe to set `remote.log.storage.system.enable=false` in the broker configuration. + +## Limitations + +While the Tiered Storage works for most use cases, it is still important to be aware of the following limitations: + + * No support for compacted topics + * Disabling tiered storage on all topics where it is enabled is required before disabling tiered storage at the broker level + * Admin actions related to tiered storage feature are only supported on clients from version 3.0 onwards + * No support for log segments missing producer snapshot file. It can happen when topic is created before v2.8.0. + * Only one partition per fetch request is served from the remote store. This limitation can become a bottleneck for consumer client throughput - consider configuring `max.partition.fetch.bytes` appropriately. + + + +For more information, please check [Kafka Tiered Storage GA Release Notes](https://cwiki.apache.org/confluence/x/9xDOEg). diff --git a/content/en/41/operations/transaction-protocol.md b/content/en/41/operations/transaction-protocol.md new file mode 100644 index 000000000..5e291a700 --- /dev/null +++ b/content/en/41/operations/transaction-protocol.md @@ -0,0 +1,38 @@ +--- +title: Transaction Protocol +description: Transaction Protocol +weight: 11 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Transaction Protocol + +## Overview + +Starting from Apache Kafka 4.0, Transactions Server Side Defense ([KIP-890](https://cwiki.apache.org/confluence/x/B40ODg)) brings a strengthened transactional protocol. When enabled and using 4.0 producer clients, the producer epoch is bumped on every transaction to ensure every transaction includes the intended messages and duplicates are not written as part of the next transaction. + +The protocol is automatically enabled on the server since Apache Kafka 4.0. Enabling and disabling the protocol is controlled by the `transaction.version` feature flag. This flag can be set using the storage tool on new cluster creation, or dynamically to an existing cluster via the features tool. Producer clients starting 4.0 and above will use the new transactional protocol as long as it is enabled on the server. + +## Upgrade & Downgrade + +To enable the new protocol on the server, set `transaction.version=2`. The producer clients do not need to be restarted, and will dynamically upgrade the next time they connect or re-connect to a broker. (Alternatively, the client can be restarted to force this connection). A producer will not upgrade mid-transaction, but on the start of the next transaction after it becomes aware of the server-side upgrade. + +Downgrades are safe to perform and work similarly. The older protocol will be used by the clients on the first transaction after the producer becomes aware of the downgraded protocol. + +## Performance + +The new transactional protocol improves performance over verification by only sending a single call to add partitions on the server side, rather than one from the client to add and one from the server to verify. + +One consequence of this change is that we can no longer use the hardcoded retry backoff introduced by [KAFKA-5477](https://issues.apache.org/jira/browse/KAFKA-5477). Due to the asynchronous nature of the `endTransaction` api, the client can start adding partitions to the next transaction before the markers are written. 
When this happens, the server will return `CONCURRENT_TRANSACTIONS` until the previous transaction completes. Rather than the default client backoff for these retries, there was a shorter retry backoff of 20ms. + +Now with the server-side request, the server will attempt to retry adding the partition a few times when it sees the `CONCURRENT_TRANSACTIONS` error before it returns the error to the client. This can result in higher produce latencies reported on these requests. The transaction end to end latency (measured from the time the client begins the transaction to the time to commit) does not increase overall with this change. The time just shifts from client-side backoff to being calculated as part of the produce latency. + +The server-side backoff and total retry time can be configured with the following new configs: + + * `add.partitions.to.txn.retry.backoff.ms` + * `add.partitions.to.txn.retry.backoff.max.ms` + + diff --git a/content/en/41/security/_index.md b/content/en/41/security/_index.md new file mode 100644 index 000000000..734b80a62 --- /dev/null +++ b/content/en/41/security/_index.md @@ -0,0 +1,10 @@ +--- +title: Security +description: +weight: 7 +tags: ['kafka', 'docs', 'security'] +aliases: +keywords: +type: docs +--- + diff --git a/content/en/41/security/authentication-using-sasl.md b/content/en/41/security/authentication-using-sasl.md new file mode 100644 index 000000000..f82ef1aa9 --- /dev/null +++ b/content/en/41/security/authentication-using-sasl.md @@ -0,0 +1,570 @@ +--- +title: Authentication using SASL +description: Authentication using SASL +weight: 4 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Authentication using SASL + + 1. #### JAAS configuration + +Kafka uses the Java Authentication and Authorization Service ([JAAS](https://docs.oracle.com/javase/8/docs/technotes/guides/security/jaas/JAASRefGuide.html)) for SASL configuration. + + 1. ##### JAAS configuration for Kafka brokers + +`KafkaServer` is the section name in the JAAS file used by each KafkaServer/Broker. This section provides SASL configuration options for the broker including any SASL client connections made by the broker for inter-broker communication. If multiple listeners are configured to use SASL, the section name may be prefixed with the listener name in lower-case followed by a period, e.g. `sasl_ssl.KafkaServer`. + +Brokers may also configure JAAS using the broker configuration property `sasl.jaas.config`. The property name must be prefixed with the listener prefix including the SASL mechanism, i.e. `listener.name.{listenerName}.{saslMechanism}.sasl.jaas.config`. Only one login module may be specified in the config value. If multiple mechanisms are configured on a listener, configs must be provided for each mechanism using the listener and mechanism prefix. 
For example, + + listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ + username="admin" \ + password="admin-secret"; + listener.name.sasl_ssl.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ + username="admin" \ + password="admin-secret" \ + user_admin="admin-secret" \ + user_alice="alice-secret"; + +If JAAS configuration is defined at different levels, the order of precedence used is: + * Broker configuration property `listener.name.{listenerName}.{saslMechanism}.sasl.jaas.config` + * `{listenerName}.KafkaServer` section of static JAAS configuration + * `KafkaServer` section of static JAAS configuration + +See GSSAPI (Kerberos), PLAIN, SCRAM, or non-production/production OAUTHBEARER for example broker configurations. + + 2. ##### JAAS configuration for Kafka clients + +Clients may configure JAAS using the client configuration property sasl.jaas.config or using the static JAAS config file similar to brokers. + + 1. ###### JAAS configuration using client configuration property + +Clients may specify JAAS configuration as a producer or consumer property without creating a physical configuration file. This mode also enables different producers and consumers within the same JVM to use different credentials by specifying different properties for each client. If both static JAAS configuration system property `java.security.auth.login.config` and client property `sasl.jaas.config` are specified, the client property will be used. + +See GSSAPI (Kerberos), PLAIN, SCRAM, or non-production/production OAUTHBEARER for example client configurations. + + 2. ###### JAAS configuration using static config file + +To configure SASL authentication on the clients using static JAAS config file: + 1. Add a JAAS config file with a client login section named `KafkaClient`. Configure a login module in `KafkaClient` for the selected mechanism as described in the examples for setting up GSSAPI (Kerberos), PLAIN, SCRAM, or non-production/production OAUTHBEARER. For example, GSSAPI credentials may be configured as: + + KafkaClient { + com.sun.security.auth.module.Krb5LoginModule required + useKeyTab=true + storeKey=true + keyTab="/etc/security/keytabs/kafka_client.keytab" + principal="kafka-client-1@EXAMPLE.COM"; + }; + + 2. Pass the JAAS config file location as JVM parameter to each client JVM. For example: + + -Djava.security.auth.login.config=/etc/kafka/kafka_client_jaas.conf + + 2. #### SASL configuration + +SASL may be used with PLAINTEXT or SSL as the transport layer using the security protocol SASL_PLAINTEXT or SASL_SSL respectively. If SASL_SSL is used, then SSL must also be configured. + + 1. ##### SASL mechanisms + +Kafka supports the following SASL mechanisms: + * GSSAPI (Kerberos) + * PLAIN + * SCRAM-SHA-256 + * SCRAM-SHA-512 + * OAUTHBEARER + 2. ##### SASL configuration for Kafka brokers + + 1. Configure a SASL port in server.properties, by adding at least one of SASL_PLAINTEXT or SASL_SSL to the _listeners_ parameter, which contains one or more comma-separated values: + + listeners=SASL_PLAINTEXT://host.name:port + +If you are only configuring a SASL port (or if you want the Kafka brokers to authenticate each other using SASL) then make sure you set the same SASL protocol for inter-broker communication: + + security.inter.broker.protocol=SASL_PLAINTEXT (or SASL_SSL) + + 2. Select one or more supported mechanisms to enable in the broker and follow the steps to configure SASL for the mechanism. 
To enable multiple mechanisms in the broker, follow the steps here. + 3. ##### SASL configuration for Kafka clients + +SASL authentication is only supported for the new Java Kafka producer and consumer, the older API is not supported. + +To configure SASL authentication on the clients, select a SASL mechanism that is enabled in the broker for client authentication and follow the steps to configure SASL for the selected mechanism. + +Note: When establishing connections to brokers via SASL, clients may perform a reverse DNS lookup of the broker address. Due to how the JRE implements reverse DNS lookups, clients may observe slow SASL handshakes if fully qualified domain names are not used, for both the client's `bootstrap.servers` and a broker's `advertised.listeners`. + + 3. #### Authentication using SASL/Kerberos + + 1. ##### Prerequisites + + 1. **Kerberos** +If your organization is already using a Kerberos server (for example, by using Active Directory), there is no need to install a new server just for Kafka. Otherwise you will need to install one, your Linux vendor likely has packages for Kerberos and a short guide on how to install and configure it ([Ubuntu](https://help.ubuntu.com/community/Kerberos), [Redhat](https://access.redhat.com/en-US/Red_Hat_Enterprise_Linux/6/html/Managing_Smart_Cards/installing-kerberos.html)). Note that if you are using Oracle Java, you will need to download JCE policy files for your Java version and copy them to $JAVA_HOME/jre/lib/security. + 2. **Create Kerberos Principals** +If you are using the organization's Kerberos or Active Directory server, ask your Kerberos administrator for a principal for each Kafka broker in your cluster and for every operating system user that will access Kafka with Kerberos authentication (via clients and tools). +If you have installed your own Kerberos, you will need to create these principals yourself using the following commands: + + $ sudo /usr/sbin/kadmin.local -q 'addprinc -randkey kafka/{hostname}@{REALM}' + $ sudo /usr/sbin/kadmin.local -q "ktadd -k /etc/security/keytabs/{keytabname}.keytab kafka/{hostname}@{REALM}" + + 3. **Make sure all hosts can be reachable using hostnames** \- it is a Kerberos requirement that all your hosts can be resolved with their FQDNs. + 2. ##### Configuring Kafka Brokers + + 1. Add a suitably modified JAAS file similar to the one below to each Kafka broker's config directory, let's call it kafka_server_jaas.conf for this example (note that each broker should have its own keytab): + + KafkaServer { + com.sun.security.auth.module.Krb5LoginModule required + useKeyTab=true + storeKey=true + keyTab="/etc/security/keytabs/kafka_server.keytab" + principal="kafka/kafka1.hostname.com@EXAMPLE.COM"; + }; + +`KafkaServer` section in the JAAS file tells the broker which principal to use and the location of the keytab where this principal is stored. It allows the broker to login using the keytab specified in this section. + 2. Pass the JAAS and optionally the krb5 file locations as JVM parameters to each Kafka broker (see [here](https://docs.oracle.com/javase/8/docs/technotes/guides/security/jgss/tutorials/KerberosReq.html) for more details): + + -Djava.security.krb5.conf=/etc/kafka/krb5.conf + -Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf + + 3. Make sure the keytabs configured in the JAAS file are readable by the operating system user who is starting kafka broker. + 4. Configure SASL port and SASL mechanisms in server.properties as described here. 
For example: + + listeners=SASL_PLAINTEXT://host.name:port + security.inter.broker.protocol=SASL_PLAINTEXT + sasl.mechanism.inter.broker.protocol=GSSAPI + sasl.enabled.mechanisms=GSSAPI + +We must also configure the service name in server.properties, which should match the principal name of the kafka brokers. In the above example, principal is "kafka/kafka1.hostname.com@EXAMPLE.com", so: + + sasl.kerberos.service.name=kafka + + 3. ##### Configuring Kafka Clients + +To configure SASL authentication on the clients: + 1. Clients (producers, consumers, connect workers, etc) will authenticate to the cluster with their own principal (usually with the same name as the user running the client), so obtain or create these principals as needed. Then configure the JAAS configuration property for each client. Different clients within a JVM may run as different users by specifying different principals. The property `sasl.jaas.config` in producer.properties or consumer.properties describes how clients like producer and consumer can connect to the Kafka Broker. The following is an example configuration for a client using a keytab (recommended for long-running processes): + + sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ + useKeyTab=true \ + storeKey=true \ + keyTab="/etc/security/keytabs/kafka_client.keytab" \ + principal="kafka-client-1@EXAMPLE.COM"; + +For command-line utilities like kafka-console-consumer or kafka-console-producer, kinit can be used along with "useTicketCache=true" as in: + + sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ + useTicketCache=true; + +JAAS configuration for clients may alternatively be specified as a JVM parameter similar to brokers as described here. Clients use the login section named `KafkaClient`. This option allows only one user for all client connections from a JVM. + 2. Make sure the keytabs configured in the JAAS configuration are readable by the operating system user who is starting kafka client. + 3. Optionally pass the krb5 file locations as JVM parameters to each client JVM (see [here](https://docs.oracle.com/javase/8/docs/technotes/guides/security/jgss/tutorials/KerberosReq.html) for more details): + + -Djava.security.krb5.conf=/etc/kafka/krb5.conf + + 4. Configure the following properties in producer.properties or consumer.properties: + + security.protocol=SASL_PLAINTEXT (or SASL_SSL) + sasl.mechanism=GSSAPI + sasl.kerberos.service.name=kafka + + 4. #### Authentication using SASL/PLAIN + +SASL/PLAIN is a simple username/password authentication mechanism that is typically used with TLS for encryption to implement secure authentication. Kafka supports a default implementation for SASL/PLAIN which can be extended for production use as described here. + +Under the default implementation of `principal.builder.class`, the username is used as the authenticated `Principal` for configuration of ACLs etc. + 1. ##### Configuring Kafka Brokers + + 1. Add a suitably modified JAAS file similar to the one below to each Kafka broker's config directory, let's call it kafka_server_jaas.conf for this example: + + KafkaServer { + org.apache.kafka.common.security.plain.PlainLoginModule required + username="admin" + password="admin-secret" + user_admin="admin-secret" + user_alice="alice-secret"; + }; + +This configuration defines two users (_admin_ and _alice_). The properties `username` and `password` in the `KafkaServer` section are used by the broker to initiate connections to other brokers. 
In this example, _admin_ is the user for inter-broker communication. The set of properties `user__userName_` defines the passwords for all users that connect to the broker and the broker validates all client connections including those from other brokers using these properties. + 2. Pass the JAAS config file location as JVM parameter to each Kafka broker: + + -Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf + + 3. Configure SASL port and SASL mechanisms in server.properties as described here. For example: + + listeners=SASL_SSL://host.name:port + security.inter.broker.protocol=SASL_SSL + sasl.mechanism.inter.broker.protocol=PLAIN + sasl.enabled.mechanisms=PLAIN + + 2. ##### Configuring Kafka Clients + +To configure SASL authentication on the clients: + 1. Configure the JAAS configuration property for each client in producer.properties or consumer.properties. The login module describes how the clients like producer and consumer can connect to the Kafka Broker. The following is an example configuration for a client for the PLAIN mechanism: + + sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ + username="alice" \ + password="alice-secret"; + +The options `username` and `password` are used by clients to configure the user for client connections. In this example, clients connect to the broker as user _alice_. Different clients within a JVM may connect as different users by specifying different user names and passwords in `sasl.jaas.config`. + +JAAS configuration for clients may alternatively be specified as a JVM parameter similar to brokers as described here. Clients use the login section named `KafkaClient`. This option allows only one user for all client connections from a JVM. + + 2. Configure the following properties in producer.properties or consumer.properties: + + security.protocol=SASL_SSL + sasl.mechanism=PLAIN + + 3. ##### Use of SASL/PLAIN in production + + * SASL/PLAIN should be used only with SSL as transport layer to ensure that clear passwords are not transmitted on the wire without encryption. + * The default implementation of SASL/PLAIN in Kafka specifies usernames and passwords in the JAAS configuration file as shown here. From Kafka version 2.0 onwards, you can avoid storing clear passwords on disk by configuring your own callback handlers that obtain username and password from an external source using the configuration options `sasl.server.callback.handler.class` and `sasl.client.callback.handler.class`. + * In production systems, external authentication servers may implement password authentication. From Kafka version 2.0 onwards, you can plug in your own callback handlers that use external authentication servers for password verification by configuring `sasl.server.callback.handler.class`. + 5. #### Authentication using SASL/SCRAM + +Salted Challenge Response Authentication Mechanism (SCRAM) is a family of SASL mechanisms that addresses the security concerns with traditional mechanisms that perform username/password authentication like PLAIN and DIGEST-MD5. The mechanism is defined in [RFC 5802](https://tools.ietf.org/html/rfc5802). Kafka supports [SCRAM-SHA-256](https://tools.ietf.org/html/rfc7677) and SCRAM-SHA-512 which can be used with TLS to perform secure authentication. Under the default implementation of `principal.builder.class`, the username is used as the authenticated `Principal` for configuration of ACLs etc. The default SCRAM implementation in Kafka stores SCRAM credentials in the metadata log. 
Refer to Security Considerations for more details. + + 1. ##### Creating SCRAM Credentials + +The SCRAM implementation in Kafka uses the metadata log as credential store. Credentials can be created in the metadata log using `kafka-storage.sh` or `kafka-configs.sh`. For each SCRAM mechanism enabled, credentials must be created by adding a config with the mechanism name. Credentials for inter-broker communication must be created before Kafka brokers are started. `kafka-storage.sh` can format storage with initial credentials. Client credentials may be created and updated dynamically and updated credentials will be used to authenticate new connections. `kafka-configs.sh` can be used to create and update credentials after Kafka brokers are started. + +Create initial SCRAM credentials for user _admin_ with password _admin-secret_ : + + $ bin/kafka-storage.sh format -t $(bin/kafka-storage.sh random-uuid) -c config/server.properties --add-scram 'SCRAM-SHA-256=[name="admin",password="admin-secret"]' + +Create SCRAM credentials for user _alice_ with password _alice-secret_ (refer to Configuring Kafka Clients for client configuration): + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --alter --add-config 'SCRAM-SHA-256=[iterations=8192,password=alice-secret]' --entity-type users --entity-name alice --command-config client.properties + +The default iteration count of 4096 is used if iterations are not specified. A random salt is created if it's not specified. The SCRAM identity consisting of salt, iterations, StoredKey and ServerKey are stored in the metadata log. See [RFC 5802](https://tools.ietf.org/html/rfc5802) for details on SCRAM identity and the individual fields. + +Existing credentials may be listed using the _\--describe_ option: + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type users --entity-name alice --command-config client.properties + +Credentials may be deleted for one or more SCRAM mechanisms using the _\--alter --delete-config_ option: + + $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --alter --delete-config 'SCRAM-SHA-256' --entity-type users --entity-name alice --command-config client.properties + + 2. ##### Configuring Kafka Brokers + + 1. Add a suitably modified JAAS file similar to the one below to each Kafka broker's config directory, let's call it kafka_server_jaas.conf for this example: + + KafkaServer { + org.apache.kafka.common.security.scram.ScramLoginModule required + username="admin" + password="admin-secret"; + }; + +The properties `username` and `password` in the `KafkaServer` section are used by the broker to initiate connections to other brokers. In this example, _admin_ is the user for inter-broker communication. + 2. Pass the JAAS config file location as JVM parameter to each Kafka broker: + + -Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf + + 3. Configure SASL port and SASL mechanisms in server.properties as described here. For example: + + listeners=SASL_SSL://host.name:port + security.inter.broker.protocol=SASL_SSL + sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256 (or SCRAM-SHA-512) + sasl.enabled.mechanisms=SCRAM-SHA-256 (or SCRAM-SHA-512) + + 3. ##### Configuring Kafka Clients + +To configure SASL authentication on the clients: + 1. Configure the JAAS configuration property for each client in producer.properties or consumer.properties. The login module describes how the clients like producer and consumer can connect to the Kafka Broker. 
The following is an example configuration for a client for the SCRAM mechanisms: + + sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ + username="alice" \ + password="alice-secret"; + +The options `username` and `password` are used by clients to configure the user for client connections. In this example, clients connect to the broker as user _alice_. Different clients within a JVM may connect as different users by specifying different user names and passwords in `sasl.jaas.config`. + +JAAS configuration for clients may alternatively be specified as a JVM parameter similar to brokers as described here. Clients use the login section named `KafkaClient`. This option allows only one user for all client connections from a JVM. + + 2. Configure the following properties in producer.properties or consumer.properties: + + security.protocol=SASL_SSL + sasl.mechanism=SCRAM-SHA-256 (or SCRAM-SHA-512) + + 4. ##### Security Considerations for SASL/SCRAM + + * The default implementation of SASL/SCRAM in Kafka stores SCRAM credentials in the metadata log. This is suitable for production use in installations where KRaft controllers are secure and on a private network. + * Kafka supports only the strong hash functions SHA-256 and SHA-512 with a minimum iteration count of 4096. Strong hash functions combined with strong passwords and high iteration counts protect against brute force attacks if KRaft controllers security is compromised. + * SCRAM should be used only with TLS-encryption to prevent interception of SCRAM exchanges. This protects against dictionary or brute force attacks and against impersonation if KRaft controllers security is compromised. + * From Kafka version 2.0 onwards, the default SASL/SCRAM credential store may be overridden using custom callback handlers by configuring `sasl.server.callback.handler.class` in installations where KRaft controllers are not secure. + * For more details on security considerations, refer to [RFC 5802](https://tools.ietf.org/html/rfc5802#section-9). + 6. #### Authentication using SASL/OAUTHBEARER + +The [OAuth 2 Authorization Framework](https://tools.ietf.org/html/rfc6749) "enables a third-party application to obtain limited access to an HTTP service, either on behalf of a resource owner by orchestrating an approval interaction between the resource owner and the HTTP service, or by allowing the third-party application to obtain access on its own behalf." The SASL OAUTHBEARER mechanism enables the use of the framework in a SASL (i.e. a non-HTTP) context; it is defined in [RFC 7628](https://tools.ietf.org/html/rfc7628). The default OAUTHBEARER implementation in Kafka creates and validates [Unsecured JSON Web Tokens](https://tools.ietf.org/html/rfc7515#appendix-A.5) and is only suitable for use in non-production Kafka installations. Refer to Security Considerations for more details. Recent versions of Apache Kafka have added production-ready OAUTHBEARER implementations that support interaction with an OAuth 2.0-standards compliant identity provider. Both modes are described in the following, noted where applicable. + +Under the default implementation of `principal.builder.class`, the principalName of OAuthBearerToken is used as the authenticated `Principal` for configuration of ACLs etc. + 1. ##### Configuring Non-production Kafka Brokers + +The default implementation of SASL/OAUTHBEARER in Kafka creates and validates [Unsecured JSON Web Tokens](https://tools.ietf.org/html/rfc7515#appendix-A.5). 
While suitable only for non-production use, it does provide the flexibility to create arbitrary tokens in a DEV or TEST environment. + + 1. Add a suitably modified JAAS file similar to the one below to each Kafka broker's config directory, let's call it kafka_server_jaas.conf for this example: + + KafkaServer { + org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required + unsecuredLoginStringClaim_sub="admin"; + }; + +The property `unsecuredLoginStringClaim_sub` in the `KafkaServer` section is used by the broker when it initiates connections to other brokers. In this example, _admin_ will appear in the subject (`sub`) claim and will be the user for inter-broker communication. + +Here are the various supported JAAS module options on the broker side for [Unsecured JSON Web Token](https://tools.ietf.org/html/rfc7515#appendix-A.5) validation: JAAS Module Option for Unsecured Token Validation | Documentation +---|--- +`unsecuredValidatorPrincipalClaimName="value"` | Set to a non-empty value if you wish a particular `String` claim holding a principal name to be checked for existence; the default is to check for the existence of the '`sub`' claim. +`unsecuredValidatorScopeClaimName="value"` | Set to a custom claim name if you wish the name of the `String` or `String List` claim holding any token scope to be something other than '`scope`'. +`unsecuredValidatorRequiredScope="value"` | Set to a space-delimited list of scope values if you wish the `String/String List` claim holding the token scope to be checked to make sure it contains certain values. +`unsecuredValidatorAllowableClockSkewMs="value"` | Set to a positive integer value if you wish to allow up to some number of positive milliseconds of clock skew (the default is 0). + + 2. Pass the JAAS config file location as JVM parameter to each Kafka broker: + + -Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf + + 3. Configure SASL port and SASL mechanisms in server.properties as described here. For example: + + listeners=SASL_SSL://host.name:port (or SASL_PLAINTEXT if non-production) + security.inter.broker.protocol=SASL_SSL (or SASL_PLAINTEXT if non-production) + sasl.mechanism.inter.broker.protocol=OAUTHBEARER + sasl.enabled.mechanisms=OAUTHBEARER + + 2. ##### Configuring Production Kafka Brokers + + 1. Add a suitably modified JAAS file similar to the one below to each Kafka broker's config directory, let's call it kafka_server_jaas.conf for this example: + + KafkaServer { + org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required ; + }; + + 2. Pass the JAAS config file location as JVM parameter to each Kafka broker: + + -Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf + + 3. Configure SASL port and SASL mechanisms in server.properties as described here. 
For example: + + listeners=SASL_SSL://host.name:port + security.inter.broker.protocol=SASL_SSL + sasl.mechanism.inter.broker.protocol=OAUTHBEARER + sasl.enabled.mechanisms=OAUTHBEARER + listener.name..oauthbearer.sasl.server.callback.handler.class=org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler + listener.name..oauthbearer.sasl.oauthbearer.jwks.endpoint.url=https://example.com/oauth2/v1/keys + +The OAUTHBEARER broker configuration includes: + * sasl.oauthbearer.clock.skew.seconds + * sasl.oauthbearer.expected.audience + * sasl.oauthbearer.expected.issuer + * sasl.oauthbearer.jwks.endpoint.refresh.ms + * sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms + * sasl.oauthbearer.jwks.endpoint.retry.backoff.ms + * sasl.oauthbearer.jwks.endpoint.url + * sasl.oauthbearer.scope.claim.name + * sasl.oauthbearer.sub.claim.name + 3. ##### Configuring Non-production Kafka Clients + +To configure SASL authentication on the clients: + 1. Configure the JAAS configuration property for each client in producer.properties or consumer.properties. The login module describes how the clients like producer and consumer can connect to the Kafka Broker. The following is an example configuration for a client for the OAUTHBEARER mechanisms: + + sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required \ + unsecuredLoginStringClaim_sub="alice"; + +The option `unsecuredLoginStringClaim_sub` is used by clients to configure the subject (`sub`) claim, which determines the user for client connections. In this example, clients connect to the broker as user _alice_. Different clients within a JVM may connect as different users by specifying different subject (`sub`) claims in `sasl.jaas.config`. + +The default implementation of SASL/OAUTHBEARER in Kafka creates and validates [Unsecured JSON Web Tokens](https://tools.ietf.org/html/rfc7515#appendix-A.5). While suitable only for non-production use, it does provide the flexibility to create arbitrary tokens in a DEV or TEST environment. + +Here are the various supported JAAS module options on the client side (and on the broker side if OAUTHBEARER is the inter-broker protocol): JAAS Module Option for Unsecured Token Creation | Documentation +---|--- +`unsecuredLoginStringClaim_="value"` | Creates a `String` claim with the given name and value. Any valid claim name can be specified except '`iat`' and '`exp`' (these are automatically generated). +`unsecuredLoginNumberClaim_="value"` | Creates a `Number` claim with the given name and value. Any valid claim name can be specified except '`iat`' and '`exp`' (these are automatically generated). +`unsecuredLoginListClaim_="value"` | Creates a `String List` claim with the given name and values parsed from the given value where the first character is taken as the delimiter. For example: `unsecuredLoginListClaim_fubar="|value1|value2"`. Any valid claim name can be specified except '`iat`' and '`exp`' (these are automatically generated). +`unsecuredLoginExtension_="value"` | Creates a `String` extension with the given name and value. For example: `unsecuredLoginExtension_traceId="123"`. A valid extension name is any sequence of lowercase or uppercase alphabet characters. In addition, the "auth" extension name is reserved. A valid extension value is any combination of characters with ASCII codes 1-127. +`unsecuredLoginPrincipalClaimName` | Set to a custom claim name if you wish the name of the `String` claim holding the principal name to be something other than '`sub`'. 
+`unsecuredLoginLifetimeSeconds` | Set to an integer value if the token expiration is to be set to something other than the default value of 3600 seconds (which is 1 hour). The '`exp`' claim will be set to reflect the expiration time. +`unsecuredLoginScopeClaimName` | Set to a custom claim name if you wish the name of the `String` or `String List` claim holding any token scope to be something other than '`scope`'. + +JAAS configuration for clients may alternatively be specified as a JVM parameter similar to brokers as described here. Clients use the login section named `KafkaClient`. This option allows only one user for all client connections from a JVM. + + 2. Configure the following properties in producer.properties or consumer.properties: + + security.protocol=SASL_SSL (or SASL_PLAINTEXT if non-production) + sasl.mechanism=OAUTHBEARER + + 3. The default implementation of SASL/OAUTHBEARER depends on the jackson-databind library. Since it's an optional dependency, users have to configure it as a dependency via their build tool. + 4. ##### Configuring Production Kafka Clients + +To configure SASL authentication on the clients: + 1. Configure the JAAS configuration property for each client in producer.properties or consumer.properties. The login module describes how the clients like producer and consumer can connect to the Kafka Broker. The following is an example configuration for a client for the OAUTHBEARER mechanisms: + + sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required ; + +JAAS configuration for clients may alternatively be specified as a JVM parameter similar to brokers as described here. Clients use the login section named `KafkaClient`. This option allows only one user for all client connections from a JVM. + + 2. Configure the following properties in producer.properties or consumer.properties. 
For example, if using the OAuth `client_credentials` grant type to communicate with the OAuth identity provider, the configuration might look like this: + + security.protocol=SASL_SSL + sasl.mechanism=OAUTHBEARER + sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever + sasl.oauthbearer.client.credentials.client.id=jdoe + sasl.oauthbearer.client.credentials.client.secret=$3cr3+ + sasl.oauthbearer.scope=my-application-scope + sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/v1/token + +Or, if using the OAuth `urn:ietf:params:oauth:grant-type:jwt-bearer` grant type to communicate with the OAuth identity provider, the configuration might look like this: + + security.protocol=SASL_SSL + sasl.mechanism=OAUTHBEARER + sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever + sasl.oauthbearer.assertion.private.key.file=/path/to/private.key + sasl.oauthbearer.assertion.algorithm=RS256 + sasl.oauthbearer.assertion.claim.exp.seconds=600 + sasl.oauthbearer.assertion.template.file=/path/to/template.json + sasl.oauthbearer.scope=my-application-scope + sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/v1/token + +The OAUTHBEARER client configuration includes: + * sasl.oauthbearer.assertion.algorithm + * sasl.oauthbearer.assertion.claim.aud + * sasl.oauthbearer.assertion.claim.exp.seconds + * sasl.oauthbearer.assertion.claim.iss + * sasl.oauthbearer.assertion.claim.jti.include + * sasl.oauthbearer.assertion.claim.nbf.seconds + * sasl.oauthbearer.assertion.claim.sub + * sasl.oauthbearer.assertion.file + * sasl.oauthbearer.assertion.private.key.file + * sasl.oauthbearer.assertion.private.key.passphrase + * sasl.oauthbearer.assertion.template.file + * sasl.oauthbearer.client.credentials.client.id + * sasl.oauthbearer.client.credentials.client.secret + * sasl.oauthbearer.header.urlencode + * sasl.oauthbearer.jwt.retriever.class + * sasl.oauthbearer.jwt.validator.class + * sasl.oauthbearer.scope + * sasl.oauthbearer.token.endpoint.url + 3. The default implementation of SASL/OAUTHBEARER depends on the jackson-databind library. Since it's an optional dependency, users have to configure it as a dependency via their build tool. + 5. ##### Token Refresh for SASL/OAUTHBEARER + +Kafka periodically refreshes any token before it expires so that the client can continue to make connections to brokers. The parameters that impact how the refresh algorithm operates are specified as part of the producer/consumer/broker configuration and are as follows. See the documentation for these properties elsewhere for details. The default values are usually reasonable, in which case these configuration parameters would not need to be explicitly set. Producer/Consumer/Broker Configuration Property +--- +`sasl.login.refresh.window.factor` +`sasl.login.refresh.window.jitter` +`sasl.login.refresh.min.period.seconds` +`sasl.login.refresh.min.buffer.seconds` + 6. 
##### Secure/Production Use of SASL/OAUTHBEARER + +Production use cases will require writing an implementation of `org.apache.kafka.common.security.auth.AuthenticateCallbackHandler` that can handle an instance of `org.apache.kafka.common.security.oauthbearer.OAuthBearerTokenCallback` and declaring it via either the `sasl.login.callback.handler.class` configuration option for a non-broker client or via the `listener.name.sasl_ssl.oauthbearer.sasl.login.callback.handler.class` configuration option for brokers (when SASL/OAUTHBEARER is the inter-broker protocol). + +Production use cases will also require writing an implementation of `org.apache.kafka.common.security.auth.AuthenticateCallbackHandler` that can handle an instance of `org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback` and declaring it via the `listener.name.sasl_ssl.oauthbearer.sasl.server.callback.handler.class` broker configuration option. + 7. ##### Security Considerations for SASL/OAUTHBEARER + + * The default implementation of SASL/OAUTHBEARER in Kafka creates and validates [Unsecured JSON Web Tokens](https://tools.ietf.org/html/rfc7515#appendix-A.5). This is suitable only for non-production use. + * OAUTHBEARER should be used in production environments only with TLS-encryption to prevent interception of tokens. + * The default unsecured SASL/OAUTHBEARER implementation may be overridden (and must be overridden in production environments) using custom login and SASL Server callback handlers as described above. + * For more details on OAuth 2 security considerations in general, refer to [RFC 6749, Section 10](https://tools.ietf.org/html/rfc6749#section-10). + 7. #### Enabling multiple SASL mechanisms in a broker + + 1. Specify configuration for the login modules of all enabled mechanisms in the `KafkaServer` section of the JAAS config file. For example: + + KafkaServer { + com.sun.security.auth.module.Krb5LoginModule required + useKeyTab=true + storeKey=true + keyTab="/etc/security/keytabs/kafka_server.keytab" + principal="kafka/kafka1.hostname.com@EXAMPLE.COM"; + + org.apache.kafka.common.security.plain.PlainLoginModule required + username="admin" + password="admin-secret" + user_admin="admin-secret" + user_alice="alice-secret"; + }; + + 2. Enable the SASL mechanisms in server.properties: + + sasl.enabled.mechanisms=GSSAPI,PLAIN,SCRAM-SHA-256,SCRAM-SHA-512,OAUTHBEARER + + 3. Specify the SASL security protocol and mechanism for inter-broker communication in server.properties if required: + + security.inter.broker.protocol=SASL_PLAINTEXT (or SASL_SSL) + sasl.mechanism.inter.broker.protocol=GSSAPI (or one of the other enabled mechanisms) + + 4. Follow the mechanism-specific steps in GSSAPI (Kerberos), PLAIN, SCRAM, and non-production/production OAUTHBEARER to configure SASL for the enabled mechanisms. + 8. #### Modifying SASL mechanism in a Running Cluster + +SASL mechanism can be modified in a running cluster using the following sequence: + + 1. Enable new SASL mechanism by adding the mechanism to `sasl.enabled.mechanisms` in server.properties for each broker. Update JAAS config file to include both mechanisms as described here. Incrementally bounce the cluster nodes. + 2. Restart clients using the new mechanism. + 3. To change the mechanism of inter-broker communication (if this is required), set `sasl.mechanism.inter.broker.protocol` in server.properties to the new mechanism and incrementally bounce the cluster again. + 4. 
To remove the old mechanism (if this is required), remove it from `sasl.enabled.mechanisms` in server.properties and remove the entries for the old mechanism from the JAAS config file. Incrementally bounce the cluster again.
+ 9. #### Authentication using Delegation Tokens
+
+Delegation token based authentication is a lightweight authentication mechanism to complement existing SASL/SSL methods. Delegation tokens are shared secrets between Kafka brokers and clients. Delegation tokens help processing frameworks distribute the workload to available workers in a secure environment without the added cost of distributing Kerberos TGT/keytabs or keystores when 2-way SSL is used. See [KIP-48](https://cwiki.apache.org/confluence/x/tfmnAw) for more details.
+
+Under the default implementation of `principal.builder.class`, the owner of the delegation token is used as the authenticated `Principal` for configuration of ACLs etc.
+
+Typical steps for delegation token usage are:
+
+ 1. User authenticates with the Kafka cluster via SASL or SSL, and obtains a delegation token. This can be done using the Admin APIs or the `kafka-delegation-tokens.sh` script.
+ 2. User securely passes the delegation token to Kafka clients for authenticating with the Kafka cluster.
+ 3. Token owner/renewer can renew/expire the delegation tokens.
+ 1. ##### Token Management
+
+A secret is used to generate and verify delegation tokens. This is supplied using the config option `delegation.token.secret.key`. The same secret key must be configured across all the brokers. The controllers must also be configured with the secret using the same config option. If the secret is not set or set to an empty string, delegation token authentication and API operations will fail.
+
+The token details are stored with the other metadata on the controller nodes, and delegation tokens are suitable for use when the controllers are on a private network or when all communication between brokers and controllers is encrypted. Currently, this secret is stored as plain text in the server.properties config file. We intend to make these configurable in a future Kafka release.
+
+A token has a current life, and a maximum renewable life. By default, tokens must be renewed once every 24 hours for up to 7 days. These can be configured using the `delegation.token.expiry.time.ms` and `delegation.token.max.lifetime.ms` config options.
+
+Tokens can also be cancelled explicitly. If a token is not renewed by the token's expiration time or if the token is beyond the maximum lifetime, it will be deleted from all broker caches.
+
+ 2. ##### Creating Delegation Tokens
+
+Tokens can be created by using the Admin APIs or the `kafka-delegation-tokens.sh` script. Delegation token requests (create/renew/expire/describe) should be issued only on SASL or SSL authenticated channels. Tokens cannot be requested if the initial authentication is done through a delegation token. A user can create a token for themselves, or for another user by specifying the `--owner-principal` parameter. Owners/renewers can renew or expire tokens, and can always describe their own tokens. To describe other tokens, a DESCRIBE_TOKEN permission needs to be added on the User resource representing the owner of the token. `kafka-delegation-tokens.sh` script examples are given below.
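+
+The `client.properties` file passed via `--command-config` in the examples below holds the admin client's connection settings for the initial (non-token) authentication. As a rough sketch, assuming a SASL_SSL listener with SCRAM (the mechanism, credentials, and truststore path here are placeholders, not defaults), it might look like:
+
+    # Placeholder credentials and truststore path; substitute values for your cluster
+    security.protocol=SASL_SSL
+    sasl.mechanism=SCRAM-SHA-256
+    sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
+        username="alice" \
+        password="alice-secret";
+    ssl.truststore.location=/var/private/ssl/client.truststore.jks
+    ssl.truststore.password=test1234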
+ +Create a delegation token: + + $ bin/kafka-delegation-tokens.sh --bootstrap-server localhost:9092 --create --max-life-time-period -1 --command-config client.properties --renewer-principal User:user1 + +Create a delegation token for a different owner: + + $ bin/kafka-delegation-tokens.sh --bootstrap-server localhost:9092 --create --max-life-time-period -1 --command-config client.properties --renewer-principal User:user1 --owner-principal User:owner1 + +Renew a delegation token: + + $ bin/kafka-delegation-tokens.sh --bootstrap-server localhost:9092 --renew --renew-time-period -1 --command-config client.properties --hmac ABCDEFGHIJK + +Expire a delegation token: + + $ bin/kafka-delegation-tokens.sh --bootstrap-server localhost:9092 --expire --expiry-time-period -1 --command-config client.properties --hmac ABCDEFGHIJK + +Existing tokens can be described using the --describe option: + + $ bin/kafka-delegation-tokens.sh --bootstrap-server localhost:9092 --describe --command-config client.properties --owner-principal User:user1 + + 3. ##### Token Authentication + +Delegation token authentication piggybacks on the current SASL/SCRAM authentication mechanism. We must enable SASL/SCRAM mechanism on Kafka cluster as described in here. + +Configuring Kafka Clients: + + 1. Configure the JAAS configuration property for each client in producer.properties or consumer.properties. The login module describes how the clients like producer and consumer can connect to the Kafka Broker. The following is an example configuration for a client for the token authentication: + + sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ + username="tokenID123" \ + password="lAYYSFmLs4bTjf+lTZ1LCHR/ZZFNA==" \ + tokenauth="true"; + +The options `username` and `password` are used by clients to configure the token id and token HMAC. And the option `tokenauth` is used to indicate the server about token authentication. In this example, clients connect to the broker using token id: _tokenID123_. Different clients within a JVM may connect using different tokens by specifying different token details in `sasl.jaas.config`. + +JAAS configuration for clients may alternatively be specified as a JVM parameter similar to brokers as described here. Clients use the login section named `KafkaClient`. This option allows only one user for all client connections from a JVM. + + 4. ##### Procedure to manually rotate the secret: + +We require a re-deployment when the secret needs to be rotated. During this process, already connected clients will continue to work. But any new connection requests and renew/expire requests with old tokens can fail. Steps are given below. + + 1. Expire all existing tokens. + 2. Rotate the secret by rolling upgrade, and + 3. Generate new tokens + +We intend to automate this in a future Kafka release. + + + diff --git a/content/en/41/security/authorization-and-acls.md b/content/en/41/security/authorization-and-acls.md new file mode 100644 index 000000000..a440e7219 --- /dev/null +++ b/content/en/41/security/authorization-and-acls.md @@ -0,0 +1,355 @@ +--- +title: Authorization and ACLs +description: Authorization and ACLs +weight: 5 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Authorization and ACLs + +Kafka ships with a pluggable authorization framework, which is configured with the `authorizer.class.name` property in the server configuration. Configured implementations must extend `org.apache.kafka.server.authorizer.Authorizer`. 
Kafka provides a default implementation which stores ACLs in the cluster metadata (KRaft metadata log). For KRaft clusters, use the following configuration on all nodes (brokers, controllers, or combined broker/controller nodes):
+
+
+    authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer
+
+Kafka ACLs are defined in the general format of "Principal {P} is [Allowed|Denied] Operation {O} From Host {H} on any Resource {R} matching ResourcePattern {RP}". You can read more about the ACL structure in [KIP-11](https://cwiki.apache.org/confluence/x/XIUWAw) and resource patterns in [KIP-290](https://cwiki.apache.org/confluence/x/QpvLB). In order to add, remove, or list ACLs, you can use the Kafka ACL CLI `kafka-acls.sh`.
+
+### _Behavior Without ACLs:_
+
+If a resource (R) does not have any ACLs defined, meaning that no ACL matches the resource, Kafka will restrict access to that resource. In this situation, only super users are allowed to access it.
+
+### _Changing the Default Behavior:_
+
+If you prefer that resources without any ACLs be accessible by all users (instead of just super users), you can change the default behavior. To do this, add the following line to your server.properties file:
+
+
+    allow.everyone.if.no.acl.found=true
+
+With this setting enabled, if a resource does not have any ACLs defined, Kafka will allow access to everyone. If a resource has one or more ACLs defined, those ACL rules will be enforced as usual, regardless of the setting. One can also add super users in server.properties like the following (note that the delimiter is a semicolon since SSL user names may contain commas). The default PrincipalType string "User" is case sensitive.
+
+
+    super.users=User:Bob;User:Alice
+
+### KRaft Principal Forwarding
+
+In KRaft clusters, admin requests such as `CreateTopics` and `DeleteTopics` are sent to the broker listeners by the client. The broker then forwards the request to the active controller through the first listener configured in `controller.listener.names`. Authorization of these requests is done on the controller node. This is achieved by way of an `Envelope` request which packages both the underlying request from the client as well as the client principal. When the controller receives the forwarded `Envelope` request from the broker, it first authorizes the `Envelope` request using the authenticated broker principal. Then it authorizes the underlying request using the forwarded principal.
+All of this implies that Kafka must understand how to serialize and deserialize the client principal. The authentication framework allows for customized principals by overriding the `principal.builder.class` configuration. In order for customized principals to work with KRaft, the configured class must implement `org.apache.kafka.common.security.auth.KafkaPrincipalSerde` so that Kafka knows how to serialize and deserialize the principals. The default implementation `org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder` uses the Kafka RPC format defined in the source code: `clients/src/main/resources/common/message/DefaultPrincipalData.json`.
+
+### Customizing SSL User Name
+
+By default, the SSL user name will be of the form "CN=writeuser,OU=Unknown,O=Unknown,L=Unknown,ST=Unknown,C=Unknown". One can change that by setting `ssl.principal.mapping.rules` to a customized rule in server.properties. This config accepts a list of rules for mapping X.500 distinguished names to short names.
The rules are evaluated in order, and the first rule that matches a distinguished name is used to map it to a short name. Any later rules in the list are ignored.
+The format of `ssl.principal.mapping.rules` is a list where each rule starts with "RULE:" and contains an expression in one of the following formats. The default rule returns the string representation of the X.500 certificate distinguished name. If the distinguished name matches the pattern, then the replacement command will be run over the name. The rules also support lowercase/uppercase options to force the translated result to be all lowercase or all uppercase. This is done by adding a "/L" or "/U" to the end of the rule.
+
+
+    RULE:pattern/replacement/
+    RULE:pattern/replacement/[LU]
+
+Example `ssl.principal.mapping.rules` values are:
+
+
+    RULE:^CN=(.*?),OU=ServiceUsers.*$/$1/,
+    RULE:^CN=(.*?),OU=(.*?),O=(.*?),L=(.*?),ST=(.*?),C=(.*?)$/$1@$2/L,
+    RULE:^.*[Cc][Nn]=([a-zA-Z0-9.]*).*$/$1/L,
+    DEFAULT
+
+The above rules translate the distinguished name "CN=serviceuser,OU=ServiceUsers,O=Unknown,L=Unknown,ST=Unknown,C=Unknown" to "serviceuser" and "CN=adminUser,OU=Admin,O=Unknown,L=Unknown,ST=Unknown,C=Unknown" to "adminuser@admin".
+For advanced use cases, one can customize the name by setting a customized PrincipalBuilder in server.properties like the following.
+
+
+    principal.builder.class=CustomizedPrincipalBuilderClass
+
+### Customizing SASL User Name
+
+By default, the SASL user name will be the primary part of the Kerberos principal. One can change that by setting `sasl.kerberos.principal.to.local.rules` to a customized rule in server.properties. The format of `sasl.kerberos.principal.to.local.rules` is a list where each rule works in the same way as auth_to_local in the [Kerberos configuration file (krb5.conf)](https://web.mit.edu/Kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html). The rules also support an additional lowercase/uppercase option to force the translated result to be all lowercase or all uppercase; this is done by adding a "/L" or "/U" to the end of the rule. Each rule starts with "RULE:" and contains an expression in one of the following formats. See the Kerberos documentation for more details.
+
+
+    RULE:[n:string](regexp)s/pattern/replacement/
+    RULE:[n:string](regexp)s/pattern/replacement/g
+    RULE:[n:string](regexp)s/pattern/replacement//L
+    RULE:[n:string](regexp)s/pattern/replacement/g/L
+    RULE:[n:string](regexp)s/pattern/replacement//U
+    RULE:[n:string](regexp)s/pattern/replacement/g/U
+
+An example of adding a rule to properly translate user@MYDOMAIN.COM to user while also keeping the default rule in place is:
+
+
+    sasl.kerberos.principal.to.local.rules=RULE:[1:$1@$0](.*@MYDOMAIN.COM)s/@.*//,DEFAULT
+
+## Command Line Interface
+
+The Kafka authorization management CLI can be found under the bin directory with all the other CLIs. The CLI script is called **kafka-acls.sh**. The following table lists all the options that the script supports:
+
+Option | Description | Default | Option type
+---|---|---|---
+\--add | Indicates to the script that the user is trying to add an acl. | | Action
+\--remove | Indicates to the script that the user is trying to remove an acl. | | Action
+\--list | Indicates to the script that the user is trying to list acls. | | Action
+\--bootstrap-server | A list of host/port pairs to use for establishing the connection to the Kafka cluster broker. Only one of --bootstrap-server or --bootstrap-controller option must be specified.
| | Configuration +\--bootstrap-controller | A list of host/port pairs to use for establishing the connection to the Kafka cluster controller. Only one of --bootstrap-server or --bootstrap-controller option must be specified. | | Configuration +\--command-config | A property file containing configs to be passed to Admin Client. This option can only be used with --bootstrap-server option. | | Configuration +\--cluster | Indicates to the script that the user is trying to interact with acls on the singular cluster resource. | | ResourcePattern +\--topic [topic-name] | Indicates to the script that the user is trying to interact with acls on topic resource pattern(s). | | ResourcePattern +\--group [group-name] | Indicates to the script that the user is trying to interact with acls on consumer-group resource pattern(s) | | ResourcePattern +\--transactional-id [transactional-id] | The transactionalId to which ACLs should be added or removed. A value of * indicates the ACLs should apply to all transactionalIds. | | ResourcePattern +\--delegation-token [delegation-token] | Delegation token to which ACLs should be added or removed. A value of * indicates ACL should apply to all tokens. | | ResourcePattern +\--user-principal [user-principal] | A user resource to which ACLs should be added or removed. This is currently supported in relation with delegation tokens. A value of * indicates ACL should apply to all users. | | ResourcePattern +\--resource-pattern-type [pattern-type] | Indicates to the script the type of resource pattern, (for --add), or resource pattern filter, (for --list and --remove), the user wishes to use. +When adding acls, this should be a specific pattern type, e.g. 'literal' or 'prefixed'. +When listing or removing acls, a specific pattern type filter can be used to list or remove acls from a specific type of resource pattern, or the filter values of 'any' or 'match' can be used, where 'any' will match any pattern type, but will match the resource name exactly, and 'match' will perform pattern matching to list or remove all acls that affect the supplied resource(s). +WARNING: 'match', when used in combination with the '--remove' switch, should be used with care. | literal | Configuration +\--allow-principal | Principal is in PrincipalType:name format that will be added to ACL with Allow permission. Default PrincipalType string "User" is case sensitive. +You can specify multiple --allow-principal in a single command. | | Principal +\--deny-principal | Principal is in PrincipalType:name format that will be added to ACL with Deny permission. Default PrincipalType string "User" is case sensitive. +You can specify multiple --deny-principal in a single command. | | Principal +\--principal | Principal is in PrincipalType:name format that will be used along with --list option. Default PrincipalType string "User" is case sensitive. This will list the ACLs for the specified principal. +You can specify multiple --principal in a single command. | | Principal +\--allow-host | IP address from which principals listed in --allow-principal will have access. | if --allow-principal is specified defaults to * which translates to "all hosts" | Host +\--deny-host | IP address from which principals listed in --deny-principal will be denied access. | if --deny-principal is specified defaults to * which translates to "all hosts" | Host +\--operation | Operation that will be allowed or denied. 
+Valid values are: + + * Read + * Write + * Create + * Delete + * Alter + * Describe + * ClusterAction + * DescribeConfigs + * AlterConfigs + * IdempotentWrite + * CreateTokens + * DescribeTokens + * All + +| All | Operation +\--producer | Convenience option to add/remove acls for producer role. This will generate acls that allows WRITE, DESCRIBE and CREATE on topic. | | Convenience +\--consumer | Convenience option to add/remove acls for consumer role. This will generate acls that allows READ, DESCRIBE on topic and READ on consumer-group. | | Convenience +\--idempotent | Enable idempotence for the producer. This should be used in combination with the --producer option. +Note that idempotence is enabled automatically if the producer is authorized to a particular transactional-id. | | Convenience +\--force | Convenience option to assume yes to all queries and do not prompt. | | Convenience + +## Examples + + * **Adding Acls** +Suppose you want to add an acl "Principals User:Bob and User:Alice are allowed to perform Operation Read and Write on Topic Test-Topic from IP 198.51.100.0 and IP 198.51.100.1". You can do that by executing the CLI with following options: + + $ bin/kafka-acls.sh --bootstrap-server localhost:9092 --add --allow-principal User:Bob --allow-principal User:Alice --allow-host 198.51.100.0 --allow-host 198.51.100.1 --operation Read --operation Write --topic Test-topic + +By default, all principals that don't have an explicit acl that allows access for an operation to a resource are denied. In rare cases where an allow acl is defined that allows access to all but some principal we will have to use the --deny-principal and --deny-host option. For example, if we want to allow all users to Read from Test-topic but only deny User:BadBob from IP 198.51.100.3 we can do so using following commands: + + $ bin/kafka-acls.sh --bootstrap-server localhost:9092 --add --allow-principal User:'*' --allow-host '*' --deny-principal User:BadBob --deny-host 198.51.100.3 --operation Read --topic Test-topic + +Note that `--allow-host` and `--deny-host` only support IP addresses (hostnames are not supported). Above examples add acls to a topic by specifying --topic [topic-name] as the resource pattern option. Similarly user can add acls to cluster by specifying --cluster and to a consumer group by specifying --group [group-name]. You can add acls on any resource of a certain type, e.g. suppose you wanted to add an acl "Principal User:Peter is allowed to produce to any Topic from IP 198.51.200.0" You can do that by using the wildcard resource '*', e.g. by executing the CLI with following options: + + $ bin/kafka-acls.sh --bootstrap-server localhost:9092 --add --allow-principal User:Peter --allow-host 198.51.200.1 --producer --topic '*' + +You can add acls on prefixed resource patterns, e.g. suppose you want to add an acl "Principal User:Jane is allowed to produce to any Topic whose name starts with 'Test-' from any host". You can do that by executing the CLI with following options: + + $ bin/kafka-acls.sh --bootstrap-server localhost:9092 --add --allow-principal User:Jane --producer --topic Test- --resource-pattern-type prefixed + +Note, --resource-pattern-type defaults to 'literal', which only affects resources with the exact same name or, in the case of the wildcard resource name '*', a resource with any name. + * **Removing Acls** +Removing acls is pretty much the same. The only difference is instead of --add option users will have to specify --remove option. 
To remove the acls added by the first example above we can execute the CLI with the following options:
+
+    $ bin/kafka-acls.sh --bootstrap-server localhost:9092 --remove --allow-principal User:Bob --allow-principal User:Alice --allow-host 198.51.100.0 --allow-host 198.51.100.1 --operation Read --operation Write --topic Test-topic
+
+If you want to remove the acl added to the prefixed resource pattern above, you can execute the CLI with the following options:
+
+    $ bin/kafka-acls.sh --bootstrap-server localhost:9092 --remove --allow-principal User:Jane --producer --topic Test- --resource-pattern-type prefixed
+
+  * **List Acls**
+We can list acls for any resource by specifying the --list option with the resource. To list all acls on the literal resource pattern Test-topic, we can execute the CLI with the following options:
+
+    $ bin/kafka-acls.sh --bootstrap-server localhost:9092 --list --topic Test-topic
+
+However, this will only return the acls that have been added to this exact resource pattern. Other acls can exist that affect access to the topic, e.g. any acls on the topic wildcard '*', or any acls on prefixed resource patterns. Acls on the wildcard resource pattern can be queried explicitly:
+
+    $ bin/kafka-acls.sh --bootstrap-server localhost:9092 --list --topic '*'
+
+However, it is not necessarily possible to explicitly query for acls on prefixed resource patterns that match Test-topic, as the name of such patterns may not be known. We can list _all_ acls affecting Test-topic by using '--resource-pattern-type match', e.g.
+
+    $ bin/kafka-acls.sh --bootstrap-server localhost:9092 --list --topic Test-topic --resource-pattern-type match
+
+This will list acls on all matching literal, wildcard and prefixed resource patterns.
+  * **Adding or removing a principal as producer or consumer**
+The most common use case for acl management is adding/removing a principal as a producer or consumer, so we added convenience options to handle these cases. In order to add User:Bob as a producer of Test-topic we can execute the following command:
+
+    $ bin/kafka-acls.sh --bootstrap-server localhost:9092 --add --allow-principal User:Bob --producer --topic Test-topic
+
+Similarly, to add Bob as a consumer of Test-topic with consumer group Group-1 we just have to pass the --consumer option:
+
+    $ bin/kafka-acls.sh --bootstrap-server localhost:9092 --add --allow-principal User:Bob --consumer --topic Test-topic --group Group-1
+
+Note that for the consumer option we must also specify the consumer group. To remove a principal from the producer or consumer role we just need to pass the --remove option.
+
+
+
+## Authorization Primitives
+
+Protocol calls usually perform operations on certain resources in Kafka. Knowing the operations and resources is required to set up effective protection. In this section we'll list these operations and resources, then list the combination of these with the protocols to see the valid scenarios.
+
+### Operations in Kafka
+
+There are a few operation primitives that can be used to build up privileges. These can be matched up with certain resources to allow specific protocol calls for a given user. These are:
+
+  * Read
+  * Write
+  * Create
+  * Delete
+  * Alter
+  * Describe
+  * ClusterAction
+  * DescribeConfigs
+  * AlterConfigs
+  * IdempotentWrite
+  * CreateTokens
+  * DescribeTokens
+  * All
+
+
+
+### Resources in Kafka
+
+The operations above can be applied to certain resources, which are described below.
+
+  * **Topic:** this simply represents a Topic.
All protocol calls that are acting on topics (such as reading, writing them) require the corresponding privilege to be added. If there is an authorization error with a topic resource, then a TOPIC_AUTHORIZATION_FAILED (error code: 29) will be returned. + * **Group:** this represents the consumer groups in the brokers. All protocol calls that are working with consumer groups, like joining a group must have privileges with the group in subject. If the privilege is not given then a GROUP_AUTHORIZATION_FAILED (error code: 30) will be returned in the protocol response. + * **Cluster:** this resource represents the cluster. Operations that are affecting the whole cluster, like controlled shutdown are protected by privileges on the Cluster resource. If there is an authorization problem on a cluster resource, then a CLUSTER_AUTHORIZATION_FAILED (error code: 31) will be returned. + * **TransactionalId:** this resource represents actions related to transactions, such as committing. If any error occurs, then a TRANSACTIONAL_ID_AUTHORIZATION_FAILED (error code: 53) will be returned by brokers. + * **DelegationToken:** this represents the delegation tokens in the cluster. Actions, such as describing delegation tokens could be protected by a privilege on the DelegationToken resource. Since these objects have a little special behavior in Kafka it is recommended to read [KIP-48](https://cwiki.apache.org/confluence/x/tfmnAw) and the related upstream documentation at Authentication using Delegation Tokens. + * **User:** CreateToken and DescribeToken operations can be granted to User resources to allow creating and describing tokens for other users. More info can be found in [KIP-373](https://cwiki.apache.org/confluence/x/cwOQBQ). + + + +### Operations and Resources on Protocols + +In the below table we'll list the valid operations on resources that are executed by the Kafka API protocols. + +Protocol (API key) | Operation | Resource | Note +---|---|---|--- +PRODUCE (0) | Write | TransactionalId | A transactional producer which has its transactional.id set requires this privilege. +PRODUCE (0) | IdempotentWrite | Cluster | An idempotent produce action requires this privilege. +PRODUCE (0) | Write | Topic | This applies to a normal produce action. +FETCH (1) | ClusterAction | Cluster | A follower must have ClusterAction on the Cluster resource in order to fetch partition data. +FETCH (1) | Read | Topic | Regular Kafka consumers need READ permission on each partition they are fetching. +LIST_OFFSETS (2) | Describe | Topic | +METADATA (3) | Describe | Topic | +METADATA (3) | Create | Cluster | If topic auto-creation is enabled, then the broker-side API will check for the existence of a Cluster level privilege. If it's found then it'll allow creating the topic, otherwise it'll iterate through the Topic level privileges (see the next one). +METADATA (3) | Create | Topic | This authorizes auto topic creation if enabled but the given user doesn't have a cluster level permission (above). +LEADER_AND_ISR (4) | ClusterAction | Cluster | +STOP_REPLICA (5) | ClusterAction | Cluster | +UPDATE_METADATA (6) | ClusterAction | Cluster | +CONTROLLED_SHUTDOWN (7) | ClusterAction | Cluster | +OFFSET_COMMIT (8) | Read | Group | An offset can only be committed if it's authorized to the given group and the topic too (see below). Group access is checked first, then Topic access. +OFFSET_COMMIT (8) | Read | Topic | Since offset commit is part of the consuming process, it needs privileges for the read action. 
+OFFSET_FETCH (9) | Describe | Group | Similarly to OFFSET_COMMIT, the application must have privileges on group and topic level too to be able to fetch. However in this case it requires describe access instead of read. Group access is checked first, then Topic access. +OFFSET_FETCH (9) | Describe | Topic | +FIND_COORDINATOR (10) | Describe | Group | The FIND_COORDINATOR request can be of "Group" type in which case it is looking for consumergroup coordinators. This privilege would represent the Group mode. +FIND_COORDINATOR (10) | Describe | TransactionalId | This applies only on transactional producers and checked when a producer tries to find the transaction coordinator. +JOIN_GROUP (11) | Read | Group | +HEARTBEAT (12) | Read | Group | +LEAVE_GROUP (13) | Read | Group | +SYNC_GROUP (14) | Read | Group | +DESCRIBE_GROUPS (15) | Describe | Group | +LIST_GROUPS (16) | Describe | Cluster | When the broker checks to authorize a list_groups request it first checks for this cluster level authorization. If none found then it proceeds to check the groups individually. This operation doesn't return CLUSTER_AUTHORIZATION_FAILED. +LIST_GROUPS (16) | Describe | Group | If none of the groups are authorized, then just an empty response will be sent back instead of an error. This operation doesn't return CLUSTER_AUTHORIZATION_FAILED. This is applicable from the 2.1 release. +SASL_HANDSHAKE (17) | | | The SASL handshake is part of the authentication process and therefore it's not possible to apply any kind of authorization here. +API_VERSIONS (18) | | | The API_VERSIONS request is part of the Kafka protocol handshake and happens on connection and before any authentication. Therefore it's not possible to control this with authorization. +CREATE_TOPICS (19) | Create | Cluster | If there is no cluster level authorization then it won't return CLUSTER_AUTHORIZATION_FAILED but fall back to use topic level, which is just below. That'll throw error if there is a problem. +CREATE_TOPICS (19) | Create | Topic | This is applicable from the 2.0 release. +DELETE_TOPICS (20) | Delete | Topic | +DELETE_RECORDS (21) | Delete | Topic | +INIT_PRODUCER_ID (22) | Write | TransactionalId | +INIT_PRODUCER_ID (22) | IdempotentWrite | Cluster | +OFFSET_FOR_LEADER_EPOCH (23) | ClusterAction | Cluster | If there is no cluster level privilege for this operation, then it'll check for topic level one. +OFFSET_FOR_LEADER_EPOCH (23) | Describe | Topic | This is applicable from the 2.1 release. +ADD_PARTITIONS_TO_TXN (24) | Write | TransactionalId | This API is only applicable to transactional requests. It first checks for the Write action on the TransactionalId resource, then it checks the Topic in subject (below). +ADD_PARTITIONS_TO_TXN (24) | Write | Topic | +ADD_OFFSETS_TO_TXN (25) | Write | TransactionalId | Similarly to ADD_PARTITIONS_TO_TXN this is only applicable to transactional requests. It first checks for Write action on the TransactionalId resource, then it checks whether it can Read on the given group (below). 
+ADD_OFFSETS_TO_TXN (25) | Read | Group | +END_TXN (26) | Write | TransactionalId | +WRITE_TXN_MARKERS (27) | Alter | Cluster | +WRITE_TXN_MARKERS (27) | ClusterAction | Cluster | +TXN_OFFSET_COMMIT (28) | Write | TransactionalId | +TXN_OFFSET_COMMIT (28) | Read | Group | +TXN_OFFSET_COMMIT (28) | Read | Topic | +DESCRIBE_ACLS (29) | Describe | Cluster | +CREATE_ACLS (30) | Alter | Cluster | +DELETE_ACLS (31) | Alter | Cluster | +DESCRIBE_CONFIGS (32) | DescribeConfigs | Cluster | If broker configs are requested, then the broker will check cluster level privileges. +DESCRIBE_CONFIGS (32) | DescribeConfigs | Topic | If topic configs are requested, then the broker will check topic level privileges. +ALTER_CONFIGS (33) | AlterConfigs | Cluster | If broker configs are altered, then the broker will check cluster level privileges. +ALTER_CONFIGS (33) | AlterConfigs | Topic | If topic configs are altered, then the broker will check topic level privileges. +ALTER_REPLICA_LOG_DIRS (34) | Alter | Cluster | +DESCRIBE_LOG_DIRS (35) | Describe | Cluster | An empty response will be returned on authorization failure. +SASL_AUTHENTICATE (36) | | | SASL_AUTHENTICATE is part of the authentication process and therefore it's not possible to apply any kind of authorization here. +CREATE_PARTITIONS (37) | Alter | Topic | +CREATE_DELEGATION_TOKEN (38) | | | Creating delegation tokens has special rules, for this please see the Authentication using Delegation Tokens section. +CREATE_DELEGATION_TOKEN (38) | CreateTokens | User | Allows creating delegation tokens for the User resource. +RENEW_DELEGATION_TOKEN (39) | | | Renewing delegation tokens has special rules, for this please see the Authentication using Delegation Tokens section. +EXPIRE_DELEGATION_TOKEN (40) | | | Expiring delegation tokens has special rules, for this please see the Authentication using Delegation Tokens section. +DESCRIBE_DELEGATION_TOKEN (41) | Describe | DelegationToken | Describing delegation tokens has special rules, for this please see the Authentication using Delegation Tokens section. +DESCRIBE_DELEGATION_TOKEN (41) | DescribeTokens | User | Allows describing delegation tokens of the User resource. +DELETE_GROUPS (42) | Delete | Group | +ELECT_PREFERRED_LEADERS (43) | ClusterAction | Cluster | +INCREMENTAL_ALTER_CONFIGS (44) | AlterConfigs | Cluster | If broker configs are altered, then the broker will check cluster level privileges. +INCREMENTAL_ALTER_CONFIGS (44) | AlterConfigs | Topic | If topic configs are altered, then the broker will check topic level privileges. 
+ALTER_PARTITION_REASSIGNMENTS (45) | Alter | Cluster | +LIST_PARTITION_REASSIGNMENTS (46) | Describe | Cluster | +OFFSET_DELETE (47) | Delete | Group | +OFFSET_DELETE (47) | Read | Topic | +DESCRIBE_CLIENT_QUOTAS (48) | DescribeConfigs | Cluster | +ALTER_CLIENT_QUOTAS (49) | AlterConfigs | Cluster | +DESCRIBE_USER_SCRAM_CREDENTIALS (50) | Describe | Cluster | +ALTER_USER_SCRAM_CREDENTIALS (51) | Alter | Cluster | +VOTE (52) | ClusterAction | Cluster | +BEGIN_QUORUM_EPOCH (53) | ClusterAction | Cluster | +END_QUORUM_EPOCH (54) | ClusterAction | Cluster | +DESCRIBE_QUORUM (55) | Describe | Cluster | +ALTER_PARTITION (56) | ClusterAction | Cluster | +UPDATE_FEATURES (57) | Alter | Cluster | +ENVELOPE (58) | ClusterAction | Cluster | +FETCH_SNAPSHOT (59) | ClusterAction | Cluster | +DESCRIBE_CLUSTER (60) | Describe | Cluster | +DESCRIBE_PRODUCERS (61) | Read | Topic | +BROKER_REGISTRATION (62) | ClusterAction | Cluster | +BROKER_HEARTBEAT (63) | ClusterAction | Cluster | +UNREGISTER_BROKER (64) | Alter | Cluster | +DESCRIBE_TRANSACTIONS (65) | Describe | TransactionalId | +LIST_TRANSACTIONS (66) | Describe | TransactionalId | +ALLOCATE_PRODUCER_IDS (67) | ClusterAction | Cluster | +CONSUMER_GROUP_HEARTBEAT (68) | Read | Group | +CONSUMER_GROUP_DESCRIBE (69) | Read | Group | +CONTROLLER_REGISTRATION (70) | ClusterAction | Cluster | +GET_TELEMETRY_SUBSCRIPTIONS (71) | | | No authorization check is performed for this request. +PUSH_TELEMETRY (72) | | | No authorization check is performed for this request. +ASSIGN_REPLICAS_TO_DIRS (73) | ClusterAction | Cluster | +LIST_CONFIG_RESOURCES (74) | DescribeConfigs | Cluster | +DESCRIBE_TOPIC_PARTITIONS (75) | Describe | Topic | +SHARE_GROUP_HEARTBEAT (76) | Read | Group | +SHARE_GROUP_DESCRIBE (77) | Describe | Group | +SHARE_FETCH (78) | Read | Group | +SHARE_FETCH (78) | Read | Topic | +SHARE_ACKNOWLEDGE (79) | Read | Group | +SHARE_ACKNOWLEDGE (79) | Read | Topic | +INITIALIZE_SHARE_GROUP_STATE (83) | ClusterAction | Cluster | +READ_SHARE_GROUP_STATE (84) | ClusterAction | Cluster | +WRITE_SHARE_GROUP_STATE (85) | ClusterAction | Cluster | +DELETE_SHARE_GROUP_STATE (86) | ClusterAction | Cluster | +READ_SHARE_GROUP_STATE_SUMMARY (87) | ClusterAction | Cluster | +STREAMS_GROUP_HEARTBEAT (88) | Read | Group | +STREAMS_GROUP_HEARTBEAT (88) | Describe | Topic | Required for all source topics and internal topics used in the topology of the group. +STREAMS_GROUP_HEARTBEAT (88) | Create | Topic | Required for all internal topics, for the broker to automatically create internal topics. Not required if internal topics exist. +STREAMS_GROUP_DESCRIBE (89) | Describe | Group | +STREAMS_GROUP_DESCRIBE (89) | Describe | Topic | Required for all source topics and internal topics used in the topology of the group. +DESCRIBE_SHARE_GROUP_OFFSETS (90) | Describe | Group | To describe the offset information for a partition in a share group, the application must have privileges on the group and the topic. Group access is checked first, then topic access. +DESCRIBE_SHARE_GROUP_OFFSETS (90) | Describe | Topic | +ALTER_SHARE_GROUP_OFFSETS (91) | Read | Group | To alter the offset information for a partition in a share group, the application must have privileges on the group and the topic. Group access is checked first, then topic access. +ALTER_SHARE_GROUP_OFFSETS (91) | Read | Topic | +DELETE_SHARE_GROUP_OFFSETS (92) | Delete | Group | To delete the offset information for a topic in a share group, the application must have privileges on the group and the topic. 
Group access is checked first, then topic access. +DELETE_SHARE_GROUP_OFFSETS (92) | Read | Topic | + \ No newline at end of file diff --git a/content/en/41/security/encryption-and-authentication-using-ssl.md b/content/en/41/security/encryption-and-authentication-using-ssl.md new file mode 100644 index 000000000..63f53567f --- /dev/null +++ b/content/en/41/security/encryption-and-authentication-using-ssl.md @@ -0,0 +1,288 @@ +--- +title: Encryption and Authentication using SSL +description: Encryption and Authentication using SSL +weight: 3 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Encryption and Authentication using SSL + +Apache Kafka allows clients to use SSL for encryption of traffic as well as authentication. By default, SSL is disabled but can be turned on if needed. The following paragraphs explain in detail how to set up your own PKI infrastructure, use it to create certificates and configure Kafka to use these. + + 1. #### Generate SSL key and certificate for each Kafka broker + +The first step of deploying one or more brokers with SSL support is to generate a public/private keypair for every server. Since Kafka expects all keys and certificates to be stored in keystores we will use Java's keytool command for this task. The tool supports two different keystore formats, the Java specific jks format which has been deprecated by now, as well as PKCS12. PKCS12 is the default format as of Java version 9, to ensure this format is being used regardless of the Java version in use all following commands explicitly specify the PKCS12 format. + + $ keytool -keystore {keystorefile} -alias localhost -validity {validity} -genkey -keyalg RSA -storetype pkcs12 + +You need to specify two parameters in the above command: + 1. keystorefile: the keystore file that stores the keys (and later the certificate) for this broker. The keystore file contains the private and public keys of this broker, therefore it needs to be kept safe. Ideally this step is run on the Kafka broker that the key will be used on, as this key should never be transmitted/leave the server that it is intended for. + 2. validity: the valid time of the key in days. Please note that this differs from the validity period for the certificate, which will be determined in Signing the certificate. You can use the same key to request multiple certificates: if your key has a validity of 10 years, but your CA will only sign certificates that are valid for one year, you can use the same key with 10 certificates over time. + +To obtain a certificate that can be used with the private key that was just created a certificate signing request needs to be created. This signing request, when signed by a trusted CA results in the actual certificate which can then be installed in the keystore and used for authentication purposes. +To generate certificate signing requests run the following command for all server keystores created so far. + + $ keytool -keystore server.keystore.jks -alias localhost -validity {validity} -genkey -keyalg RSA -destkeystoretype pkcs12 -ext SAN=DNS:{FQDN},IP:{IPADDRESS1} + +This command assumes that you want to add hostname information to the certificate, if this is not the case, you can omit the extension parameter `-ext SAN=DNS:{FQDN},IP:{IPADDRESS1}`. Please see below for more information on this. 
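+
+For illustration, the same signing-request command with the placeholders filled in might look like the following (the validity, hostname, and IP address are example values only, not defaults):
+
+    # Example values only; substitute your own validity, FQDN, and IP address
+    $ keytool -keystore server.keystore.jks -alias localhost -validity 365 -genkey -keyalg RSA -destkeystoretype pkcs12 -ext SAN=DNS:kafka1.example.com,IP:192.0.2.10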
+
+### Host Name Verification
+
+Host name verification, when enabled, is the process of checking attributes from the certificate that is presented by the server you are connecting to against the actual hostname or IP address of that server, to ensure that you are indeed connecting to the correct server.
+The main reason for this check is to prevent man-in-the-middle attacks. For Kafka, this check has been disabled by default for a long time, but as of Kafka 2.0.0 host name verification of servers is enabled by default for client connections as well as inter-broker connections.
+Server host name verification may be disabled by setting `ssl.endpoint.identification.algorithm` to an empty string.
+For dynamically configured broker listeners, hostname verification may be disabled using `kafka-configs.sh`:
+
+
+    $ bin/kafka-configs.sh --bootstrap-server localhost:9093 --entity-type brokers --entity-name 0 --alter --add-config "listener.name.internal.ssl.endpoint.identification.algorithm="
+
+**Note:**
+
+Normally there is no good reason to disable hostname verification apart from being the quickest way to "just get it to work" followed by the promise to "fix it later when there is more time"!
+Getting hostname verification right is not that hard when done at the right time, but gets much harder once the cluster is up and running - do yourself a favor and do it now!
+
+If host name verification is enabled, clients will verify the server's fully qualified domain name (FQDN) or IP address against one of the following two fields:
+ 1. Common Name (CN)
+ 2. [Subject Alternative Name (SAN)](https://tools.ietf.org/html/rfc5280#section-4.2.1.6)
+
+While Kafka checks both fields, usage of the common name field for hostname verification has been [deprecated](https://tools.ietf.org/html/rfc2818#section-3.1) since 2000 and should be avoided if possible. In addition, the SAN field is much more flexible, allowing for multiple DNS and IP entries to be declared in a certificate.
+Another advantage is that if the SAN field is used for hostname verification, the common name can be set to a more meaningful value for authorization purposes. Since we need the SAN field to be contained in the signed certificate, it will be specified when generating the signing request. It can also be specified when generating the keypair, but this will not automatically be copied into the signing request.
+To add a SAN field, append the following argument ` -ext SAN=DNS:{FQDN},IP:{IPADDRESS}` to the keytool command:
+
+    $ keytool -keystore server.keystore.jks -alias localhost -validity {validity} -genkey -keyalg RSA -destkeystoretype pkcs12 -ext SAN=DNS:{FQDN},IP:{IPADDRESS1}
+
+ 2. #### Creating your own CA
+
+After this step each machine in the cluster has a public/private key pair which can already be used to encrypt traffic, and a certificate signing request, which is the basis for creating a certificate. To add authentication capabilities this signing request needs to be signed by a trusted authority, which will be created in this step.
+
+A certificate authority (CA) is responsible for signing certificates. A CA works like a government that issues passports - the government stamps (signs) each passport so that the passport becomes difficult to forge. Other governments verify the stamps to ensure the passport is authentic. Similarly, the CA signs the certificates, and the cryptography guarantees that a signed certificate is computationally difficult to forge.
Thus, as long as the CA is a genuine and trusted authority, the clients have a strong assurance that they are connecting to the authentic machines. + +For this guide we will be our own Certificate Authority. When setting up a production cluster in a corporate environment these certificates would usually be signed by a corporate CA that is trusted throughout the company. Please see Common Pitfalls in Production for some things to consider for this case. + +Due to a [bug](https://www.openssl.org/docs/man1.1.1/man1/x509.html#BUGS) in OpenSSL, the x509 module will not copy requested extension fields from CSRs into the final certificate. Since we want the SAN extension to be present in our certificate to enable hostname verification, we'll use the _ca_ module instead. This requires some additional configuration to be in place before we generate our CA keypair. +Save the following listing into a file called openssl-ca.cnf and adjust the values for validity and common attributes as necessary. + + HOME = . + RANDFILE = $ENV::HOME/.rnd + + #################################################################### + [ ca ] + default_ca = CA_default # The default ca section + + [ CA_default ] + + base_dir = . + certificate = $base_dir/cacert.pem # The CA certificate + private_key = $base_dir/cakey.pem # The CA private key + new_certs_dir = $base_dir # Location for new certs after signing + database = $base_dir/index.txt # Database index file + serial = $base_dir/serial.txt # The current serial number + + default_days = 1000 # How long to certify for + default_crl_days = 30 # How long before next CRL + default_md = sha256 # Use public key default MD + preserve = no # Keep passed DN ordering + + x509_extensions = ca_extensions # The extensions to add to the cert + + email_in_dn = no # Don't concat the email in the DN + copy_extensions = copy # Required to copy SANs from CSR to cert + + #################################################################### + [ req ] + default_bits = 4096 + default_keyfile = cakey.pem + distinguished_name = ca_distinguished_name + x509_extensions = ca_extensions + string_mask = utf8only + + #################################################################### + [ ca_distinguished_name ] + countryName = Country Name (2 letter code) + countryName_default = DE + + stateOrProvinceName = State or Province Name (full name) + stateOrProvinceName_default = Test Province + + localityName = Locality Name (eg, city) + localityName_default = Test Town + + organizationName = Organization Name (eg, company) + organizationName_default = Test Company + + organizationalUnitName = Organizational Unit (eg, division) + organizationalUnitName_default = Test Unit + + commonName = Common Name (e.g. 
server FQDN or YOUR name) + commonName_default = Test Name + + emailAddress = Email Address + emailAddress_default = test@test.com + + #################################################################### + [ ca_extensions ] + + subjectKeyIdentifier = hash + authorityKeyIdentifier = keyid:always, issuer + basicConstraints = critical, CA:true + keyUsage = keyCertSign, cRLSign + + #################################################################### + [ signing_policy ] + countryName = optional + stateOrProvinceName = optional + localityName = optional + organizationName = optional + organizationalUnitName = optional + commonName = supplied + emailAddress = optional + + #################################################################### + [ signing_req ] + subjectKeyIdentifier = hash + authorityKeyIdentifier = keyid,issuer + basicConstraints = CA:FALSE + keyUsage = digitalSignature, keyEncipherment + +Then create a database and serial number file, these will be used to keep track of which certificates were signed with this CA. Both of these are simply text files that reside in the same directory as your CA keys. + + $ echo 01 > serial.txt + $ touch index.txt + +With these steps done you are now ready to generate your CA that will be used to sign certificates later. + + $ openssl req -x509 -config openssl-ca.cnf -newkey rsa:4096 -sha256 -nodes -out cacert.pem -outform PEM + +The CA is simply a public/private key pair and certificate that is signed by itself, and is only intended to sign other certificates. +This keypair should be kept very safe, if someone gains access to it, they can create and sign certificates that will be trusted by your infrastructure, which means they will be able to impersonate anybody when connecting to any service that trusts this CA. +The next step is to add the generated CA to the **clients' truststore** so that the clients can trust this CA: + + $ keytool -keystore client.truststore.jks -alias CARoot -import -file ca-cert + +**Note:** If you configure the Kafka brokers to require client authentication by setting ssl.client.auth to be "requested" or "required" in the Kafka brokers config then you must provide a truststore for the Kafka brokers as well and it should have all the CA certificates that clients' keys were signed by. + + $ keytool -keystore server.truststore.jks -alias CARoot -import -file ca-cert + +In contrast to the keystore in step 1 that stores each machine's own identity, the truststore of a client stores all the certificates that the client should trust. Importing a certificate into one's truststore also means trusting all certificates that are signed by that certificate. As the analogy above, trusting the government (CA) also means trusting all passports (certificates) that it has issued. This attribute is called the chain of trust, and it is particularly useful when deploying SSL on a large Kafka cluster. You can sign all certificates in the cluster with a single CA, and have all machines share the same truststore that trusts the CA. That way all machines can authenticate all other machines. + 3. 
#### Signing the certificate + +Then sign it with the CA: + + $ openssl ca -config openssl-ca.cnf -policy signing_policy -extensions signing_req -out {server certificate} -infiles {certificate signing request} + +Finally, you need to import both the certificate of the CA and the signed certificate into the keystore: + + $ keytool -keystore {keystore} -alias CARoot -import -file {CA certificate} + $ keytool -keystore {keystore} -alias localhost -import -file cert-signed + +The definitions of the parameters are the following: + 1. keystore: the location of the keystore + 2. CA certificate: the certificate of the CA + 3. certificate signing request: the csr created with the server key + 4. server certificate: the file to write the signed certificate of the server to +This will leave you with one truststore called _truststore.jks_ \- this can be the same for all clients and brokers and does not contain any sensitive information, so there is no need to secure this. +Additionally you will have one _server.keystore.jks_ file per node which contains that nodes keys, certificate and your CAs certificate, please refer to Configuring Kafka Brokers and Configuring Kafka Clients for information on how to use these files. + +For some tooling assistance on this topic, please check out the [easyRSA](https://github.com/OpenVPN/easy-rsa) project which has extensive scripting in place to help with these steps. + +### SSL key and certificates in PEM format + +From 2.7.0 onwards, SSL key and trust stores can be configured for Kafka brokers and clients directly in the configuration in PEM format. This avoids the need to store separate files on the file system and benefits from password protection features of Kafka configuration. PEM may also be used as the store type for file-based key and trust stores in addition to JKS and PKCS12. To configure PEM key store directly in the broker or client configuration, private key in PEM format should be provided in `ssl.keystore.key` and the certificate chain in PEM format should be provided in `ssl.keystore.certificate.chain`. To configure trust store, trust certificates, e.g. public certificate of CA, should be provided in `ssl.truststore.certificates`. Since PEM is typically stored as multi-line base-64 strings, the configuration value can be included in Kafka configuration as multi-line strings with lines terminating in backslash ('\') for line continuation. + +Store password configs `ssl.keystore.password` and `ssl.truststore.password` are not used for PEM. If private key is encrypted using a password, the key password must be provided in `ssl.key.password`. Private keys may be provided in unencrypted form without a password. In production deployments, configs should be encrypted or externalized using password protection feature in Kafka in this case. Note that the default SSL engine factory has limited capabilities for decryption of encrypted private keys when external tools like OpenSSL are used for encryption. Third party libraries like BouncyCastle may be integrated with a custom `SslEngineFactory` to support a wider range of encrypted private keys. + + 4. #### Common Pitfalls in Production + +The above paragraphs show the process to create your own CA and use it to sign certificates for your cluster. While very useful for sandbox, dev, test, and similar systems, this is usually not the correct process to create certificates for a production cluster in a corporate environment. 
Enterprises will normally operate their own CA and users can send in CSRs to be signed with this CA, which has the benefit that users are not responsible for keeping the CA secure, and that there is a central authority that everybody can trust. However, it also takes away a lot of control over the process of signing certificates from the user. Quite often the people operating corporate CAs will apply tight restrictions on certificates that can cause issues when trying to use these certificates with Kafka.
+ 1. **[Extended Key Usage](https://tools.ietf.org/html/rfc5280#section-4.2.1.12)**
+Certificates may contain an extension field that controls the purpose for which the certificate can be used. If this field is empty, there are no restrictions on the usage, but if any usage is specified here, valid SSL implementations have to enforce these usages.
+Relevant usages for Kafka are:
+  * Client authentication
+  * Server authentication
+Kafka brokers need both these usages to be allowed, as for intra-cluster communication every broker will behave as both the client and the server towards other brokers. It is not uncommon for corporate CAs to have a signing profile for webservers and use this for Kafka as well, which will only contain the _serverAuth_ usage value and cause the SSL handshake to fail.
+ 2. **Intermediate Certificates**
+Corporate Root CAs are often kept offline for security reasons. To enable day-to-day usage, so-called intermediate CAs are created, which are then used to sign the final certificates. When importing a certificate into the keystore that was signed by an intermediate CA it is necessary to provide the entire chain of trust up to the root CA. This can be done by simply _cat_ ing the certificate files into one combined certificate file and then importing this with keytool.
+ 3. **Failure to copy extension fields**
+CA operators are often hesitant to copy requested extension fields from CSRs and prefer to specify these themselves, as this makes it harder for a malicious party to obtain certificates with potentially misleading or fraudulent values. It is advisable to double-check signed certificates to see whether they contain all requested SAN fields, to enable proper hostname verification. The following command can be used to print certificate details to the console, which should be compared with what was originally requested:
+
+    $ openssl x509 -in certificate.crt -text -noout
+
+ 5. #### Configuring Kafka Brokers
+
+If SSL is not enabled for inter-broker communication (see below for how to enable it), both PLAINTEXT and SSL ports will be necessary.
+
+    listeners=PLAINTEXT://host.name:port,SSL://host.name:port
+
+The following SSL configs are needed on the broker side:
+
+    ssl.keystore.location=/var/private/ssl/server.keystore.jks
+    ssl.keystore.password=test1234
+    ssl.key.password=test1234
+    ssl.truststore.location=/var/private/ssl/server.truststore.jks
+    ssl.truststore.password=test1234
+
+Note: ssl.truststore.password is technically optional but highly recommended. If a password is not set, access to the truststore is still available, but integrity checking is disabled. Optional settings that are worth considering:
+ 1. ssl.client.auth=none ("required" => client authentication is required, "requested" => client authentication is requested and clients without certs can still connect. The usage of "requested" is discouraged as it provides a false sense of security and misconfigured clients will still connect successfully.)
+ 2. ssl.cipher.suites (Optional).
A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. (Default is an empty list) + 3. ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1 (list out the SSL protocols that you are going to accept from clients. Do note that SSL is deprecated in favor of TLS and using SSL in production is not recommended) + 4. ssl.keystore.type=JKS + 5. ssl.truststore.type=JKS + 6. ssl.secure.random.implementation=SHA1PRNG +If you want to enable SSL for inter-broker communication, add the following to the server.properties file (it defaults to PLAINTEXT) + + security.inter.broker.protocol=SSL + +Due to import regulations in some countries, the Oracle implementation limits the strength of cryptographic algorithms available by default. If stronger algorithms are needed (for example, AES with 256-bit keys), the [JCE Unlimited Strength Jurisdiction Policy Files](https://www.oracle.com/technetwork/java/javase/downloads/index.html) must be obtained and installed in the JDK/JRE. See the [JCA Providers Documentation](https://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html) for more information. + +The JRE/JDK will have a default pseudo-random number generator (PRNG) that is used for cryptography operations, so it is not required to configure the implementation used with the `ssl.secure.random.implementation`. However, there are performance issues with some implementations (notably, the default chosen on Linux systems, `NativePRNG`, utilizes a global lock). In cases where performance of SSL connections becomes an issue, consider explicitly setting the implementation to be used. The `SHA1PRNG` implementation is non-blocking, and has shown very good performance characteristics under heavy load (50 MB/sec of produced messages, plus replication traffic, per-broker). + +Once you start the broker you should be able to see in the server.log + + with addresses: PLAINTEXT -> EndPoint(192.168.64.1,9092,PLAINTEXT),SSL -> EndPoint(192.168.64.1,9093,SSL) + +To check quickly if the server keystore and truststore are setup properly you can run the following command + + $ openssl s_client -debug -connect localhost:9093 -tls1 + +(Note: TLSv1 should be listed under ssl.enabled.protocols) +In the output of this command you should see server's certificate: + + -----BEGIN CERTIFICATE----- + {variable sized random bytes} + -----END CERTIFICATE----- + subject=/C=US/ST=CA/L=Santa Clara/O=org/OU=org/CN=Sriharsha Chintalapani + issuer=/C=US/ST=CA/L=Santa Clara/O=org/OU=org/CN=kafka/emailAddress=test@test.com + +If the certificate does not show up or if there are any other error messages then your keystore is not setup properly. + 6. #### Configuring Kafka Clients + +SSL is supported only for the new Kafka Producer and Consumer, the older API is not supported. The configs for SSL will be the same for both producer and consumer. +If client authentication is not required in the broker, then the following is a minimal configuration example: + + security.protocol=SSL + ssl.truststore.location=/var/private/ssl/client.truststore.jks + ssl.truststore.password=test1234 + +Note: ssl.truststore.password is technically optional but highly recommended. If a password is not set access to the truststore is still available, but integrity checking is disabled. 
If client authentication is required, then a keystore must be created as in step 1 and the following must also be configured: + + ssl.keystore.location=/var/private/ssl/client.keystore.jks + ssl.keystore.password=test1234 + ssl.key.password=test1234 + +Other configuration settings that may also be needed depending on your requirements and the broker configuration: + 1. ssl.provider (Optional). The name of the security provider used for SSL connections. Default value is the default security provider of the JVM. + 2. ssl.cipher.suites (Optional). A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. + 3. ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1. It should list at least one of the protocols configured on the broker side. + 4. ssl.truststore.type=JKS + 5. ssl.keystore.type=JKS + +Examples using console-producer and console-consumer: + + $ bin/kafka-console-producer.sh --bootstrap-server localhost:9093 --topic test --producer.config client-ssl.properties + $ bin/kafka-console-consumer.sh --bootstrap-server localhost:9093 --topic test --consumer.config client-ssl.properties + + + diff --git a/content/en/41/security/incorporating-security-features-in-a-running-cluster.md b/content/en/41/security/incorporating-security-features-in-a-running-cluster.md new file mode 100644 index 000000000..d8fdd4d13 --- /dev/null +++ b/content/en/41/security/incorporating-security-features-in-a-running-cluster.md @@ -0,0 +1,74 @@ +--- +title: Incorporating Security Features in a Running Cluster +description: Incorporating Security Features in a Running Cluster +weight: 6 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Incorporating Security Features in a Running Cluster + +You can secure a running cluster via one or more of the supported protocols discussed previously. This is done in phases: + + * Incrementally bounce the cluster nodes to open additional secured port(s). + * Restart clients using the secured rather than PLAINTEXT port (assuming you are securing the client-broker connection). + * Incrementally bounce the cluster again to enable broker-to-broker security (if this is required). + * A final incremental bounce to close the PLAINTEXT port. + + + +The specific steps for configuring SSL and SASL are described in sections 7.3 and 7.4. Follow these steps to enable security for your desired protocol(s). + +The security implementation lets you configure different protocols for both broker-client and broker-broker communication. These must be enabled in separate bounces. A PLAINTEXT port must be left open throughout so brokers and/or clients can continue to communicate. + +When performing an incremental bounce, stop the brokers cleanly via a SIGTERM. It's also good practice to wait for restarted replicas to return to the ISR list before moving on to the next node. + +As an example, say we wish to encrypt both broker-client and broker-broker communication with SSL. In the first incremental bounce, an SSL port is opened on each node: + + + listeners=PLAINTEXT://broker1:9091,SSL://broker1:9092 + +We then restart the clients, changing their config to point at the newly opened, secured port: + + + bootstrap.servers = [broker1:9092,...]
+ security.protocol = SSL + ...etc + +In the second incremental server bounce we instruct Kafka to use SSL as the broker-broker protocol (which will use the same SSL port): + + + listeners=PLAINTEXT://broker1:9091,SSL://broker1:9092 + security.inter.broker.protocol=SSL + +In the final bounce we secure the cluster by closing the PLAINTEXT port: + + + listeners=SSL://broker1:9092 + security.inter.broker.protocol=SSL + +Alternatively we might choose to open multiple ports so that different protocols can be used for broker-broker and broker-client communication. Say we wished to use SSL encryption throughout (i.e. for broker-broker and broker-client communication) but we'd like to add SASL authentication to the broker-client connection also. We would achieve this by opening two additional ports during the first bounce: + + + listeners=PLAINTEXT://broker1:9091,SSL://broker1:9092,SASL_SSL://broker1:9093 + +We would then restart the clients, changing their config to point at the newly opened, SASL & SSL secured port: + + + bootstrap.servers = [broker1:9093,...] + security.protocol = SASL_SSL + ...etc + +The second server bounce would switch the cluster to use encrypted broker-broker communication via the SSL port we previously opened on port 9092: + + + listeners=PLAINTEXT://broker1:9091,SSL://broker1:9092,SASL_SSL://broker1:9093 + security.inter.broker.protocol=SSL + +The final bounce secures the cluster by closing the PLAINTEXT port. + + + listeners=SSL://broker1:9092,SASL_SSL://broker1:9093 + security.inter.broker.protocol=SSL diff --git a/content/en/41/security/listener-configuration.md b/content/en/41/security/listener-configuration.md new file mode 100644 index 000000000..84c325e67 --- /dev/null +++ b/content/en/41/security/listener-configuration.md @@ -0,0 +1,80 @@ +--- +title: Listener Configuration +description: Listener Configuration +weight: 2 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Listener Configuration + +In order to secure a Kafka cluster, it is necessary to secure the channels that are used to communicate with the servers. Each server must define the set of listeners that are used to receive requests from clients as well as other servers. Each listener may be configured to authenticate clients using various mechanisms and to ensure traffic between the server and the client is encrypted. This section provides a primer for the configuration of listeners. + +Kafka servers support listening for connections on multiple ports. This is configured through the `listeners` property in the server configuration, which accepts a comma-separated list of the listeners to enable. At least one listener must be defined on each server. The format of each listener defined in `listeners` is given below: + + + {LISTENER_NAME}://{hostname}:{port} + +The `LISTENER_NAME` is usually a descriptive name which defines the purpose of the listener. For example, many configurations use a separate listener for client traffic, so they might refer to the corresponding listener as `CLIENT` in the configuration: + + + listeners=CLIENT://localhost:9092 + +The security protocol of each listener is defined in a separate configuration: `listener.security.protocol.map`. The value is a comma-separated list of each listener mapped to its security protocol. For example, the follow value configuration specifies that the `CLIENT` listener will use SSL while the `BROKER` listener will use plaintext. 
+ + + listener.security.protocol.map=CLIENT:SSL,BROKER:PLAINTEXT + +Possible options (case-insensitive) for the security protocol are given below: + + 1. PLAINTEXT + 2. SSL + 3. SASL_PLAINTEXT + 4. SASL_SSL + + + +The plaintext protocol provides no security and does not require any additional configuration. In the following sections, this document covers how to configure the remaining protocols. + +If each required listener uses a separate security protocol, it is also possible to use the security protocol name as the listener name in `listeners`. Using the example above, we could skip the definition of the `CLIENT` and `BROKER` listeners using the following definition: + + + listeners=SSL://localhost:9092,PLAINTEXT://localhost:9093 + +However, we recommend users to provide explicit names for the listeners since it makes the intended usage of each listener clearer. + +Among the listeners in this list, it is possible to declare the listener to be used for inter-broker communication by setting the `inter.broker.listener.name` configuration to the name of the listener. The primary purpose of the inter-broker listener is partition replication. If not defined, then the inter-broker listener is determined by the security protocol defined by `security.inter.broker.protocol`, which defaults to `PLAINTEXT`. + +In a KRaft cluster, a broker is any server which has the `broker` role enabled in `process.roles` and a controller is any server which has the `controller` role enabled. Listener configuration depends on the role. The listener defined by `inter.broker.listener.name` is used exclusively for requests between brokers. Controllers, on the other hand, must use separate listener which is defined by the `controller.listener.names` configuration. This cannot be set to the same value as the inter-broker listener. + +Controllers receive requests both from other controllers and from brokers. For this reason, even if a server does not have the `controller` role enabled (i.e. it is just a broker), it must still define the controller listener along with any security properties that are needed to configure it. For example, we might use the following configuration on a standalone broker: + + + process.roles=broker + listeners=BROKER://localhost:9092 + inter.broker.listener.name=BROKER + controller.quorum.bootstrap.servers=localhost:9093 + controller.listener.names=CONTROLLER + listener.security.protocol.map=BROKER:SASL_SSL,CONTROLLER:SASL_SSL + +The controller listener is still configured in this example to use the `SASL_SSL` security protocol, but it is not included in `listeners` since the broker does not expose the controller listener itself. The port that will be used in this case comes from the `controller.quorum.voters` configuration, which defines the complete list of controllers. + +For KRaft servers which have both the broker and controller role enabled, the configuration is similar. The only difference is that the controller listener must be included in `listeners`: + + + process.roles=broker,controller + listeners=BROKER://localhost:9092,CONTROLLER://localhost:9093 + inter.broker.listener.name=BROKER + controller.quorum.bootstrap.servers=localhost:9093 + controller.listener.names=CONTROLLER + listener.security.protocol.map=BROKER:SASL_SSL,CONTROLLER:SASL_SSL + +It is a requirement that the host and port defined in `controller.quorum.bootstrap.servers` is routed to the exposed controller listeners. For example, here the `CONTROLLER` listener is bound to localhost:9093. 
The connection string defined by `controller.quorum.bootstrap.servers` must then also use localhost:9093, as it does here. + +The controller will accept requests on all listeners defined by `controller.listener.names`. Typically there would be just one controller listener, but it is possible to have more. For example, this provides a way to change the active listener from one port or security protocol to another through a roll of the cluster (one roll to expose the new listener, and one roll to remove the old listener). When multiple controller listeners are defined, the first one in the list will be used for outbound requests. + +It is conventional in Kafka to use a separate listener for clients. This allows the inter-cluster listeners to be isolated at the network level. In the case of the controller listener in KRaft, the listener should be isolated since clients do not work with it anyway. Clients are expected to connect to any other listener configured on a broker. Any requests that are bound for the controller will be forwarded as described below + +In the following section, this document covers how to enable SSL on a listener for encryption as well as authentication. The subsequent section will then cover additional authentication mechanisms using SASL. diff --git a/content/en/41/security/security-overview.md b/content/en/41/security/security-overview.md new file mode 100644 index 000000000..4cd43601d --- /dev/null +++ b/content/en/41/security/security-overview.md @@ -0,0 +1,24 @@ +--- +title: Security Overview +description: Security Overview +weight: 1 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Security Overview + +The following security measures are currently supported: + + 1. Authentication of connections to brokers from clients (producers and consumers), other brokers and tools, using either SSL or SASL. Kafka supports the following SASL mechanisms: + * SASL/GSSAPI (Kerberos) - starting at version 0.9.0.0 + * SASL/PLAIN - starting at version 0.10.0.0 + * SASL/SCRAM-SHA-256 and SASL/SCRAM-SHA-512 - starting at version 0.10.2.0 + * SASL/OAUTHBEARER - starting at version 2.0 + 2. Encryption of data transferred between brokers and clients, between brokers, or between brokers and tools using SSL (Note that there is a performance degradation when SSL is enabled, the magnitude of which depends on the CPU type and the JVM implementation.) + 3. Authorization of read / write operations by clients + 4. Authorization is pluggable and integration with external authorization services is supported + +It's worth noting that security is optional - non-secured clusters are supported, as well as a mix of authenticated, unauthenticated, encrypted and non-encrypted clients. The guides below explain how to configure and use the security features in both clients and brokers. 
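+As a small, hypothetical illustration of such a mix (listener names, host, and ports are placeholders, not recommendations), a single broker can expose an unauthenticated plaintext listener alongside an authenticated, encrypted one and serve both kinds of clients at once:
+
+    listeners=PLAINTEXT://broker1:9092,SASL_SSL://broker1:9093
+    listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SASL_SSL:SASL_SSL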
diff --git a/content/en/41/streams/_index.md b/content/en/41/streams/_index.md new file mode 100644 index 000000000..f68d72a5c --- /dev/null +++ b/content/en/41/streams/_index.md @@ -0,0 +1,10 @@ +--- +title: Kafka Streams +description: +weight: 9 +tags: ['kafka', 'docs', 'streams'] +aliases: +keywords: +type: docs +--- + diff --git a/content/en/41/streams/architecture.md b/content/en/41/streams/architecture.md new file mode 100644 index 000000000..eb5fb1f89 --- /dev/null +++ b/content/en/41/streams/architecture.md @@ -0,0 +1,87 @@ +--- +title: Architecture +description: +weight: 5 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Architecture + +[Introduction](/41/streams/) [Run Demo App](/41/streams/quickstart) [Tutorial: Write App](/41/streams/tutorial) [Concepts](/41/streams/core-concepts) [Architecture](/41/streams/architecture) [Developer Guide](/41/streams/developer-guide/) [Upgrade](/41/streams/upgrade-guide) + +Kafka Streams simplifies application development by building on the Kafka producer and consumer libraries and leveraging the native capabilities of Kafka to offer data parallelism, distributed coordination, fault tolerance, and operational simplicity. In this section, we describe how Kafka Streams works underneath the covers. + +The picture below shows the anatomy of an application that uses the Kafka Streams library. Let's walk through some details. + +![](/41/images/streams-architecture-overview.jpg) + +# Stream Partitions and Tasks + +The messaging layer of Kafka partitions data for storing and transporting it. Kafka Streams partitions data for processing it. In both cases, this partitioning is what enables data locality, elasticity, scalability, high performance, and fault tolerance. Kafka Streams uses the concepts of **partitions** and **tasks** as logical units of its parallelism model based on Kafka topic partitions. There are close links between Kafka Streams and Kafka in the context of parallelism: + + * Each **stream partition** is a totally ordered sequence of data records and maps to a Kafka **topic partition**. + * A **data record** in the stream maps to a Kafka **message** from that topic. + * The **keys** of data records determine the partitioning of data in both Kafka and Kafka Streams, i.e., how data is routed to specific partitions within topics. + + + +An application's processor topology is scaled by breaking it into multiple tasks. More specifically, Kafka Streams creates a fixed number of tasks based on the input stream partitions for the application, with each task assigned a list of partitions from the input streams (i.e., Kafka topics). The assignment of partitions to tasks never changes so that each task is a fixed unit of parallelism of the application. Tasks can then instantiate their own processor topology based on the assigned partitions; they also maintain a buffer for each of its assigned partitions and process messages one-at-a-time from these record buffers. As a result stream tasks can be processed independently and in parallel without manual intervention. + +Slightly simplified, the maximum parallelism at which your application may run is bounded by the maximum number of stream tasks, which itself is determined by maximum number of partitions of the input topic(s) the application is reading from. For example, if your input topic has 5 partitions, then you can run up to 5 applications instances. These instances will collaboratively process the topic's data. 
If you run a larger number of app instances than partitions of the input topic, the "excess" app instances will launch but remain idle; however, if one of the busy instances goes down, one of the idle instances will resume the former's work. + +It is important to understand that Kafka Streams is not a resource manager, but a library that "runs" anywhere its stream processing application runs. Multiple instances of the application are executed either on the same machine, or spread across multiple machines, and tasks can be distributed automatically by the library to those running application instances. The assignment of partitions to tasks never changes; if an application instance fails, all its assigned tasks will be automatically restarted on other instances and continue to consume from the same stream partitions. + +**NOTE:** Topic partitions are assigned to tasks, and tasks are assigned to all threads over all instances, in a best-effort attempt to trade off load-balancing and stickiness of stateful tasks. For this assignment, Kafka Streams uses the [StreamsPartitionAssignor](https://github.com/apache/kafka/blob/trunk/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignor.java) class and doesn't let you change to a different assignor. If you try to use a different assignor, Kafka Streams ignores it. + +The following diagram shows two tasks, each assigned one partition of the input streams. + +![](/41/images/streams-architecture-tasks.jpg) + + +# Threading Model + +Kafka Streams allows the user to configure the number of **threads** that the library can use to parallelize processing within an application instance. Each thread can execute one or more tasks with their processor topologies independently. For example, the following diagram shows one stream thread running two stream tasks. + +![](/41/images/streams-architecture-threads.jpg) + +Starting more stream threads or more instances of the application merely amounts to replicating the topology and having it process a different subset of Kafka partitions, effectively parallelizing processing. It is worth noting that there is no shared state amongst the threads, so no inter-thread coordination is necessary. This makes it very simple to run topologies in parallel across the application instances and threads. The assignment of Kafka topic partitions amongst the various stream threads is transparently handled by Kafka Streams leveraging [Kafka's coordination](https://cwiki.apache.org/confluence/x/foynAw) functionality. + +As we described above, scaling your stream processing application with Kafka Streams is easy: you merely need to start additional instances of your application, and Kafka Streams takes care of distributing partitions amongst tasks that run in the application instances. You can start as many threads of the application as there are input Kafka topic partitions so that, across all running instances of an application, every thread (or rather, the tasks it runs) has at least one input partition to process. + +As of Kafka 2.8, you can scale stream threads in much the same way you can scale your Kafka Streams clients. Simply add or remove stream threads and Kafka Streams will take care of redistributing the partitions. You may also add threads to replace stream threads that have died, removing the need to restart clients to recover the number of threads running.
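+A minimal sketch of this kind of dynamic scaling is shown below (topic names, the application ID, and the bootstrap server are placeholders; the trivial pass-through topology is only there to have something to run):
+
+    import java.util.Optional;
+    import java.util.Properties;
+    import org.apache.kafka.common.serialization.Serdes;
+    import org.apache.kafka.streams.KafkaStreams;
+    import org.apache.kafka.streams.StreamsBuilder;
+    import org.apache.kafka.streams.StreamsConfig;
+
+    public class ScaleStreamThreads {
+        public static void main(String[] args) {
+            Properties props = new Properties();
+            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "scaling-example");   // placeholder application ID
+            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
+            props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
+            props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
+
+            StreamsBuilder builder = new StreamsBuilder();
+            builder.stream("input-topic").to("output-topic"); // trivial pass-through topology
+
+            KafkaStreams streams = new KafkaStreams(builder.build(), props);
+            streams.start();
+
+            // Grow the instance by one stream thread; partitions are redistributed onto it automatically.
+            Optional<String> added = streams.addStreamThread();
+            added.ifPresent(name -> System.out.println("Added " + name));
+
+            // Shrink again; the removed thread's tasks move back to the remaining threads.
+            Optional<String> removed = streams.removeStreamThread();
+            removed.ifPresent(name -> System.out.println("Removed " + name));
+
+            streams.close();
+        }
+    }
+
+Both calls return an `Optional` naming the thread that was added or removed, and adding a thread in this way is also how a replacement can be brought up after an existing stream thread has died.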
+ + + + +# Local State Stores + +Kafka Streams provides so-called **state stores** , which can be used by stream processing applications to store and query data, which is an important capability when implementing stateful operations. The [Kafka Streams DSL](/41/streams/developer-guide/dsl-api.html), for example, automatically creates and manages such state stores when you are calling stateful operators such as `join()` or `aggregate()`, or when you are windowing a stream. + +Every stream task in a Kafka Streams application may embed one or more local state stores that can be accessed via APIs to store and query data required for processing. Kafka Streams offers fault-tolerance and automatic recovery for such local state stores. + +The following diagram shows two stream tasks with their dedicated local state stores. + +![](/41/images/streams-architecture-states.jpg) + + +# Fault Tolerance + +Kafka Streams builds on fault-tolerance capabilities integrated natively within Kafka. Kafka partitions are highly available and replicated; so when stream data is persisted to Kafka it is available even if the application fails and needs to re-process it. Tasks in Kafka Streams leverage the fault-tolerance capability offered by the Kafka consumer client to handle failures. If a task runs on a machine that fails, Kafka Streams automatically restarts the task in one of the remaining running instances of the application. + +In addition, Kafka Streams makes sure that the local state stores are robust to failures, too. For each state store, it maintains a replicated changelog Kafka topic in which it tracks any state updates. These changelog topics are partitioned as well so that each local state store instance, and hence the task accessing the store, has its own dedicated changelog topic partition. [Log compaction](/41/#compaction) is enabled on the changelog topics so that old data can be purged safely to prevent the topics from growing indefinitely. If tasks run on a machine that fails and are restarted on another machine, Kafka Streams guarantees to restore their associated state stores to the content before the failure by replaying the corresponding changelog topics prior to resuming the processing on the newly started tasks. As a result, failure handling is completely transparent to the end user. + +Note that the cost of task (re)initialization typically depends primarily on the time for restoring the state by replaying the state stores' associated changelog topics. To minimize this restoration time, users can configure their applications to have **standby replicas** of local states (i.e. fully replicated copies of the state). When a task migration happens, Kafka Streams will assign a task to an application instance where such a standby replica already exists in order to minimize the task (re)initialization cost. See `num.standby.replicas` in the [**Kafka Streams Configs**](/41/#streamsconfigs) section. Starting in 2.6, Kafka Streams will guarantee that a task is only ever assigned to an instance with a fully caught-up local copy of the state, if such an instance exists. Standby tasks will increase the likelihood that a caught-up instance exists in the case of a failure. + +You can also configure standby replicas with rack awareness. When configured, Kafka Streams will attempt to distribute a standby task on a different "rack" than the active one, thus having a faster recovery time when the rack of the active tasks fails. 
See `rack.aware.assignment.tags` in the [**Kafka Streams Developer Guide**](/41/streams/developer-guide/config-streams.html#rack-aware-assignment-tags) section. + +There is also a client config `client.rack` which can set the rack for a Kafka consumer. If brokers also have their rack set via `broker.rack`, then rack-aware task assignment can be enabled via `rack.aware.assignment.strategy` (cf. [**Kafka Streams Developer Guide**](/41/streams/developer-guide/config-streams.html#rack-aware-assignment-strategy)) to compute a task assignment which can reduce cross-rack traffic by trying to assign tasks to clients with the same rack. Note that `client.rack` can also be used to distribute standby tasks to different racks from the active ones, which has similar functionality to `rack.aware.assignment.tags`. Currently, `rack.aware.assignment.tags` takes precedence in distributing standby tasks, which means that if both configs are present, `rack.aware.assignment.tags` will be used for distributing standby tasks on different racks from the active ones because it can configure more tag keys. + +[Previous](/41/streams/core-concepts) [Next](/41/streams/developer-guide) + + * [Documentation](/documentation) + * [Kafka Streams](/streams) + + diff --git a/content/en/41/streams/core-concepts.md b/content/en/41/streams/core-concepts.md new file mode 100644 index 000000000..cd8016a83 --- /dev/null +++ b/content/en/41/streams/core-concepts.md @@ -0,0 +1,164 @@ +--- +title: Core Concepts +description: +weight: 4 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Core Concepts + +[Introduction](/41/streams/) [Run Demo App](/41/streams/quickstart) [Tutorial: Write App](/41/streams/tutorial) [Concepts](/41/streams/core-concepts) [Architecture](/41/streams/architecture) [Developer Guide](/41/streams/developer-guide/) [Upgrade](/41/streams/upgrade-guide) + +Kafka Streams is a client library for processing and analyzing data stored in Kafka. It builds upon important stream processing concepts such as properly distinguishing between event time and processing time, windowing support, and simple yet efficient management and real-time querying of application state. + +Kafka Streams has a **low barrier to entry** : You can quickly write and run a small-scale proof-of-concept on a single machine; and you only need to run additional instances of your application on multiple machines to scale up to high-volume production workloads. Kafka Streams transparently handles the load balancing of multiple instances of the same application by leveraging Kafka's parallelism model. + +Some highlights of Kafka Streams: + + * Designed as a **simple and lightweight client library** , which can be easily embedded in any Java application and integrated with any existing packaging, deployment and operational tools that users have for their streaming applications. + * Has **no external dependencies on systems other than Apache Kafka itself** as the internal messaging layer; notably, it uses Kafka's partitioning model to horizontally scale processing while maintaining strong ordering guarantees. + * Supports **fault-tolerant local state** , which enables very fast and efficient stateful operations like windowed joins and aggregations. + * Supports **exactly-once** processing semantics to guarantee that each record will be processed once and only once even when there is a failure on either Streams clients or Kafka brokers in the middle of processing.
+ * Employs **one-record-at-a-time processing** to achieve millisecond processing latency, and supports **event-time based windowing operations** with out-of-order arrival of records. + * Offers necessary stream processing primitives, along with a **high-level Streams DSL** and a **low-level Processor API**. + + + +We first summarize the key concepts of Kafka Streams. + +# Stream Processing Topology + + * A **stream** is the most important abstraction provided by Kafka Streams: it represents an unbounded, continuously updating data set. A stream is an ordered, replayable, and fault-tolerant sequence of immutable data records, where a **data record** is defined as a key-value pair. + * A **stream processing application** is any program that makes use of the Kafka Streams library. It defines its computational logic through one or more **processor topologies** , where a processor topology is a graph of stream processors (nodes) that are connected by streams (edges). + * A [**stream processor**](/41/streams/developer-guide/processor-api#defining-a-stream-processor) is a node in the processor topology; it represents a processing step to transform data in streams by receiving one input record at a time from its upstream processors in the topology, applying its operation to it, and may subsequently produce one or more output records to its downstream processors. + +There are two special processors in the topology: + * **Source Processor** : A source processor is a special type of stream processor that does not have any upstream processors. It produces an input stream to its topology from one or multiple Kafka topics by consuming records from these topics and forwarding them to its down-stream processors. + * **Sink Processor** : A sink processor is a special type of stream processor that does not have down-stream processors. It sends any received records from its up-stream processors to a specified Kafka topic. + +Note that in normal processor nodes other remote systems can also be accessed while processing the current record. Therefore the processed results can either be streamed back into Kafka or written to an external system. ![](/41/images/streams-architecture-topology.jpg) + +Kafka Streams offers two ways to define the stream processing topology: the [**Kafka Streams DSL**](/41/streams/developer-guide/dsl-api.html) provides the most common data transformation operations such as `map`, `filter`, `join` and `aggregations` out of the box; the lower-level [**Processor API**](/41/streams/developer-guide/processor-api.html) allows developers define and connect custom processors as well as to interact with state stores. + +A processor topology is merely a logical abstraction for your stream processing code. At runtime, the logical topology is instantiated and replicated inside the application for parallel processing (see [**Stream Partitions and Tasks**](/41/streams/architecture#streams_architecture_tasks) for details). + +# Time + +A critical aspect in stream processing is the notion of **time** , and how it is modeled and integrated. For example, some operations such as **windowing** are defined based on time boundaries. + +Common notions of time in streams are: + + * **Event time** \- The point in time when an event or data record occurred, i.e. was originally created "at the source". **Example:** If the event is a geo-location change reported by a GPS sensor in a car, then the associated event-time would be the time when the GPS sensor captured the location change. 
+ * **Processing time** \- The point in time when the event or data record happens to be processed by the stream processing application, i.e. when the record is being consumed. The processing time may be milliseconds, hours, or days etc. later than the original event time. **Example:** Imagine an analytics application that reads and processes the geo-location data reported from car sensors to present it to a fleet management dashboard. Here, processing-time in the analytics application might be milliseconds or seconds (e.g. for real-time pipelines based on Apache Kafka and Kafka Streams) or hours (e.g. for batch pipelines based on Apache Hadoop or Apache Spark) after event-time. + * **Ingestion time** \- The point in time when an event or data record is stored in a topic partition by a Kafka broker. The difference to event time is that this ingestion timestamp is generated when the record is appended to the target topic by the Kafka broker, not when the record is created "at the source". The difference to processing time is that processing time is when the stream processing application processes the record. **For example,** if a record is never processed, there is no notion of processing time for it, but it still has an ingestion time. + + + +The choice between event-time and ingestion-time is actually done through the configuration of Kafka (not Kafka Streams): From Kafka 0.10.x onwards, timestamps are automatically embedded into Kafka messages. Depending on Kafka's configuration these timestamps represent event-time or ingestion-time. The respective Kafka configuration setting can be specified on the broker level or per topic. The default timestamp extractor in Kafka Streams will retrieve these embedded timestamps as-is. Hence, the effective time semantics of your application depend on the effective Kafka configuration for these embedded timestamps. + +Kafka Streams assigns a **timestamp** to every data record via the `TimestampExtractor` interface. These per-record timestamps describe the progress of a stream with regards to time and are leveraged by time-dependent operations such as window operations. As a result, this time will only advance when a new record arrives at the processor. We call this data-driven time the **stream time** of the application to differentiate with the **wall-clock time** when this application is actually executing. Concrete implementations of the `TimestampExtractor` interface will then provide different semantics to the stream time definition. For example retrieving or computing timestamps based on the actual contents of data records such as an embedded timestamp field to provide event time semantics, and returning the current wall-clock time thereby yield processing time semantics to stream time. Developers can thus enforce different notions of time depending on their business needs. + +Finally, whenever a Kafka Streams application writes records to Kafka, then it will also assign timestamps to these new records. The way the timestamps are assigned depends on the context: + + * When new output records are generated via processing some input record, for example, `context.forward()` triggered in the `process()` function call, output record timestamps are inherited from input record timestamps directly. + * When new output records are generated via periodic functions such as `Punctuator#punctuate()`, the output record timestamp is defined as the current internal time (obtained through `context.timestamp()`) of the stream task. 
+ * For aggregations, the timestamp of a result update record will be the maximum timestamp of all input records contributing to the result. + + + +You can change the default behavior in the Processor API by assigning timestamps to output records explicitly when calling `#forward()`. + +For aggregations and joins, timestamps are computed by using the following rules. + + * For joins (stream-stream, table-table) that have left and right input records, the timestamp of the output record is assigned `max(left.ts, right.ts)`. + * For stream-table joins, the output record is assigned the timestamp from the stream record. + * For aggregations, Kafka Streams also computes the `max` timestamp over all records, per key, either globally (for non-windowed) or per-window. + * For stateless operations, the input record timestamp is passed through. For `flatMap` and siblings that emit multiple records, all output records inherit the timestamp from the corresponding input record. + + + +# Duality of Streams and Tables + +When implementing stream processing use cases in practice, you typically need both **streams** and also **databases**. An example use case that is very common in practice is an e-commerce application that enriches an incoming _stream_ of customer transactions with the latest customer information from a _database table_. In other words, streams are everywhere, but databases are everywhere, too. + +Any stream processing technology must therefore provide **first-class support for streams and tables**. Kafka's Streams API provides such functionality through its core abstractions for [streams](/41/streams/developer-guide/dsl-api#streams_concepts_kstream) and [tables](/41/streams/developer-guide/dsl-api#streams_concepts_ktable), which we will talk about in a minute. Now, an interesting observation is that there is actually a **close relationship between streams and tables** , the so-called stream-table duality. And Kafka exploits this duality in many ways: for example, to make your applications [elastic](/41/streams/developer-guide/running-app#elastic-scaling-of-your-application), to support [fault-tolerant stateful processing](/41/streams/architecture#streams_architecture_recovery), or to run [interactive queries](/41/streams/developer-guide/interactive-queries#interactive-queries) against your application's latest processing results. And, beyond its internal usage, the Kafka Streams API also allows developers to exploit this duality in their own applications. + +Before we discuss concepts such as [aggregations](/41/streams/developer-guide/dsl-api#aggregating) in Kafka Streams, we must first introduce **tables** in more detail, and talk about the aforementioned stream-table duality. Essentially, this duality means that a stream can be viewed as a table, and a table can be viewed as a stream. Kafka's log compaction feature, for example, exploits this duality. + +A simple form of a table is a collection of key-value pairs, also called a map or associative array. Such a table may look as follows: + +![](/41/images/streams-table-duality-01.png) The **stream-table duality** describes the close relationship between streams and tables. + + * **Stream as Table** : A stream can be considered a changelog of a table, where each data record in the stream captures a state change of the table. A stream is thus a table in disguise, and it can be easily turned into a "real" table by replaying the changelog from beginning to end to reconstruct the table. 
Similarly, in a more general analogy, aggregating data records in a stream - such as computing the total number of pageviews by user from a stream of pageview events - will return a table (here with the key and the value being the user and its corresponding pageview count, respectively). + * **Table as Stream** : A table can be considered a snapshot, at a point in time, of the latest value for each key in a stream (a stream's data records are key-value pairs). A table is thus a stream in disguise, and it can be easily turned into a "real" stream by iterating over each key-value entry in the table. + + + +Let's illustrate this with an example. Imagine a table that tracks the total number of pageviews by user (first column of diagram below). Over time, whenever a new pageview event is processed, the state of the table is updated accordingly. Here, the state changes between different points in time - and different revisions of the table - can be represented as a changelog stream (second column). + +![](/41/images/streams-table-duality-02.png) + +Interestingly, because of the stream-table duality, the same stream can be used to reconstruct the original table (third column): + +![](/41/images/streams-table-duality-03.png) + +The same mechanism is used, for example, to replicate databases via change data capture (CDC) and, within Kafka Streams, to replicate its so-called state stores across machines for fault-tolerance. The stream-table duality is such an important concept that Kafka Streams models it explicitly via the KStream, KTable, and GlobalKTable interfaces. + +# Aggregations + +An **aggregation** operation takes one input stream or table, and yields a new table by combining multiple input records into a single output record. Examples of aggregations are computing counts or sum. + +In the `Kafka Streams DSL`, an input stream of an `aggregation` can be a KStream or a KTable, but the output stream will always be a KTable. This allows Kafka Streams to update an aggregate value upon the out-of-order arrival of further records after the value was produced and emitted. When such out-of-order arrival happens, the aggregating KStream or KTable emits a new aggregate value. Because the output is a KTable, the new value is considered to overwrite the old value with the same key in subsequent processing steps. + +# Windowing + +Windowing lets you control how to _group records that have the same key_ for stateful operations such as `aggregations` or `joins` into so-called _windows_. Windows are tracked per record key. + +`Windowing operations` are available in the `Kafka Streams DSL`. When working with windows, you can specify a **grace period** for the window. This grace period controls how long Kafka Streams will wait for **out-of-order** data records for a given window. If a record arrives after the grace period of a window has passed, the record is discarded and will not be processed in that window. Specifically, a record is discarded if its timestamp dictates it belongs to a window, but the current stream time is greater than the end of the window plus the grace period. + +Out-of-order records are always possible in the real world and should be properly accounted for in your applications. It depends on the effective `time semantics ` how out-of-order records are handled. In the case of processing-time, the semantics are "when the record is being processed", which means that the notion of out-of-order records is not applicable as, by definition, no record can be out-of-order. 
Hence, out-of-order records can only be considered as such for event-time. In both cases, Kafka Streams is able to properly handle out-of-order records. + +# States + +Some stream processing applications don't require state, which means the processing of a message is independent from the processing of all other messages. However, being able to maintain state opens up many possibilities for sophisticated stream processing applications: you can join input streams, or group and aggregate data records. Many such stateful operators are provided by the [**Kafka Streams DSL**](/41/streams/developer-guide/dsl-api.html). + +Kafka Streams provides so-called **state stores** , which can be used by stream processing applications to store and query data. This is an important capability when implementing stateful operations. Every task in Kafka Streams embeds one or more state stores that can be accessed via APIs to store and query data required for processing. These state stores can either be a persistent key-value store, an in-memory hashmap, or another convenient data structure. Kafka Streams offers fault-tolerance and automatic recovery for local state stores. + +Kafka Streams allows direct read-only queries of the state stores by methods, threads, processes or applications external to the stream processing application that created the state stores. This is provided through a feature called **Interactive Queries**. All stores are named and Interactive Queries exposes only the read operations of the underlying implementation. + + + + +# Processing Guarantees + +In stream processing, one of the most frequently asked question is "does my stream processing system guarantee that each record is processed once and only once, even if some failures are encountered in the middle of processing?" Failing to guarantee exactly-once stream processing is a deal-breaker for many applications that cannot tolerate any data-loss or data duplicates, and in that case a batch-oriented framework is usually used in addition to the stream processing pipeline, known as the [Lambda Architecture](https://en.wikipedia.org/wiki/Lambda_architecture). Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline. +Since the 0.11.0.0 release, Kafka has added support to allow its producers to send messages to different topic partitions in a [transactional and idempotent manner](https://kafka.apache.org/#semantics), and Kafka Streams has hence added the end-to-end exactly-once processing semantics by leveraging these features. More specifically, it guarantees that for any record read from the source Kafka topics, its processing results will be reflected exactly once in the output Kafka topic as well as in the state stores for stateful operations. 
Note that the key difference between Kafka Streams' end-to-end exactly-once guarantee and other stream processing frameworks' claimed guarantees is that Kafka Streams tightly integrates with the underlying Kafka storage system and ensures that commits on the input topic offsets, updates on the state stores, and writes to the output topics will be completed atomically instead of treating Kafka as an external system that may have side-effects. For more information on how this is done inside Kafka Streams, see [KIP-129](https://cwiki.apache.org/confluence/x/0okYB). +As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once v2", which requires broker version 2.5.0 or newer. This implementation is more efficient, because it reduces client and broker resource utilization, like client threads and used network connections, and it enables higher throughput and improved scalability. As of the 3.0.0 release, the first version of exactly-once has been deprecated. Users are encouraged to use exactly-once v2 for exactly-once processing from now on, and prepare by upgrading their brokers if necessary. For more information on how this is done inside the brokers and Kafka Streams, see [KIP-447](https://cwiki.apache.org/confluence/x/vhYlBg). +To enable exactly-once semantics when running Kafka Streams applications, set the `processing.guarantee` config value (default value is **at_least_once**) to **StreamsConfig.EXACTLY_ONCE_V2** (requires broker version 2.5 or newer). For more information, see the [Kafka Streams Configs](/41/streams/developer-guide/config-streams.html) section. + +# Out-of-Order Handling + +Besides the guarantee that each record will be processed exactly-once, another issue that many stream processing applications will face is how to handle [out-of-order data](https://dl.acm.org/citation.cfm?id=3242155) that may impact their business logic. In Kafka Streams, there are two causes that could potentially result in out-of-order data arrivals with respect to their timestamps: + + * Within a topic-partition, a record's timestamp may not be monotonically increasing along with its offset. Since Kafka Streams will always try to process records within a topic-partition to follow the offset order, it can cause records with larger timestamps (but smaller offsets) to be processed earlier than records with smaller timestamps (but larger offsets) in the same topic-partition. + * Within a [stream task](/41/streams/architecture#streams_architecture_tasks) that may be processing multiple topic-partitions, if users configure the application to not wait for all partitions to contain some buffered data and pick from the partition with the smallest timestamp to process the next record, then later on when some records are fetched for other topic-partitions, their timestamps may be smaller than those processed records fetched from another topic-partition. + + + +For stateless operations, out-of-order data will not impact processing logic since only one record is considered at a time, without looking into the history of past processed records; for stateful operations such as aggregations and joins, however, out-of-order data could cause the processing logic to be incorrect. If users want to handle such out-of-order data, generally they need to allow their applications to wait for a longer time while bookkeeping their states during the wait time, i.e. making trade-off decisions between latency, cost, and correctness.
In Kafka Streams specifically, users can configure their window operators for windowed aggregations to achieve such trade-offs (details can be found in [**Developer Guide**](/41/streams/developer-guide)). As for Joins, users may use [versioned state stores](/41/streams/developer-guide/dsl-api.html#versioned-state-stores) to address concerns with out-of-order data, but out-of-order data will not be handled by default: + + * For Stream-Stream joins, all three types (inner, outer, left) handle out-of-order records correctly. + * For Stream-Table joins, if not using versioned stores, then out-of-order records are not handled (i.e., Streams applications don't check for out-of-order records and just process all records in offset order), and hence it may produce unpredictable results. With versioned stores, stream-side out-of-order data will be properly handled by performing a timestamp-based lookup in the table. Table-side out-of-order data is still not handled. + * For Table-Table joins, if not using versioned stores, then out-of-order records are not handled (i.e., Streams applications don't check for out-of-order records and just process all records in offset order). However, the join result is a changelog stream and hence will be eventually consistent. With versioned stores, table-table join semantics change from offset-based semantics to [timestamp-based semantics](/41/streams/developer-guide/dsl-api.html#versioned-state-stores) and out-of-order records are handled accordingly. + + + +[Previous](/41/streams/tutorial) [Next](/41/streams/architecture) + + * [Documentation](/documentation) + * [Kafka Streams](/streams) + + diff --git a/content/en/41/streams/developer-guide/_index.md b/content/en/41/streams/developer-guide/_index.md new file mode 100644 index 000000000..8879fea79 --- /dev/null +++ b/content/en/41/streams/developer-guide/_index.md @@ -0,0 +1,10 @@ +--- +title: Streams Developer Guide +description: +weight: 10 +tags: ['kafka', 'docs', 'streams', 'developer-guide'] +aliases: +keywords: +type: docs +--- + diff --git a/content/en/41/streams/developer-guide/app-reset-tool.md b/content/en/41/streams/developer-guide/app-reset-tool.md new file mode 100644 index 000000000..dfa9cc39b --- /dev/null +++ b/content/en/41/streams/developer-guide/app-reset-tool.md @@ -0,0 +1,124 @@ +--- +title: Application Reset Tool +description: +weight: 13 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Application Reset Tool + +You can reset an application and force it to reprocess its data from scratch by using the application reset tool. This can be useful for development and testing, or when fixing bugs. + +The application reset tool handles the Kafka Streams [user topics](manage-topics.html#streams-developer-guide-topics-user) (input, and output) and [internal topics](manage-topics.html#streams-developer-guide-topics-internal) differently when resetting the application. + +Here's what the application reset tool does for each topic type: + + * Input topics: Reset offsets to specified position (by default to the beginning of the topic). + * Internal topics: Delete the internal topic (this automatically deletes any committed offsets). + + + +The application reset tool does not: + + * Reset output topics of an application. If any output topics are consumed by downstream applications, it is your responsibility to adjust those downstream applications as appropriate when you reset the upstream application. + * Reset the local environment of your application instances. 
It is your responsibility to delete the local state on any machine on which an application instance was run. See the instructions in section Step 2: Reset the local environments of your application instances on how to do this. + + + +Prerequisites + + + * All instances of your application must be stopped. Otherwise, the application may enter an invalid state, crash, or produce incorrect results. You can verify whether the consumer group with ID `application.id` is still active by using `bin/kafka-consumer-groups`. When a long session timeout has been configured, active members can take longer to expire on the broker, which blocks the reset job from completing. The `--force` option can be used to remove those left-over members immediately. Make sure to shut down all stream applications when this option is specified to avoid unexpected rebalances. + + * Use this tool with care and double-check its parameters: If you provide wrong parameter values (e.g., typos in `application.id`) or specify parameters inconsistently (e.g., specify the wrong input topics for the application), this tool might invalidate the application's state or even impact other applications, consumer groups, or your Kafka topics. + + + + +# Step 1: Run the application reset tool + +Invoke the application reset tool from the command line. + +Warning! This tool makes irreversible changes to your application. It is strongly recommended that you run this once with `--dry-run` to preview your changes before making them. + + + $ bin/kafka-streams-application-reset.sh + +The tool accepts the following parameters: + + + Option (* = required) Description + --------------------- ----------- + * --application-id The Kafka Streams application ID + (application.id). + --bootstrap-server The server(s) to connect to. The broker + list string in the form HOST1:PORT1, + HOST2:PORT2. + --by-duration Reset offsets to offset by duration from + current timestamp. Format: 'PnDTnHnMnS' + --config-file Property file containing configs to be + passed to admin clients and embedded + consumer. + --dry-run Display the actions that would be + performed without executing the reset + commands. + --from-file Reset offsets to values defined in CSV + file. + --input-topics Comma-separated list of user input + topics. For these topics, the tool will + reset the offset to the earliest + available offset. + --internal-topics Comma-separated list of internal topics + to delete. Must be a subset of the + internal topics marked for deletion by + the default behaviour (do a dry-run without + this option to view these topics). + --shift-by Reset offsets shifting current offset by + 'n', where 'n' can be positive or + negative + --to-datetime Reset offsets to offset from datetime. + Format: 'YYYY-MM-DDThh:mm:ss.sss' + --to-earliest Reset offsets to earliest offset. + --to-latest Reset offsets to latest offset. + --to-offset Reset offsets to a specific offset. + --force Force removing members of the consumer group + (intended to remove left-over members if + long session timeout was configured). + +Consider the following as reset-offset scenarios for `input-topics`: + + * by-duration + * from-file + * shift-by + * to-datetime + * to-earliest + * to-latest + * to-offset + + + +Only one of these scenarios can be defined. If not, `to-earliest` will be executed by default. + +All the other parameters can be combined as needed; a sample invocation is sketched below.
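+A hypothetical dry run that previews resetting an application's input topics to their earliest offsets (the application ID and topic name are placeholders) could look like this:
+
+    $ bin/kafka-streams-application-reset.sh --application-id my-streams-app \
+          --bootstrap-server localhost:9092 \
+          --input-topics my-input-topic \
+          --dry-run
+
+Dropping `--dry-run` from the same command performs the actual reset.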
For example, if you want to restart an application from an empty internal state, but not reprocess previous data, simply omit the parameter `--input-topics`. + +# Step 2: Reset the local environments of your application instances + +For a complete application reset, you must delete the application's local state directory on any machines where the application instance was run. You must do this before restarting an application instance on the same machine. You can use either of these methods: + + * The API method `KafkaStreams#cleanUp()` in your application code. + * Manually delete the corresponding local state directory (default location: `/${java.io.tmpdir}/kafka-streams/`). For more information, see [Streams](/41/javadoc/org/apache/kafka/streams/StreamsConfig.html#STATE_DIR_CONFIG) javadocs. + + + +[Previous](/41/streams/developer-guide/security) [Next](/41/streams/upgrade-guide) + + * [Documentation](/documentation) + * [Kafka Streams](/streams) + * [Developer Guide](/streams/developer-guide/) + + diff --git a/content/en/41/streams/developer-guide/config-streams.md b/content/en/41/streams/developer-guide/config-streams.md new file mode 100644 index 000000000..46d003d68 --- /dev/null +++ b/content/en/41/streams/developer-guide/config-streams.md @@ -0,0 +1,729 @@ +--- +title: Configuring a Streams Application +description: +weight: 2 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Configuring a Streams Application + +Kafka and Kafka Streams configuration options must be configured before using Streams. You can configure Kafka Streams by specifying parameters in a `java.util.Properties` instance. + + 1. Create a `java.util.Properties` instance. + + 2. Set the parameters. For example: + + import java.util.Properties; + import org.apache.kafka.streams.StreamsConfig; + + Properties settings = new Properties(); + // Set a few key parameters + settings.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-first-streams-application"); + settings.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka-broker1:9092"); + // Any further settings + settings.put(... , ...); + + + + +# Configuration parameter reference + +This section contains the most common Streams configuration parameters. For a full reference, see the [Streams](/41/javadoc/org/apache/kafka/streams/StreamsConfig.html) Javadocs. 
+ + * Required configuration parameters + * application.id + * bootstrap.servers + * Recommended configuration parameters for resiliency + * acks + * replication.factor + * min.insync.replicas + * num.standby.replicas + * Optional configuration parameters + * acceptable.recovery.lag + * default.deserialization.exception.handler (deprecated) + * default.key.serde + * default.production.exception.handler (deprecated) + * default.timestamp.extractor + * default.value.serde + * deserialization.exception.handler + * enable.metrics.push + * ensure.explicit.internal.resource.naming + * group.protocol + * log.summary.interval.ms + * max.task.idle.ms + * max.warmup.replicas + * num.standby.replicas + * num.stream.threads + * probing.rebalance.interval.ms + * processing.exception.handler + * processing.guarantee + * processor.wrapper.class + * production.exception.handler + * rack.aware.assignment.non_overlap_cost + * rack.aware.assignment.strategy + * rack.aware.assignment.tags + * rack.aware.assignment.traffic_cost + * replication.factor + * rocksdb.config.setter + * state.dir + * task.assignor.class + * topology.optimization + * Kafka consumers and producer configuration parameters + * Naming + * Default Values + * Parameters controlled by Kafka Streams + * enable.auto.commit + + + +# Required configuration parameters + +Here are the required Streams configuration parameters. + +Parameter Name | Importance | Description | Default Value +---|---|---|--- +application.id | Required | An identifier for the stream processing application. Must be unique within the Kafka cluster. | None +bootstrap.servers | Required | A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. | None + +## application.id + +> (Required) The application ID. Each stream processing application must have a unique ID. The same ID must be given to all instances of the application. It is recommended to use only alphanumeric characters, `.` (dot), `-` (hyphen), and `_` (underscore). Examples: `"hello_world"`, `"hello_world-v1.0.0"` +> +> This ID is used in the following places to isolate resources used by the application from others: +> +> * As the default Kafka consumer and producer `client.id` prefix +> * As the Kafka consumer `group.id` for coordination +> * As the name of the subdirectory in the state directory (cf. `state.dir`) +> * As the prefix of internal Kafka topic names +> + +> +> Tip: +> When an application is updated, the `application.id` should be changed unless you want to reuse the existing data in internal topics and state stores. For example, you could embed the version information within `application.id`, as `my-app-v1.0.0` and `my-app-v1.0.2`. + +## bootstrap.servers + +> (Required) The Kafka bootstrap servers. This is the same [setting](/41/documentation.html#producerconfigs) that is used by the underlying producer and consumer clients to connect to the Kafka cluster. Example: `"kafka-broker1:9092,kafka-broker2:9092"`. 
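+
+For illustration only (this example is not part of the original reference), here is a minimal sketch of how the two required parameters might be wired into a runnable application; the topic names and the pass-through topology are placeholders:
+
+    import java.util.Properties;
+    import org.apache.kafka.common.serialization.Serdes;
+    import org.apache.kafka.streams.KafkaStreams;
+    import org.apache.kafka.streams.StreamsBuilder;
+    import org.apache.kafka.streams.StreamsConfig;
+    import org.apache.kafka.streams.kstream.Consumed;
+    import org.apache.kafka.streams.kstream.Produced;
+
+    Properties settings = new Properties();
+    // The two required parameters
+    settings.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-first-streams-application");
+    settings.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka-broker1:9092,kafka-broker2:9092");
+
+    // A trivial pass-through topology with explicit serdes, just so the example is complete
+    StreamsBuilder builder = new StreamsBuilder();
+    builder.stream("input-topic", Consumed.with(Serdes.String(), Serdes.String()))
+           .to("output-topic", Produced.with(Serdes.String(), Serdes.String()));
+
+    KafkaStreams streams = new KafkaStreams(builder.build(), settings);
+    streams.start();
+    // Close the application cleanly on JVM shutdown
+    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));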
+ +# Recommended configuration parameters for resiliency + +There are several Kafka and Kafka Streams configuration options that need to be configured explicitly for resiliency in the face of broker failures: + +Parameter Name | Corresponding Client | Default value | Consider setting to +---|---|---|--- +acks | Producer (for version <=2.8) | `acks="1"` | `acks="all"` +replication.factor (for broker version 2.3 or older) | Streams | `-1` | `3` (broker 2.4+: ensure broker config `default.replication.factor=3`) +min.insync.replicas | Broker | `1` | `2` +num.standby.replicas | Streams | `0` | `1` + +Increasing the replication factor to 3 ensures that the internal Kafka Streams topic can tolerate up to 2 broker failures. The tradeoff of moving from the default values to the recommended ones is that some performance is sacrificed and more storage space is required (3x with a replication factor of 3) in exchange for more resiliency. + +## acks + +> The number of acknowledgments that the leader must have received before considering a request complete. This controls the durability of records that are sent. The possible values are: +> +> * `acks="0"` The producer does not wait for acknowledgment from the server and the record is immediately added to the socket buffer and considered sent. No guarantee can be made that the server has received the record in this case, and the producer won't generally know of any failures. The offset returned for each record will always be set to `-1`. +> * `acks="1"` The leader writes the record to its local log and responds without waiting for full acknowledgement from all followers. If the leader immediately fails after acknowledging the record, but before the followers have replicated it, then the record will be lost. +> * `acks="all"` (default since 3.0 release) The leader waits for the full set of in-sync replicas to acknowledge the record. This guarantees that the record will not be lost if there is at least one in-sync replica alive. This is the strongest available guarantee. +> + +> +> For more information, see the [Kafka Producer documentation](https://kafka.apache.org/#producerconfigs). + +## replication.factor + +> See the description here. + +## min.insync.replicas + +> The minimum number of in-sync replicas available for replication if the producer is configured with `acks="all"` (see [topic configs](/41/#topicconfigs_min.insync.replicas)). + +## num.standby.replicas + +> See the description here. + + + Properties streamsSettings = new Properties(); + // for broker version 2.3 or older + //streamsSettings.put(StreamsConfig.REPLICATION_FACTOR_CONFIG, 3); + // for version 2.8 or older + //streamsSettings.put(StreamsConfig.producerPrefix(ProducerConfig.ACKS_CONFIG), "all"); + streamsSettings.put(StreamsConfig.topicPrefix(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG), 2); + streamsSettings.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1); + +# Optional configuration parameters + +Here are the optional Streams configuration parameters, sorted by level of importance (see the [Streams](/41/javadoc/org/apache/kafka/streams/StreamsConfig.html) Javadocs for the full reference): + +> * High: These are parameters with a default value which is most likely not a good fit for production use. It's highly recommended to revisit these parameters for production usage. +> * Medium: The default values of these parameters should work for production for many cases, but it's not uncommon that they are changed, for example to tune performance. +> * Low: It should rarely be necessary to change the value for these parameters.
It's only recommended to change them if there is a very specific issue you want to address. +> + + +Parameter Name | Importance | Description | Default Value +---|---|---|--- +acceptable.recovery.lag | Medium | The maximum acceptable lag (number of offsets to catch up) for an instance to be considered caught-up and ready for the active task. | `10000` +application.server | Low | A host:port pair pointing to an embedded user defined endpoint that can be used for discovering the locations of state stores within a single Kafka Streams application. The value of this must be different for each instance of the application. | the empty string +buffered.records.per.partition | Low | The maximum number of records to buffer per partition. | `1000` +statestore.cache.max.bytes | Medium | Maximum number of memory bytes to be used for record caches across all threads. | `10485760` +cache.max.bytes.buffering (Deprecated. Use statestore.cache.max.bytes instead.) | Medium | Maximum number of memory bytes to be used for record caches across all threads. | `10485760` +client.id | Medium | An ID string to pass to the server when making requests. (This setting is passed to the consumer/producer clients used internally by Kafka Streams.) | the empty string +commit.interval.ms | Low | The frequency in milliseconds with which to save the position (offsets in source topics) of tasks. | `30000` (30 seconds) (at-least-once) / `100` (exactly-once) +default.deserialization.exception.handler (Deprecated. Use deserialization.exception.handler instead.) | Medium | Exception handling class that implements the `DeserializationExceptionHandler` interface. | `LogAndFailExceptionHandler` +default.key.serde | Medium | Default serializer/deserializer class for record keys, implements the `Serde` interface. Must be set by the user or all serdes must be passed in explicitly (see also default.value.serde). | `null` +default.production.exception.handler (Deprecated. Use production.exception.handler instead.) | Medium | Exception handling class that implements the `ProductionExceptionHandler` interface. | `DefaultProductionExceptionHandler` +default.timestamp.extractor | Medium | Timestamp extractor class that implements the `TimestampExtractor` interface. See Timestamp Extractor | `FailOnInvalidTimestamp` +default.value.serde | Medium | Default serializer/deserializer class for record values, implements the `Serde` interface. Must be set by the user or all serdes must be passed in explicitly (see also default.key.serde). | `null` +default.dsl.store (Deprecated. Use dsl.store.suppliers.class instead.) | Low | The default state store type used by DSL operators. | `"ROCKS_DB"` +deserialization.exception.handler | Medium | Exception handling class that implements the `DeserializationExceptionHandler` interface. | `LogAndContinueExceptionHandler` +dsl.store.suppliers.class | Low | Defines a default state store implementation to be used by any stateful DSL operator that has not explicitly configured the store implementation type. Must implement the `org.apache.kafka.streams.state.DslStoreSuppliers` interface. | `BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers` +ensure.explicit.internal.resource.naming | High | Whether to enforce explicit naming for all internal resources of the topology, including internal topics (e.g., changelog and repartition topics) and their associated state stores. When enabled, the application will refuse to start if any internal resource has an auto-generated name. 
| `false` +log.summary.interval.ms | Low | The output interval in milliseconds for logging summary information (disabled if negative). | `120000` (2 minutes) +enable.metrics.push | Low | Whether to enable pushing of client metrics to the cluster, if the cluster has a client metrics subscription which matches this client. | `true` +max.task.idle.ms | Medium | This config controls whether joins and merges may produce out-of-order results. The config value is the maximum amount of time in milliseconds a stream task will stay idle when it is fully caught up on some (but not all) input partitions to wait for producers to send additional records and avoid potential out-of-order record processing across multiple input streams. The default (zero) does not wait for producers to send more records, but it does wait to fetch data that is already present on the brokers. This default means that for records that are already present on the brokers, Streams will process them in timestamp order. Set to -1 to disable idling entirely and process any locally available data, even though doing so may produce out-of-order processing. | `0` +max.warmup.replicas | Medium | The maximum number of warmup replicas (extra standbys beyond the configured num.standbys) that can be assigned at once. | `2` +metric.reporters | Low | A list of classes to use as metrics reporters. | the empty list +metrics.num.samples | Low | The number of samples maintained to compute metrics. | `2` +metrics.recording.level | Low | The highest recording level for metrics. | `INFO` +metrics.sample.window.ms | Low | The window of time in milliseconds a metrics sample is computed over. | `30000` (30 seconds) +num.standby.replicas | High | The number of standby replicas for each task. | `0` +num.stream.threads | Medium | The number of threads to execute stream processing. | `1` +probing.rebalance.interval.ms | Low | The maximum time in milliseconds to wait before triggering a rebalance to probe for warmup replicas that have sufficiently caught up. | `600000` (10 minutes) +processing.exception.handler | Medium | Exception handling class that implements the `ProcessingExceptionHandler` interface. | `LogAndFailProcessingExceptionHandler` +processing.guarantee | Medium | The processing mode. Can be either `"at_least_once"` or `"exactly_once_v2"` (for EOS version 2, requires broker version 2.5+). See Processing Guarantee. | `"at_least_once"` +processor.wrapper.class | Medium | A class or class name implementing the `ProcessorWrapper` interface. Must be passed in when creating the topology, and will not be applied unless passed in to the appropriate constructor as a TopologyConfig. You should use the `StreamsBuilder#new(TopologyConfig)` constructor for DSL applications, and the `Topology#new(TopologyConfig)` constructor for PAPI applications. +production.exception.handler | Medium | Exception handling class that implements the `ProductionExceptionHandler` interface. | `DefaultProductionExceptionHandler` +poll.ms | Low | The amount of time in milliseconds to block waiting for input. | `100` +rack.aware.assignment.strategy | Low | The strategy used for rack aware assignment. Acceptable values are `"none"` (default), `"min_traffic"`, and `"balance_subtopology"`. See Rack Aware Assignment Strategy. | `"none"` +rack.aware.assignment.tags | Low | List of tag keys used to distribute standby replicas across Kafka Streams clients. When configured, Kafka Streams will make a best-effort to distribute the standby tasks over clients with different tag values.
See Rack Aware Assignment Tags. | the empty list +rack.aware.assignment.non_overlap_cost | Low | Cost associated with moving tasks from existing assignment. See Rack Aware Assignment Non-Overlap-Cost. | `null` +rack.aware.assignment.traffic_cost | Low | Cost associated with cross rack traffic. See Rack Aware Assignment Traffic-Cost. | `null` +replication.factor | Medium | The replication factor for changelog topics and repartition topics created by the application. The default of `-1` (meaning: use broker default replication factor) requires broker version 2.4 or newer. | `-1` +repartition.purge.interval.ms | Low | The frequency in milliseconds with which to delete fully consumed records from repartition topics. Purging will occur after at least this value since the last purge, but may be delayed until later. | `30000` (30 seconds) +retry.backoff.ms | Low | The amount of time in milliseconds before a request is retried. | `100` +rocksdb.config.setter | Medium | The RocksDB configuration. | `null` +state.cleanup.delay.ms | Low | The amount of time in milliseconds to wait before deleting state when a partition has migrated. | `600000` (10 minutes) +state.dir | High | Directory location for state stores. | `/${java.io.tmpdir}/kafka-streams` +task.assignor.class | Medium | A task assignor class or class name implementing the `TaskAssignor` interface. | The high-availability task assignor. +task.timeout.ms | Medium | The maximum amount of time in milliseconds a task might stall due to internal errors and retries until an error is raised. For a timeout of `0 ms`, a task would raise an error for the first internal error. For any timeout larger than `0 ms`, a task will retry at least once before an error is raised. | `300000` (5 minutes) +topology.optimization | Medium | A configuration telling Kafka Streams if it should optimize the topology and what optimizations to apply. Acceptable values are: `StreamsConfig.NO_OPTIMIZATION` (`none`), `StreamsConfig.OPTIMIZE` (`all`) or a comma separated list of specific optimizations: `StreamsConfig.REUSE_KTABLE_SOURCE_TOPICS` (`reuse.ktable.source.topics`), `StreamsConfig.MERGE_REPARTITION_TOPICS` (`merge.repartition.topics`), `StreamsConfig.SINGLE_STORE_SELF_JOIN` (`single.store.self.join`). | `"NO_OPTIMIZATION"` +upgrade.from | Medium | The version you are upgrading from during a rolling upgrade. See Upgrade From. | `null` +windowstore.changelog.additional.retention.ms | Low | Added to a window's maintainMs to ensure data is not deleted from the log prematurely. Allows for clock drift. | `86400000` (1 day) +window.size.ms (Deprecated. See [Window Serdes](datatypes.html#window-serdes) for alternatives.) | Low | Sets window size for the deserializer in order to calculate window end times. | `null` +windowed.inner.class.serde (Deprecated. See [Window Serdes](datatypes.html#window-serdes) for alternatives.) | Low | Serde for the inner class of a windowed record. Must implement the `Serde` interface. | `null` + +## acceptable.recovery.lag + +> The maximum acceptable lag (total number of offsets to catch up from the changelog) for an instance to be considered caught-up and able to receive an active task. Streams will only assign stateful active tasks to instances whose state stores are within the acceptable recovery lag, if any exist, and assign warmup replicas to restore state in the background for instances that are not yet caught up. Should correspond to a recovery time of well under a minute for a given workload. Must be at least 0.
+> +> Note: if you set this to `Long.MAX_VALUE` it effectively disables the warmup replicas and task high availability, allowing Streams to immediately produce a balanced assignment and migrate tasks to a new instance without first warming them up. + +## deserialization.exception.handler (deprecated: default.deserialization.exception.handler) + +> The deserialization exception handler allows you to manage record exceptions that fail to deserialize. This can be caused by corrupt data, incorrect serialization logic, or unhandled record types. The implemented exception handler needs to return a `FAIL` or `CONTINUE` depending on the record and the exception thrown. Returning `FAIL` will signal that Streams should shut down and `CONTINUE` will signal that Streams should ignore the issue and continue processing. The following library built-in exception handlers are available: +> +> * [LogAndContinueExceptionHandler](/41/javadoc/org/apache/kafka/streams/errors/LogAndContinueExceptionHandler.html): This handler logs the deserialization exception and then signals the processing pipeline to continue processing more records. This log-and-skip strategy allows Kafka Streams to make progress instead of failing if there are records that fail to deserialize. +> * [LogAndFailExceptionHandler](/41/javadoc/org/apache/kafka/streams/errors/LogAndFailExceptionHandler.html). This handler logs the deserialization exception and then signals the processing pipeline to stop processing more records. +> + +> +> You can also provide your own customized exception handler besides the library provided ones to meet your needs. For example, you can choose to forward corrupt records into a quarantine topic (think: a "dead letter queue") for further processing. To do this, use the Producer API to write a corrupted record directly to the quarantine topic. To be more concrete, you can create a separate `KafkaProducer` object outside the Streams client, and pass in this object as well as the dead letter queue topic name into the `Properties` map, which then can be retrieved from the `configure` function call. The drawback of this approach is that "manual" writes are side effects that are invisible to the Kafka Streams runtime library, so they do not benefit from the end-to-end processing guarantees of the Streams API: +> +> +> public class SendToDeadLetterQueueExceptionHandler implements DeserializationExceptionHandler { +> KafkaProducer dlqProducer; +> String dlqTopic; +> +> @Override +> public DeserializationHandlerResponse handle(final ErrorHandlerContext context, +> final ConsumerRecord record, +> final Exception exception) { +> +> log.warn("Exception caught during Deserialization, sending to the dead queue topic; " + +> "taskId: {}, topic: {}, partition: {}, offset: {}", +> context.taskId(), record.topic(), record.partition(), record.offset(), +> exception); +> +> dlqProducer.send(new ProducerRecord<>(dlqTopic, record.timestamp(), record.key(), record.value(), record.headers())).get(); +> +> return DeserializationHandlerResponse.CONTINUE; +> } +> +> @Override +> public void configure(final Map configs) { +> dlqProducer = .. // get a producer from the configs map +> dlqTopic = .. // get the topic name from the configs map +> } +> } + +## production.exception.handler (deprecated: default.production.exception.handler) + +> The production exception handler allows you to manage exceptions triggered when trying to interact with a broker such as attempting to produce a record that is too large. 
By default, Kafka provides and uses the [DefaultProductionExceptionHandler](/41/javadoc/org/apache/kafka/streams/errors/DefaultProductionExceptionHandler.html) that always fails when these exceptions occur. +> +> An exception handler can return `FAIL`, `CONTINUE`, or `RETRY` depending on the record and the exception thrown. Returning `FAIL` will signal that Streams should shut down. `CONTINUE` will signal that Streams should ignore the issue and continue processing. For `RetriableException` the handler may return `RETRY` to tell the runtime to retry sending the failed record (**Note:** If `RETRY` is returned for a non-`RetriableException` it will be treated as `FAIL`.) If you want to provide an exception handler that always ignores records that are too large, you could implement something like the following: +> +> +> import java.util.Properties; +> import org.apache.kafka.streams.StreamsConfig; +> import org.apache.kafka.common.errors.RecordTooLargeException; +> import org.apache.kafka.streams.errors.ProductionExceptionHandler; +> import org.apache.kafka.streams.errors.ProductionExceptionHandler.ProductionExceptionHandlerResponse; +> +> public class IgnoreRecordTooLargeHandler implements ProductionExceptionHandler { +> public void configure(Map config) {} +> +> public ProductionExceptionHandlerResponse handle(final ErrorHandlerContext context, +> final ProducerRecord record, +> final Exception exception) { +> if (exception instanceof RecordTooLargeException) { +> return ProductionExceptionHandlerResponse.CONTINUE; +> } else { +> return ProductionExceptionHandlerResponse.FAIL; +> } +> } +> } +> +> Properties settings = new Properties(); +> +> // other various kafka streams settings, e.g. bootstrap servers, application id, etc +> +> settings.put(StreamsConfig.PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG, +> IgnoreRecordTooLargeHandler.class); + +## default.timestamp.extractor + +> A timestamp extractor pulls a timestamp from an instance of [ConsumerRecord](/41/javadoc/org/apache/kafka/clients/consumer/ConsumerRecord.html). Timestamps are used to control the progress of streams. +> +> The default extractor is [FailOnInvalidTimestamp](/41/javadoc/org/apache/kafka/streams/processor/FailOnInvalidTimestamp.html). This extractor retrieves built-in timestamps that are automatically embedded into Kafka messages by the Kafka producer client since [Kafka version 0.10](https://cwiki.apache.org/confluence/x/eaSnAw). Depending on the setting of Kafka's server-side `log.message.timestamp.type` broker and `message.timestamp.type` topic parameters, this extractor provides you with: +> +> * **event-time** processing semantics if `log.message.timestamp.type` is set to `CreateTime` aka "producer time" (which is the default). This represents the time when a Kafka producer sent the original message. If you use Kafka's official producer client, the timestamp represents milliseconds since the epoch. +> * **ingestion-time** processing semantics if `log.message.timestamp.type` is set to `LogAppendTime` aka "broker time". This represents the time when the Kafka broker received the original message, in milliseconds since the epoch. +> + +> +> The `FailOnInvalidTimestamp` extractor throws an exception if a record contains an invalid (i.e. negative) built-in timestamp, because Kafka Streams would not process this record but silently drop it. 
Invalid built-in timestamps can occur for various reasons: if for example, you consume a topic that is written to by pre-0.10 Kafka producer clients or by third-party producer clients that don't support the new Kafka 0.10 message format yet; another situation where this may happen is after upgrading your Kafka cluster from `0.9` to `0.10`, where all the data that was generated with `0.9` does not include the `0.10` message timestamps. +> +> If you have data with invalid timestamps and want to process it, then there are two alternative extractors available. Both work on built-in timestamps, but handle invalid timestamps differently. +> +> * [LogAndSkipOnInvalidTimestamp](/41/javadoc/org/apache/kafka/streams/processor/LogAndSkipOnInvalidTimestamp.html): This extractor logs a warn message and returns the invalid timestamp to Kafka Streams, which will not process but silently drop the record. This log-and-skip strategy allows Kafka Streams to make progress instead of failing if there are records with an invalid built-in timestamp in your input data. +> * [UsePartitionTimeOnInvalidTimestamp](/41/javadoc/org/apache/kafka/streams/processor/UsePartitionTimeOnInvalidTimestamp.html). This extractor returns the record's built-in timestamp if it is valid (i.e. not negative). If the record does not have a valid built-in timestamps, the extractor returns the previously extracted valid timestamp from a record of the same topic partition as the current record as a timestamp estimation. In case that no timestamp can be estimated, it throws an exception. +> + +> +> Another built-in extractor is [WallclockTimestampExtractor](/41/javadoc/org/apache/kafka/streams/processor/WallclockTimestampExtractor.html). This extractor does not actually "extract" a timestamp from the consumed record but rather returns the current time in milliseconds from the system clock (think: `System.currentTimeMillis()`), which effectively means Streams will operate on the basis of the so-called **processing-time** of events. +> +> You can also provide your own timestamp extractors, for instance to retrieve timestamps embedded in the payload of messages. If you cannot extract a valid timestamp, you can either throw an exception, return a negative timestamp, or estimate a timestamp. Returning a negative timestamp will result in data loss - the corresponding record will not be processed but silently dropped. If you want to estimate a new timestamp, you can use the value provided via `previousTimestamp` (i.e., a Kafka Streams timestamp estimation). Here is an example of a custom `TimestampExtractor` implementation: +> +> +> import org.apache.kafka.clients.consumer.ConsumerRecord; +> import org.apache.kafka.streams.processor.TimestampExtractor; +> +> // Extracts the embedded timestamp of a record (giving you "event-time" semantics). +> public class MyEventTimeExtractor implements TimestampExtractor { +> +> @Override +> public long extract(final ConsumerRecord record, final long previousTimestamp) { +> // `Foo` is your own custom class, which we assume has a method that returns +> // the embedded timestamp (milliseconds since midnight, January 1, 1970 UTC). +> long timestamp = -1; +> final Foo myPojo = (Foo) record.value(); +> if (myPojo != null) { +> timestamp = myPojo.getTimestampInMillis(); +> } +> if (timestamp < 0) { +> // Invalid timestamp! Attempt to estimate a new timestamp, +> // otherwise fall back to wall-clock time (processing-time). 
+> if (previousTimestamp >= 0) { +> return previousTimestamp; +> } else { +> return System.currentTimeMillis(); +> } +> } +> return timestamp; +> } +> +> } +> +> You would then define the custom timestamp extractor in your Streams configuration as follows: +> +> +> import java.util.Properties; +> import org.apache.kafka.streams.StreamsConfig; +> +> Properties streamsConfiguration = new Properties(); +> streamsConfiguration.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, MyEventTimeExtractor.class); + +## default.key.serde + +> The default Serializer/Deserializer class for record keys, null unless set by user. Serialization and deserialization in Kafka Streams happens whenever data needs to be materialized, for example: +> +> * Whenever data is read from or written to a _Kafka topic_ (e.g., via the `StreamsBuilder#stream()` and `KStream#to()` methods). +> * Whenever data is read from or written to a _state store_. +> + +> +> This is discussed in more detail in [Data types and serialization](datatypes.html#streams-developer-guide-serdes). + +## default.value.serde + +> The default Serializer/Deserializer class for record values, null unless set by user. Serialization and deserialization in Kafka Streams happens whenever data needs to be materialized, for example: +> +> * Whenever data is read from or written to a _Kafka topic_ (e.g., via the `StreamsBuilder#stream()` and `KStream#to()` methods). +> * Whenever data is read from or written to a _state store_. +> + +> +> This is discussed in more detail in [Data types and serialization](datatypes.html#streams-developer-guide-serdes). + +## ensure.explicit.internal.resource.naming + +> Whether to enforce explicit naming for all internal resources of the topology, including internal topics (e.g., changelog and repartition topics) and their associated state stores. When enabled, the application will refuse to start if any internal resource has an auto-generated name. + +## group.protocol + +> The group protocol used by the Kafka Streams client for coordination. It determines how the client will communicate with the Kafka brokers and other clients in the same group. The default value is `"classic"`, which is the classic consumer group protocol. Can be set to `"streams"` (requires broker-side enablement) to enable the new Kafka Streams group protocol. Note that the "streams" rebalance protocol is an Early Access feature and should not be used in production. + +## rack.aware.assignment.non_overlap_cost + +> This configuration sets the cost of moving a task from the original assignment computed either by `StickyTaskAssignor` or `HighAvailabilityTaskAssignor`. Together with `rack.aware.assignment.traffic_cost`, they control whether the optimizer favors minimizing cross rack traffic or minimizing the movement of tasks in the existing assignment. If this config is set to a larger value than `rack.aware.assignment.traffic_cost`, the optimizer will try to maintain the existing assignment computed by the task assignor. Note that the optimizer takes the ratio of these two configs into consideration when deciding between maintaining the existing assignment and minimizing traffic cost. For example, setting `rack.aware.assignment.non_overlap_cost` to 10 and `rack.aware.assignment.traffic_cost` to 1 is more likely to maintain existing assignment than setting `rack.aware.assignment.non_overlap_cost` to 100 and `rack.aware.assignment.traffic_cost` to 50. +> +> The default value is null, which means the default `non_overlap_cost` in different assignors will be used.
In `StickyTaskAssignor`, it has a default value of 10 and `rack.aware.assignment.traffic_cost` has a default value of 1, which means maintaining stickiness is preferred in `StickyTaskAssignor`. In `HighAvailabilityTaskAssignor`, it has a default value of 1 and `rack.aware.assignment.traffic_cost` has a default value of 10, which means minimizing cross rack traffic is preferred in `HighAvailabilityTaskAssignor`. + +## rack.aware.assignment.strategy + +> This configuration sets the strategy Kafka Streams uses for rack aware task assignment so that cross-rack traffic from broker to client can be reduced. This config will only take effect when `broker.rack` is set on the brokers and `client.rack` is set on the Kafka Streams side. There are three settings for this config: +> +> * `none`. This is the default value, which means rack aware task assignment will be disabled. +> * `min_traffic`. This setting means that the rack aware task assigner will compute an assignment which tries to minimize cross rack traffic. +> * `balance_subtopology`. This setting means that the rack aware task assigner will compute an assignment which tries to balance tasks from the same subtopology across different clients and minimize cross rack traffic on top of that. +> + +> +> This config can be used together with rack.aware.assignment.non_overlap_cost and rack.aware.assignment.traffic_cost to balance reducing cross rack traffic and maintaining the existing assignment. + +## rack.aware.assignment.tags + +> This configuration sets a list of tag keys used to distribute standby replicas across Kafka Streams clients. When configured, Kafka Streams will make a best-effort to distribute the standby tasks over clients with different tag values. +> +> Tags for the Kafka Streams clients can be set via `client.tag.` prefix. Example: +> +> +> Client-1 | Client-2 +> _______________________________________________________________________ +> client.tag.zone: eu-central-1a | client.tag.zone: eu-central-1b +> client.tag.cluster: k8s-cluster1 | client.tag.cluster: k8s-cluster1 +> rack.aware.assignment.tags: zone,cluster | rack.aware.assignment.tags: zone,cluster +> +> +> Client-3 | Client-4 +> _______________________________________________________________________ +> client.tag.zone: eu-central-1a | client.tag.zone: eu-central-1b +> client.tag.cluster: k8s-cluster2 | client.tag.cluster: k8s-cluster2 +> rack.aware.assignment.tags: zone,cluster | rack.aware.assignment.tags: zone,cluster +> +> In the above example, we have four Kafka Streams clients across two zones (`eu-central-1a`, `eu-central-1b`) and across two clusters (`k8s-cluster1`, `k8s-cluster2`). For an active task located on `Client-1`, Kafka Streams will allocate a standby task on `Client-4`, since `Client-4` has a different `zone` and a different `cluster` than `Client-1`. + +## rack.aware.assignment.traffic_cost + +> This configuration sets the cost of cross rack traffic. Together with `rack.aware.assignment.non_overlap_cost`, they control whether the optimizer favors minimizing cross rack traffic or minimizing the movement of tasks in the existing assignment. If this config is set to a larger value than `rack.aware.assignment.non_overlap_cost`, the optimizer will try to compute an assignment which minimizes cross rack traffic. Note that the optimizer takes the ratio of these two configs into consideration when deciding between maintaining the existing assignment and minimizing traffic cost.
For example, setting `rack.aware.assignment.traffic_cost` to 10 and `rack.aware.assignment.non_overlap_cost` to 1 is more likely to minimize cross rack traffic than setting `rack.aware.assignment.traffic_cost` to 100 and `rack.aware.assignment.non_overlap_cost` to 50. +> +> The default value is null which means default traffic cost in different assignors will be used. In `StickyTaskAssignor`, it has a default value of 1 and `rack.aware.assignment.non_overlap_cost` has a default value of 10. In `HighAvailabilityTaskAssignor`, it has a default value of 10 and `rack.aware.assignment.non_overlap_cost` has a default value of 1. + +## log.summary.interval.ms + +> This configuration controls the output interval for summary information. If greater or equal to 0, the summary log will be output according to the set time interval; If less than 0, summary output is disabled. + +## enable.metrics.push + +> Kafka Streams metrics can be pushed to the brokers similar to client metrics. Additionally, Kafka Streams allows to enable/disable metric pushing for each embedded client individually. However, pushing Kafka Streams metrics requires that `enable.metric.push` is enabled on the main-consumer and admin client. + +## max.task.idle.ms + +> This configuration controls how long Streams will wait to fetch data in order to provide in-order processing semantics. +> +> When processing a task that has multiple input partitions (as in a join or merge), Streams needs to choose which partition to process the next record from. When all input partitions have locally buffered data, Streams picks the partition whose next record has the lowest timestamp. This has the desirable effect of collating the input partitions in timestamp order, which is generally what you want in a streaming join or merge. However, when Streams does not have any data buffered locally for one of the partitions, it does not know whether the next record for that partition will have a lower or higher timestamp than the remaining partitions' records. +> +> There are two cases to consider: either there is data in that partition on the broker that Streams has not fetched yet, or Streams is fully caught up with that partition on the broker, and the producers simply haven't produced any new records since Streams polled the last batch. +> +> The default value of `0` causes Streams to delay processing a task when it detects that it has no locally buffered data for a partition, but there is data available on the brokers. Specifically, when there is an empty partition in the local buffer, but Streams has a non-zero lag for that partition. However, as soon as Streams catches up to the broker, it will continue processing, even if there is no data in one of the partitions. That is, it will not wait for new data to be _produced_. This default is designed to sacrifice some throughput in exchange for intuitively correct join semantics. +> +> Any config value greater than zero indicates the number of _extra_ milliseconds that Streams will wait if it has a caught-up but empty partition. In other words, this is the amount of time to wait for new data to be produced to the input partitions to ensure in-order processing of data in the event of a slow producer. +> +> The config value of `-1` indicates that Streams will never wait to buffer empty partitions before choosing the next record by timestamp, which achieves maximum throughput at the expense of introducing out-of-order processing. 
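+>
+> For illustration only (the value is hypothetical, not taken from the original docs), a sketch of tuning this setting in the Streams configuration:
+>
+>     import java.util.Properties;
+>     import org.apache.kafka.streams.StreamsConfig;
+>
+>     Properties settings = new Properties();
+>     // Wait up to 5 extra seconds for a caught-up but empty input partition before processing
+>     settings.put(StreamsConfig.MAX_TASK_IDLE_MS_CONFIG, 5000L);
+>     // Or disable idling entirely, trading ordering for maximum throughput:
+>     // settings.put(StreamsConfig.MAX_TASK_IDLE_MS_CONFIG, -1L);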
+ +## max.warmup.replicas + +> The maximum number of warmup replicas (extra standbys beyond the configured `num.standbys`) that can be assigned at once for the purpose of keeping the task available on one instance while it is warming up on another instance it has been reassigned to. Used to throttle how much extra broker traffic and cluster state can be used for high availability. Increasing this will allow Streams to warm up more tasks at once, speeding up the time for the reassigned warmups to restore sufficient state for them to be transitioned to active tasks. Must be at least 1. +> +> Note that one warmup replica corresponds to one [Stream Task](/41/streams/architecture#streams_architecture_tasks). Furthermore, note that each warmup task can only be promoted to an active task during a rebalance (normally during a so-called probing rebalance, which occur at a frequency specified by the `probing.rebalance.interval.ms` config). This means that the maximum rate at which active tasks can be migrated from one Kafka Streams instance to another instance can be determined by (`max.warmup.replicas` / `probing.rebalance.interval.ms`). + +## num.standby.replicas + +> The number of standby replicas. Standby replicas are shadow copies of local state stores. Kafka Streams attempts to create the specified number of replicas per store and keep them up to date as long as there are enough instances running. Standby replicas are used to minimize the latency of task failover. A task that was previously running on a failed instance is preferred to restart on an instance that has standby replicas so that the local state store restoration process from its changelog can be minimized. Details about how Kafka Streams makes use of the standby replicas to minimize the cost of resuming tasks on failover can be found in the [State](../architecture.html#streams_architecture_state) section. +> +> Recommendation: +> Increase the number of standbys to 1 to get instant fail-over, i.e., high-availability. Increasing the number of standbys requires more client-side storage space. For example, with 1 standby, 2x space is required. +> +> Note: +> If you enable n standby tasks, you need to provision n+1 `KafkaStreams` instances. + +## num.stream.threads + +> This specifies the number of stream threads in an instance of the Kafka Streams application. The stream processing code runs in these thread. For more information about Kafka Streams threading model, see [Threading Model](../architecture.html#streams_architecture_threads). + +## probing.rebalance.interval.ms + +> The maximum time to wait before triggering a rebalance to probe for warmup replicas that have restored enough to be considered caught up. Streams will only assign stateful active tasks to instances that are caught up and within the acceptable.recovery.lag, if any exist. Probing rebalances are used to query the latest total lag of warmup replicas and transition them to active tasks if ready. They will continue to be triggered as long as there are warmup tasks, and until the assignment is balanced. Must be at least 1 minute. + +## processing.exception.handler + +> The processing exception handler allows you to manage exceptions triggered during the processing of a record. The implemented exception handler needs to return a `FAIL` or `CONTINUE` depending on the record and the exception thrown. Returning `FAIL` will signal that Streams should shut down and `CONTINUE` will signal that Streams should ignore the issue and continue processing. 
The following library built-in exception handlers are available: +> +> * [LogAndContinueProcessingExceptionHandler](/41/javadoc/org/apache/kafka/streams/errors/LogAndContinueProcessingExceptionHandler.html): This handler logs the processing exception and then signals the processing pipeline to continue processing more records. This log-and-skip strategy allows Kafka Streams to make progress instead of failing if there are records that fail to be processed. +> * [LogAndFailProcessingExceptionHandler](/41/javadoc/org/apache/kafka/streams/errors/LogAndFailProcessingExceptionHandler.html). This handler logs the processing exception and then signals the processing pipeline to stop processing more records. +> + +> +> You can also provide your own customized exception handler besides the library provided ones to meet your needs. For example, you can choose to forward corrupt records into a quarantine topic (think: a "dead letter queue") for further processing. To do this, use the Producer API to write a corrupted record directly to the quarantine topic. To be more concrete, you can create a separate `KafkaProducer` object outside the Streams client, and pass in this object as well as the dead letter queue topic name into the `Properties` map, which then can be retrieved from the `configure` function call. The drawback of this approach is that "manual" writes are side effects that are invisible to the Kafka Streams runtime library, so they do not benefit from the end-to-end processing guarantees of the Streams API: +> +> +> public class SendToDeadLetterQueueExceptionHandler implements ProcessingExceptionHandler { +> KafkaProducer dlqProducer; +> String dlqTopic; +> +> @Override +> public ProcessingHandlerResponse handle(final ErrorHandlerContext context, +> final Record record, +> final Exception exception) { +> +> log.warn("Exception caught during message processing, sending to the dead queue topic; " + +> "processor node: {}, taskId: {}, source topic: {}, source partition: {}, source offset: {}", +> context.processorNodeId(), context.taskId(), context.topic(), context.partition(), context.offset(), +> exception); +> +> dlqProducer.send(new ProducerRecord<>(dlqTopic, null, record.timestamp(), (byte[]) record.key(), (byte[]) record.value(), record.headers())); +> +> return ProcessingHandlerResponse.CONTINUE; +> } +> +> @Override +> public void configure(final Map configs) { +> dlqProducer = .. // get a producer from the configs map +> dlqTopic = .. // get the topic name from the configs map +> } +> } + +## processing.guarantee + +> The processing guarantee that should be used. Possible values are `"at_least_once"` (default) and `"exactly_once_v2"` (for EOS version 2). Deprecated config options are `"exactly_once"` (for EOS alpha), and `"exactly_once_beta"` (for EOS version 2). Using `"exactly_once_v2"` (or the deprecated `"exactly_once_beta"`) requires broker version 2.5 or newer, while using the deprecated `"exactly_once"` requires broker version 0.11.0 or newer. Note that if exactly-once processing is enabled, the default for parameter `commit.interval.ms` changes to 100ms. Additionally, consumers are configured with `isolation.level="read_committed"` and producers are configured with `enable.idempotence=true` per default. Note that by default exactly-once processing requires a cluster of at least three brokers what is the recommended setting for production. 
For development, you can change this configuration by adjusting broker setting `transaction.state.log.replication.factor` and `transaction.state.log.min.isr` to the number of brokers you want to use. For more details see [Processing Guarantees](../core-concepts#streams_processing_guarantee). +> +> Recommendation: +> While it is technically possible to use EOS with any replication factor, using a replication factor lower than 3 effectively voids EOS. Thus it is strongly recommended to use a replication factor of 3 (together with `min.in.sync.replicas=2`). This recommendation applies to all topics (i.e. `__transaction_state`, `__consumer_offsets`, Kafka Streams internal topics, and user topics). + +## processor.wrapper.class + +> A class or class name implementing the `ProcessorWrapper` interface. This feature allows you to wrap any of the processors in the compiled topology, including both custom processor implementations and those created by Streams for DSL operators. This can be useful for logging or tracing implementations since it allows access to the otherwise-hidden processor context for DSL operators, and also allows for injecting additional debugging information to an entire application topology with just a single config. +> +> IMPORTANT: This MUST be passed in when creating the topology, and will not be applied unless passed in to the appropriate topology-building constructor. You should use the `StreamsBuilder#new(TopologyConfig)` constructor for DSL applications, and the `Topology#new(TopologyConfig)` constructor for PAPI applications. + +## replication.factor + +> This specifies the replication factor of internal topics that Kafka Streams creates when local states are used or a stream is repartitioned for aggregation. Replication is important for fault tolerance. Without replication even a single broker failure may prevent progress of the stream processing application. It is recommended to use a similar replication factor as source topics. +> +> Recommendation: +> Increase the replication factor to 3 to ensure that the internal Kafka Streams topic can tolerate up to 2 broker failures. Note that you will require more storage space as well (3x with the replication factor of 3). + +## rocksdb.config.setter + +> The RocksDB configuration. Kafka Streams uses RocksDB as the default storage engine for persistent stores. To change the default configuration for RocksDB, you can implement `RocksDBConfigSetter` and provide your custom class via [rocksdb.config.setter](/41/javadoc/org/apache/kafka/streams/state/RocksDBConfigSetter.html). +> +> Here is an example that adjusts the memory size consumed by RocksDB. +> +> +> public static class CustomRocksDBConfig implements RocksDBConfigSetter { +> // This object should be a member variable so it can be closed in RocksDBConfigSetter#close. +> private org.rocksdb.Cache cache = new org.rocksdb.LRUCache(16 * 1024L * 1024L); +> +> @Override +> public void setConfig(final String storeName, final Options options, final Map configs) { +> // See #1 below. +> BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) options.tableFormatConfig(); +> tableConfig.setBlockCache(cache); +> // See #2 below. +> tableConfig.setBlockSize(16 * 1024L); +> // See #3 below. +> tableConfig.setCacheIndexAndFilterBlocks(true); +> options.setTableFormatConfig(tableConfig); +> // See #4 below. +> options.setMaxWriteBufferNumber(2); +> } +> +> @Override +> public void close(final String storeName, final Options options) { +> // See #5 below. 
+> cache.close(); +> } +> } +> +> Properties streamsSettings = new Properties(); +> streamsSettings.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, CustomRocksDBConfig.class); +> +> Notes for example: +> +> +> 1. `BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) options.tableFormatConfig();` Get a reference to the existing table config rather than create a new one, so you don't accidentally overwrite defaults such as the `BloomFilter`, which is an important optimization. +> 2. `tableConfig.setBlockSize(16 * 1024L);` Modify the default [block size](https://github.com/apache/kafka/blob/2.3/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBStore.java#L79) per these instructions from the [RocksDB GitHub](https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB#indexes-and-filter-blocks). +> 3. `tableConfig.setCacheIndexAndFilterBlocks(true);` Do not let the index and filter blocks grow unbounded. For more information, see the [RocksDB GitHub](https://github.com/facebook/rocksdb/wiki/Block-Cache#caching-index-and-filter-blocks). +> 4. `options.setMaxWriteBufferNumber(2);` See the advanced options in the [RocksDB GitHub](https://github.com/facebook/rocksdb/blob/8dee8cad9ee6b70fd6e1a5989a8156650a70c04f/include/rocksdb/advanced_options.h#L103). +> 5. `cache.close();` To avoid memory leaks, you must close any objects you constructed that extend org.rocksdb.RocksObject. See [RocksJava docs](https://github.com/facebook/rocksdb/wiki/RocksJava-Basics#memory-management) for more details. +> + +## state.dir + +> The state directory. Kafka Streams persists local states under the state directory. Each application has a subdirectory on its hosting machine that is located under the state directory. The name of the subdirectory is the application ID. The state stores associated with the application are created under this subdirectory. When running multiple instances of the same application on a single machine, this path must be unique for each such instance. + +## task.assignor.class + +> A task assignor class or class name implementing the `org.apache.kafka.streams.processor.assignment.TaskAssignor` interface. Defaults to the high-availability task assignor. One possible alternative implementation provided in Apache Kafka is the `org.apache.kafka.streams.processor.assignment.assignors.StickyTaskAssignor`, which was the default task assignor before KIP-441 and minimizes task movement at the cost of stateful task availability. Alternative implementations of the task assignment algorithm can be plugged into the application by implementing a custom `TaskAssignor` and setting this config to the name of the custom task assignor class. + +## topology.optimization + +> A configuration telling Kafka Streams if it should optimize the topology and what optimizations to apply. Acceptable values are: `StreamsConfig.NO_OPTIMIZATION` (`none`), `StreamsConfig.OPTIMIZE` (`all`) or a comma separated list of specific optimizations: `StreamsConfig.REUSE_KTABLE_SOURCE_TOPICS` (`reuse.ktable.source.topics`), `StreamsConfig.MERGE_REPARTITION_TOPICS` (`merge.repartition.topics`), `StreamsConfig.SINGLE_STORE_SELF_JOIN` (`single.store.self.join`). + +We recommend listing specific optimizations in the config for production code so that the structure of your topology will not change unexpectedly during upgrades of the Streams library. + +These optimizations include moving/reducing repartition topics and reusing the source topic as the changelog for source KTables.
These optimizations will save on network traffic and storage in Kafka without changing the semantics of your applications. Enabling them is recommended. + +Note that as of 2.3, you need to do two things to enable optimizations. In addition to setting this config to `StreamsConfig.OPTIMIZE`, you'll need to pass in your configuration properties when building your topology by using the overloaded `StreamsBuilder.build(Properties)` method. For example, `KafkaStreams myStream = new KafkaStreams(streamsBuilder.build(properties), properties)`. + +## upgrade.from + +> The version you are upgrading from. It is important to set this config when performing a rolling upgrade to certain versions, as described in the upgrade guide. You should set this config to the appropriate version before bouncing your instances and upgrading them to the newer version. Once everyone is on the newer version, you should remove this config and do a second rolling bounce. It is only necessary to set this config and follow the two-bounce upgrade path when upgrading from below version 2.0, or when upgrading to 2.4+ from any version lower than 2.4. + +# Kafka consumers, producer and admin client configuration parameters + +You can specify parameters for the Kafka [consumers](/41/javadoc/org/apache/kafka/clients/consumer/package-summary.html), [producers](/41/javadoc/org/apache/kafka/clients/producer/package-summary.html), and [admin client](/41/javadoc/org/apache/kafka/kafka/clients/admin/package-summary.html) that are used internally. The consumer, producer and admin client settings are defined by specifying parameters in a `StreamsConfig` instance. + +In this example, the Kafka [consumer session timeout](/41/javadoc/org/apache/kafka/clients/consumer/ConsumerConfig.html#SESSION_TIMEOUT_MS_CONFIG) is configured to be 60000 milliseconds in the Streams settings: + + + Properties streamsSettings = new Properties(); + // Example of a "normal" setting for Kafka Streams + streamsSettings.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka-broker-01:9092"); + // Customize the Kafka consumer settings of your Streams application + streamsSettings.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 60000); + +## Naming + +Some consumer, producer and admin client configuration parameters use the same parameter name, and the Kafka Streams library itself also uses some parameters that share the same name with its embedded clients. For example, `send.buffer.bytes` and `receive.buffer.bytes` are used to configure TCP buffers; `request.timeout.ms` and `retry.backoff.ms` control retries for client requests. You can avoid duplicate names by prefixing parameter names with `consumer.`, `producer.`, or `admin.` (e.g., `consumer.send.buffer.bytes` and `producer.send.buffer.bytes`).
+ + + Properties streamsSettings = new Properties(); + // same value for consumer, producer, and admin client + streamsSettings.put("PARAMETER_NAME", "value"); + // different values for consumer and producer + streamsSettings.put("consumer.PARAMETER_NAME", "consumer-value"); + streamsSettings.put("producer.PARAMETER_NAME", "producer-value"); + streamsSettings.put("admin.PARAMETER_NAME", "admin-value"); + // alternatively, you can use + streamsSettings.put(StreamsConfig.consumerPrefix("PARAMETER_NAME"), "consumer-value"); + streamsSettings.put(StreamsConfig.producerPrefix("PARAMETER_NAME"), "producer-value"); + streamsSettings.put(StreamsConfig.adminClientPrefix("PARAMETER_NAME"), "admin-value"); + +You could further separate consumer configuration by adding different prefixes: + + * `main.consumer.` for the main consumer, which is the default consumer of stream sources. + * `restore.consumer.` for the restore consumer, which is in charge of state store recovery. + * `global.consumer.` for the global consumer, which is used in global KTable construction. + + + +For example, if you only want to set the restore consumer config without touching other consumers' settings, you could simply use `restore.consumer.` to set the config. + + + Properties streamsSettings = new Properties(); + // same config value for all consumer types + streamsSettings.put("consumer.PARAMETER_NAME", "general-consumer-value"); + // set a different restore consumer config. This would make restore consumer take restore-consumer-value, + // while main consumer and global consumer stay with general-consumer-value + streamsSettings.put("restore.consumer.PARAMETER_NAME", "restore-consumer-value"); + // alternatively, you can use + streamsSettings.put(StreamsConfig.restoreConsumerPrefix("PARAMETER_NAME"), "restore-consumer-value"); + +The same applies to `main.consumer.` and `global.consumer.` if you only want to specify one consumer type's config. + +Additionally, to configure the internal repartition/changelog topics, you could use the `topic.` prefix, followed by any of the standard topic configs. + + + Properties streamsSettings = new Properties(); + // Override default for both changelog and repartition topics + streamsSettings.put("topic.PARAMETER_NAME", "topic-value"); + // alternatively, you can use + streamsSettings.put(StreamsConfig.topicPrefix("PARAMETER_NAME"), "topic-value"); + +## Default Values + +Kafka Streams uses different default values for some of the underlying client configs, which are summarized below. For detailed descriptions of these configs, see [Producer Configs](/41/documentation.html#producerconfigs) and [Consumer Configs](/41/documentation.html#consumerconfigs). + +Parameter Name | Corresponding Client | Streams Default +---|---|--- +auto.offset.reset | Consumer | `earliest` +linger.ms | Producer | `100` +max.poll.records | Consumer | `1000` +client.id | - | `<application.id>-<random-UUID>` + +If EOS is enabled, other parameters have the following default values. + +Parameter Name | Corresponding Client | Streams Default +---|---|--- +transaction.timeout.ms | Producer | `10000` +delivery.timeout.ms | Producer | `Integer.MAX_VALUE` + +# Parameters controlled by Kafka Streams + +Some parameters are not configurable by the user. If you supply a value that is different from the default value, your value is ignored. Below is a list of some of these parameters.
+
+Parameter Name | Corresponding Client | Streams Default
+---|---|---
+allow.auto.create.topics | Consumer | `false`
+group.id | Consumer | `application.id`
+enable.auto.commit | Consumer | `false`
+partition.assignment.strategy | Consumer | `StreamsPartitionAssignor`
+
+If EOS is enabled, other parameters are set with the following values.
+
+Parameter Name | Corresponding Client | Streams Default
+---|---|---
+isolation.level | Consumer | `READ_COMMITTED`
+enable.idempotence | Producer | `true`
+
+### client.id
+
+Kafka Streams uses the `client.id` parameter to compute derived client IDs for internal clients. If you don't set `client.id`, Kafka Streams sets it to `<application.id>-<random-UUID>`.
+
+This value will be used to derive the client IDs of the following internal clients.
+
+Client | client.id
+---|---
+Consumer | `<client.id>-StreamThread-<threadIdx>-consumer`
+Restore consumer | `<client.id>-StreamThread-<threadIdx>-restore-consumer`
+Global consumer | `<client.id>-global-consumer`
+Producer | **For Non-EOS and EOS v2:** `<client.id>-StreamThread-<threadIdx>-producer`
+**For EOS v1:** `<client.id>-StreamThread-<threadIdx>-<taskId>-producer`
+Admin | `<client.id>-admin`
+
+#### enable.auto.commit
+
+> The consumer auto commit. To guarantee at-least-once processing semantics and turn off auto commits, Kafka Streams overrides this consumer config value to `false`. Consumers will only commit explicitly via _commitSync_ calls when the Kafka Streams library or a user decides to commit the current processing state.
+
+[Previous](/41/streams/developer-guide/write-streams) [Next](/41/streams/developer-guide/dsl-api)
+
+  * [Documentation](/documentation)
+  * [Kafka Streams](/streams)
+  * [Developer Guide](/streams/developer-guide/)
+
+
diff --git a/content/en/41/streams/developer-guide/datatypes.md b/content/en/41/streams/developer-guide/datatypes.md
new file mode 100644
index 000000000..eb6318743
--- /dev/null
+++ b/content/en/41/streams/developer-guide/datatypes.md
@@ -0,0 +1,225 @@
+---
+title: Data Types and Serialization
+description:
+weight: 6
+tags: ['kafka', 'docs']
+aliases:
+keywords:
+type: docs
+---
+
+# Data Types and Serialization
+
+Every Kafka Streams application must provide Serdes (Serializer/Deserializer) for the data types of record keys and record values (e.g. `java.lang.String`) to materialize the data when necessary. Operations that require such Serdes information include: `stream()`, `table()`, `to()`, `repartition()`, `groupByKey()`, `groupBy()`.
+
+You can provide Serdes by using either of these methods, but you must use at least one:
+
+  * By setting default Serdes in the `java.util.Properties` config instance.
+  * By specifying explicit Serdes when calling the appropriate API methods, thus overriding the defaults.
+
+
+
+**Table of Contents**
+
+  * Configuring Serdes
+  * Overriding default Serdes
+  * Available Serdes
+  * Primitive and basic types
+  * JSON
+  * Window Serdes
+  * Implementing custom serdes
+  * Kafka Streams DSL for Scala Implicit Serdes
+
+
+
+# Configuring Serdes
+
+Serdes specified in the Streams configuration are used as the default in your Kafka Streams application. Because this config's default is null, you must either set a default Serde by using this configuration or pass in Serdes explicitly, as described below.
+ + + import org.apache.kafka.common.serialization.Serdes; + import org.apache.kafka.streams.StreamsConfig; + + Properties settings = new Properties(); + // Default serde for keys of data records (here: built-in serde for String type) + settings.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); + // Default serde for values of data records (here: built-in serde for Long type) + settings.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Long().getClass().getName()); + +# Overriding default Serdes + +You can also specify Serdes explicitly by passing them to the appropriate API methods, which overrides the default serde settings: + + + import org.apache.kafka.common.serialization.Serde; + import org.apache.kafka.common.serialization.Serdes; + + final Serde stringSerde = Serdes.String(); + final Serde longSerde = Serdes.Long(); + + // The stream userCountByRegion has type `String` for record keys (for region) + // and type `Long` for record values (for user counts). + KStream userCountByRegion = ...; + userCountByRegion.to("RegionCountsTopic", Produced.with(stringSerde, longSerde)); + +If you want to override serdes selectively, i.e., keep the defaults for some fields, then don't specify the serde whenever you want to leverage the default settings: + + + import org.apache.kafka.common.serialization.Serde; + import org.apache.kafka.common.serialization.Serdes; + + // Use the default serializer for record keys (here: region as String) by not specifying the key serde, + // but override the default serializer for record values (here: userCount as Long). + final Serde longSerde = Serdes.Long(); + KStream userCountByRegion = ...; + userCountByRegion.to("RegionCountsTopic", Produced.valueSerde(Serdes.Long())); + +If some of your incoming records are corrupted or ill-formatted, they will cause the deserializer class to report an error. Since 1.0.x we have introduced an `DeserializationExceptionHandler` interface which allows you to customize how to handle such records. The customized implementation of the interface can be specified via the `StreamsConfig`. For more details, please feel free to read the [Configuring a Streams Application](config-streams.html#default-deserialization-exception-handler) section. + +# Available Serdes + +# Primitive and basic types + +Apache Kafka includes several built-in serde implementations for Java primitives and basic types such as `byte[]` in its `kafka-clients` Maven artifact: + + + + org.apache.kafka + kafka-clients + 4.1.0 + + +This artifact provides the following serde implementations under the package [org.apache.kafka.common.serialization](https://github.com/apache/kafka/blob/4.1/clients/src/main/java/org/apache/kafka/common/serialization), which you can leverage when e.g., defining default serializers in your Streams configuration. + +Data type | Serde +---|--- +byte[] | `Serdes.ByteArray()`, `Serdes.Bytes()` (see tip below) +ByteBuffer | `Serdes.ByteBuffer()` +Double | `Serdes.Double()` +Integer | `Serdes.Integer()` +Long | `Serdes.Long()` +String | `Serdes.String()` +UUID | `Serdes.UUID()` +Void | `Serdes.Void()` +List | `Serdes.ListSerde()` +Boolean | `Serdes.Boolean()` + +**Tip** + +[Bytes](https://github.com/apache/kafka/blob/4.1/clients/src/main/java/org/apache/kafka/common/utils/Bytes.java) is a wrapper for Java's `byte[]` (byte array) that supports proper equality and ordering semantics. You may want to consider using `Bytes` instead of `byte[]` in your applications. 
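+
+For instance, the `List` serde in the table above wraps an inner serde for the element type. Here is a minimal sketch of combining it with other built-in serdes; the topic name `sensor-batches` is a made-up example, and the `ListSerde(Class, Serde)` factory overload is an assumption to verify against your client version:
+
+
+    import java.util.ArrayList;
+    import java.util.List;
+    import org.apache.kafka.common.serialization.Serde;
+    import org.apache.kafka.common.serialization.Serdes;
+    import org.apache.kafka.streams.StreamsBuilder;
+    import org.apache.kafka.streams.kstream.Consumed;
+    import org.apache.kafka.streams.kstream.KStream;
+
+    // A serde for List<Integer> values, composed from the built-in Integer serde.
+    final Serde<List<Integer>> batchSerde = Serdes.ListSerde(ArrayList.class, Serdes.Integer());
+
+    final StreamsBuilder builder = new StreamsBuilder();
+    // Hypothetical topic with String keys and List<Integer> values.
+    final KStream<String, List<Integer>> batches =
+        builder.stream("sensor-batches", Consumed.with(Serdes.String(), batchSerde));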
+ +# JSON + +The Kafka Streams code examples also include a basic serde implementation for JSON: + + * [PageViewTypedDemo](https://github.com/apache/kafka/blob/4.1/streams/examples/src/main/java/org/apache/kafka/streams/examples/pageview/PageViewTypedDemo.java#L83) + + + +As shown in the example, you can use JSONSerdes inner classes `Serdes.serdeFrom(, )` to construct JSON compatible serializers and deserializers. + +# Window Serdes + +Apache Kafka Streams includes serde implementations for windowed types in its `kafka-streams` Maven artifact: + + + + org.apache.kafka + kafka-streams + 4.1.0 + + +This artifact provides the following windowed serde implementations under the package [org.apache.kafka.streams.kstream](https://github.com/apache/kafka/blob/4.1/streams/src/main/java/org/apache/kafka/streams/kstream): + +**Serdes:** + + * `WindowedSerdes.TimeWindowedSerde` + * `WindowedSerdes.SessionWindowedSerde` + + + +**Serializers:** + + * `TimeWindowedSerializer` + * `SessionWindowedSerializer` + + + +**Deserializers:** + + * `TimeWindowedDeserializer` + * `SessionWindowedDeserializer` + + + +## Usage in Code + +When using windowed serdes in your application code, you typically create instances via constructors or factory methods: + + + // Time windowed serde - using factory method + Serde> timeWindowedSerde = + WindowedSerdes.timeWindowedSerdeFrom(String.class, 500L); + + // Time windowed serde - using constructor + Serde> timeWindowedSerde2 = + new WindowedSerdes.TimeWindowedSerde<>(Serdes.String(), 500L); + + // Session windowed serde - using factory method + Serde> sessionWindowedSerde = + WindowedSerdes.sessionWindowedSerdeFrom(String.class); + + // Session windowed serde - using constructor + Serde> sessionWindowedSerde2 = + new WindowedSerdes.SessionWindowedSerde<>(Serdes.String()); + + // Using individual serializers/deserializers + TimeWindowedSerializer serializer = new TimeWindowedSerializer<>(Serdes.String().serializer()); + TimeWindowedDeserializer deserializer = new TimeWindowedDeserializer<>(Serdes.String().deserializer(), 500L); + +## Usage in Command Line + +When using command-line tools (like `bin/kafka-console-consumer.sh`), you can configure windowed deserializers by passing the inner class and window size via configuration properties. 
The property names use a prefix pattern: + + + # Time windowed deserializer configuration + --property print.key=true \ + --property key.deserializer=org.apache.kafka.streams.kstream.TimeWindowedDeserializer \ + --property key.deserializer.windowed.inner.deserializer.class=org.apache.kafka.common.serialization.StringDeserializer \ + --property key.deserializer.window.size.ms=500 + + # Session windowed deserializer configuration + --property print.key=true \ + --property key.deserializer=org.apache.kafka.streams.kstream.SessionWindowedDeserializer \ + --property key.deserializer.windowed.inner.deserializer.class=org.apache.kafka.common.serialization.StringDeserializer + +## Deprecated Configs + +The following `StreamsConfig` parameters are deprecated in favor of passing parameters directly to serializer/deserializer constructors: + + * `StreamsConfig.WINDOWED_INNER_CLASS_SERDE` is deprecated in favor of `TimeWindowedSerializer.WINDOWED_INNER_SERIALIZER_CLASS` and `TimeWindowedDeserializer.WINDOWED_INNER_DESERIALIZER_CLASS` + * `StreamsConfig.WINDOW_SIZE_MS_CONFIG` is deprecated in favor of `TimeWindowedDeserializer.WINDOW_SIZE_MS_CONFIG` + + + +# Implementing custom Serdes + +If you need to implement custom Serdes, your best starting point is to take a look at the source code references of existing Serdes (see previous section). Typically, your workflow will be similar to: + + 1. Write a _serializer_ for your data type `T` by implementing [org.apache.kafka.common.serialization.Serializer](https://github.com/apache/kafka/blob/4.1/clients/src/main/java/org/apache/kafka/common/serialization/Serializer.java). + 2. Write a _deserializer_ for `T` by implementing [org.apache.kafka.common.serialization.Deserializer](https://github.com/apache/kafka/blob/4.1/clients/src/main/java/org/apache/kafka/common/serialization/Deserializer.java). + 3. Write a _serde_ for `T` by implementing [org.apache.kafka.common.serialization.Serde](https://github.com/apache/kafka/blob/4.1/clients/src/main/java/org/apache/kafka/common/serialization/Serde.java), which you either do manually (see existing Serdes in the previous section) or by leveraging helper functions in [Serdes](https://github.com/apache/kafka/blob/4.1/clients/src/main/java/org/apache/kafka/common/serialization/Serdes.java) such as `Serdes.serdeFrom(Serializer, Deserializer)`. Note that you will need to implement your own class (that has no generic types) if you want to use your custom serde in the configuration provided to `KafkaStreams`. If your serde class has generic types or you use `Serdes.serdeFrom(Serializer, Deserializer)`, you can pass your serde only via methods calls (for example `builder.stream("topicName", Consumed.with(...))`). + + + +# Kafka Streams DSL for Scala Implicit Serdes[](scala-dsl-serdes "Permalink to this headline") + +When using the [Kafka Streams DSL for Scala](dsl-api.html#scala-dsl) you're not required to configure a default Serdes. In fact, it's not supported. Serdes are instead provided implicitly by default implementations for common primitive datatypes. 
See the [Implicit Serdes](dsl-api.html#scala-dsl-implicit-serdes) and [User-Defined Serdes](dsl-api.html#scala-dsl-user-defined-serdes) sections in the DSL API documentation for details + +[Previous](/41/streams/developer-guide/processor-api) [Next](/41/streams/developer-guide/testing) + + * [Documentation](/documentation) + * [Kafka Streams](/streams) + * [Developer Guide](/streams/developer-guide/) + + diff --git a/content/en/41/streams/developer-guide/dsl-api.md b/content/en/41/streams/developer-guide/dsl-api.md new file mode 100644 index 000000000..bff511e03 --- /dev/null +++ b/content/en/41/streams/developer-guide/dsl-api.md @@ -0,0 +1,2736 @@ +--- +title: Streams DSL +description: +weight: 3 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Streams DSL + +The Kafka Streams DSL (Domain Specific Language) is built on top of the Streams Processor API. It is the recommended for most users, especially beginners. Most data processing operations can be expressed in just a few lines of DSL code. + +**Table of Contents** + + * Overview + * Creating source streams from Kafka + * Transform a stream + * Stateless transformations + * Stateful transformations + * Aggregating + * Joining + * Join co-partitioning requirements + * KStream-KStream Join + * KTable-KTable Equi-Join + * KTable-KTable Foreign-Key Join + * KStream-KTable Join + * KStream-GlobalKTable Join + * Windowing + * Hopping time windows + * Tumbling time windows + * Sliding time windows + * Session Windows + * Window Final Results + * Applying processors (Processor API integration) + * Transformers removal and migration to processors + * Naming Operators in a Streams DSL application + * Controlling KTable update rate + * Using timestamp-based semantics for table processors + * Writing streams back to Kafka + * Testing a Streams application + * Kafka Streams DSL for Scala + * Sample Usage + * Implicit Serdes + * User-Defined Serdes + + + +# Overview + +In comparison to the [Processor API](processor-api.html#streams-developer-guide-processor-api), only the DSL supports: + + * Built-in abstractions for [streams and tables](../core-concepts.html#streams_concepts_duality) in the form of KStream, KTable, and GlobalKTable. Having first-class support for streams and tables is crucial because, in practice, most use cases require not just either streams or databases/tables, but a combination of both. For example, if your use case is to create a customer 360-degree view that is updated in real-time, what your application will be doing is transforming many input _streams_ of customer-related events into an output _table_ that contains a continuously updated 360-degree view of your customers. + * Declarative, functional programming style with stateless transformations (e.g. `map` and `filter`) as well as stateful transformations such as aggregations (e.g. `count` and `reduce`), joins (e.g. `leftJoin`), and windowing (e.g. session windows). + + + +With the DSL, you can define [processor topologies](../core-concepts.html#streams_topology) (i.e., the logical processing plan) in your application. The steps to accomplish this are: + + 1. Specify one or more input streams that are read from Kafka topics. + 2. Compose transformations on these streams. + 3. Write the resulting output streams back to Kafka topics, or expose the processing results of your application directly to other applications through [interactive queries](interactive-queries.html#streams-developer-guide-interactive-queries) (e.g., via a REST API). 
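+
+As a quick illustration of these three steps, a minimal application might look like the sketch below. The topic names, application id, and filtering logic are placeholders invented for this example:
+
+
+    import java.util.Properties;
+    import org.apache.kafka.common.serialization.Serdes;
+    import org.apache.kafka.streams.KafkaStreams;
+    import org.apache.kafka.streams.StreamsBuilder;
+    import org.apache.kafka.streams.StreamsConfig;
+    import org.apache.kafka.streams.kstream.Consumed;
+    import org.apache.kafka.streams.kstream.KStream;
+    import org.apache.kafka.streams.kstream.Produced;
+
+    public class UppercaseExample {
+        public static void main(final String[] args) {
+            final StreamsBuilder builder = new StreamsBuilder();
+
+            // 1. Read an input topic as a KStream (topic name is hypothetical).
+            final KStream<String, String> input =
+                builder.stream("text-input", Consumed.with(Serdes.String(), Serdes.String()));
+
+            // 2. Compose transformations on the stream.
+            final KStream<String, String> shouted = input
+                .filter((key, value) -> value != null && !value.isEmpty())
+                .mapValues(value -> value.toUpperCase());
+
+            // 3. Write the resulting output stream back to Kafka.
+            shouted.to("text-output", Produced.with(Serdes.String(), Serdes.String()));
+
+            final Properties props = new Properties();
+            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "uppercase-example");
+            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+
+            final KafkaStreams streams = new KafkaStreams(builder.build(), props);
+            streams.start();
+            Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
+        }
+    }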
+ + + +After the application is run, the defined processor topologies are continuously executed (i.e., the processing plan is put into action). A step-by-step guide for writing a stream processing application using the DSL is provided below. + +For a complete list of available API functionality, see also the [Streams](/41/javadoc/org/apache/kafka/streams/package-summary.html) API docs. + +## KStream + +Only the **Kafka Streams DSL** has the notion of a `KStream`. + +A **KStream** is an abstraction of a **record stream** , where each data record represents a self-contained datum in the unbounded data set. Using the table analogy, data records in a record stream are always interpreted as an "INSERT" \-- think: adding more entries to an append-only ledger -- because no record replaces an existing row with the same key. Examples are a credit card transaction, a page view event, or a server log entry. + +To illustrate, let's imagine the following two data records are being sent to the stream: + +("alice", 1) --> ("alice", 3) + +If your stream processing application were to sum the values per user, it would return `4` for `alice`. Why? Because the second data record would not be considered an update of the previous record. Compare this behavior of KStream to `KTable` below, which would return `3` for `alice`. + +## KTable + +Only the **Kafka Streams DSL** has the notion of a `KTable`. + +A **KTable** is an abstraction of a **changelog stream** , where each data record represents an update. More precisely, the value in a data record is interpreted as an "UPDATE" of the last value for the same record key, if any (if a corresponding key doesn't exist yet, the update will be considered an INSERT). Using the table analogy, a data record in a changelog stream is interpreted as an UPSERT aka INSERT/UPDATE because any existing row with the same key is overwritten. Also, `null` values are interpreted in a special way: a record with a `null` value represents a "DELETE" or tombstone for the record's key. + +To illustrate, let's imagine the following two data records are being sent to the stream: + +("alice", 1) --> ("alice", 3) + +If your stream processing application were to sum the values per user, it would return `3` for `alice`. Why? Because the second data record would be considered an update of the previous record. + +**Effects of Kafka's log compaction:** Another way of thinking about KStream and KTable is as follows: If you were to store a KTable into a Kafka topic, you'd probably want to enable Kafka's [log compaction](http://kafka.apache.org/documentation.html#compaction) feature, e.g. to save storage space. + +However, it would not be safe to enable log compaction in the case of a KStream because, as soon as log compaction would begin purging older data records of the same key, it would break the semantics of the data. To pick up the illustration example again, you'd suddenly get a `3` for `alice` instead of a `4` because log compaction would have removed the `("alice", 1)` data record. Hence log compaction is perfectly safe for a KTable (changelog stream) but it is a mistake for a KStream (record stream). + +We have already seen an example of a changelog stream in the section [streams and tables](../core-concepts.html#streams_concepts_duality). Another example are change data capture (CDC) records in the changelog of a relational database, representing which row in a database table was inserted, updated, or deleted. 
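+
+To make the record-stream vs. changelog-stream distinction concrete, here is a minimal sketch of the two interpretations of the `("alice", 1)` / `("alice", 3)` example. The topic name `user-points` and the explicit serdes are assumptions for illustration, and the two readings are built as separate topologies because a single topology cannot register the same topic twice:
+
+
+    import org.apache.kafka.common.serialization.Serdes;
+    import org.apache.kafka.streams.StreamsBuilder;
+    import org.apache.kafka.streams.kstream.Consumed;
+    import org.apache.kafka.streams.kstream.Grouped;
+    import org.apache.kafka.streams.kstream.KStream;
+    import org.apache.kafka.streams.kstream.KTable;
+
+    // Record-stream interpretation: ("alice", 1) and ("alice", 3) are independent facts,
+    // so summing per key yields ("alice", 4).
+    final StreamsBuilder streamTopology = new StreamsBuilder();
+    final KStream<String, Long> points =
+        streamTopology.stream("user-points", Consumed.with(Serdes.String(), Serdes.Long()));
+    final KTable<String, Long> summed =
+        points.groupByKey(Grouped.with(Serdes.String(), Serdes.Long())).reduce(Long::sum);
+
+    // Changelog-stream interpretation: ("alice", 3) is an update that replaces ("alice", 1),
+    // so the table's current value for "alice" is 3.
+    final StreamsBuilder tableTopology = new StreamsBuilder();
+    final KTable<String, Long> latest =
+        tableTopology.table("user-points", Consumed.with(Serdes.String(), Serdes.Long()));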
+ +KTable also provides an ability to look up _current_ values of data records by keys. This table-lookup functionality is available through **join operations** (see also **Joining** in the Developer Guide) as well as through **Interactive Queries**. + +## GlobalKTable + +Only the **Kafka Streams DSL** has the notion of a **GlobalKTable**. + +Like a **KTable** , a **GlobalKTable** is an abstraction of a **changelog stream** , where each data record represents an update. + +A GlobalKTable differs from a KTable in the data that they are being populated with, i.e. which data from the underlying Kafka topic is being read into the respective table. Slightly simplified, imagine you have an input topic with 5 partitions. In your application, you want to read this topic into a table. Also, you want to run your application across 5 application instances for **maximum parallelism**. + + * If you read the input topic into a **KTable** , then the "local" KTable instance of each application instance will be populated with data **from only 1 partition** of the topic's 5 partitions. + * If you read the input topic into a **GlobalKTable** , then the local GlobalKTable instance of each application instance will be populated with data **from all partitions of the topic**. + + + +GlobalKTable provides the ability to look up _current_ values of data records by keys. This table-lookup functionality is available through `join operations`. Note that a GlobalKTable has **no** notion of time in contrast to a KTable. + +Benefits of global tables: + + * More convenient and/or efficient **joins** : Notably, global tables allow you to perform star joins, they support "foreign-key" lookups (i.e., you can lookup data in the table not just by record key, but also by data in the record values), and they are more efficient when chaining multiple joins. Also, when joining against a global table, the input data does not need to be **co-partitioned**. + * Can be used to "broadcast" information to all the running instances of your application. + + + +Downsides of global tables: + + * Increased local storage consumption compared to the (partitioned) KTable because the entire topic is tracked. + * Increased network and Kafka broker load compared to the (partitioned) KTable because the entire topic is read. + + + +# Creating source streams from Kafka + +You can easily read data from Kafka topics into your application. The following operations are supported. + +Reading from Kafka | Description +---|--- +**Stream** + + * _input topics_ -> KStream + +| Creates a KStream from the specified Kafka input topics and interprets the data as a record stream. A `KStream` represents a _partitioned_ record stream. [(details)](/41/javadoc/org/apache/kafka/streams/StreamsBuilder.html#stream\(java.lang.String\)) In the case of a KStream, the local KStream instance of every application instance will be populated with data from only **a subset** of the partitions of the input topic. Collectively, across all application instances, all input topic partitions are read and processed. 
+ + + import org.apache.kafka.common.serialization.Serdes; + import org.apache.kafka.streams.StreamsBuilder; + import org.apache.kafka.streams.kstream.KStream; + + StreamsBuilder builder = new StreamsBuilder(); + + KStream wordCounts = builder.stream( + "word-counts-input-topic", /* input topic */ + Consumed.with( + Serdes.String(), /* key serde */ + Serdes.Long() /* value serde */ + ); + +If you do not specify Serdes explicitly, the default Serdes from the [configuration](config-streams.html#streams-developer-guide-configuration) are used. You **must specify Serdes explicitly** if the key or value types of the records in the Kafka input topics do not match the configured default Serdes. For information about configuring default Serdes, available Serdes, and implementing your own custom Serdes see [Data Types and Serialization](datatypes.html#streams-developer-guide-serdes). Several variants of `stream` exist. For example, you can specify a regex pattern for input topics to read from (note that all matching topics will be part of the same input topic group, and the work will not be parallelized for different topics if subscribed to in this way). +**Table** + + * _input topic_ -> KTable + +| Reads the specified Kafka input topic into a KTable. The topic is interpreted as a changelog stream, where records with the same key are interpreted as UPSERT aka INSERT/UPDATE (when the record value is not `null`) or as DELETE (when the value is `null`) for that key. [(details)](/41/javadoc/org/apache/kafka/streams/StreamsBuilder.html#table-java.lang.String\(java.lang.String\)) In the case of a KTable, the local KTable instance of every application instance will be populated with data from only **a subset** of the partitions of the input topic. Collectively, across all application instances, all input topic partitions are read and processed. You must provide a name for the table (more precisely, for the internal [state store](../architecture.html#streams_architecture_state) that backs the table). This is required for supporting [interactive queries](interactive-queries.html#streams-developer-guide-interactive-queries) against the table. When a name is not provided the table will not be queryable and an internal name will be provided for the state store. If you do not specify Serdes explicitly, the default Serdes from the [configuration](config-streams.html#streams-developer-guide-configuration) are used. You **must specify Serdes explicitly** if the key or value types of the records in the Kafka input topics do not match the configured default Serdes. For information about configuring default Serdes, available Serdes, and implementing your own custom Serdes see [Data Types and Serialization](datatypes.html#streams-developer-guide-serdes). Several variants of `table` exist, for example to specify the `auto.offset.reset` policy to be used when reading from the input topic. +**Global Table** + + * _input topic_ -> GlobalKTable + +| Reads the specified Kafka input topic into a GlobalKTable. The topic is interpreted as a changelog stream, where records with the same key are interpreted as UPSERT aka INSERT/UPDATE (when the record value is not `null`) or as DELETE (when the value is `null`) for that key. [(details)](/41/javadoc/org/apache/kafka/streams/StreamsBuilder.html#globalTable-java.lang.String\(java.lang.String\)) In the case of a GlobalKTable, the local GlobalKTable instance of every application instance will be populated with data from **all** the partitions of the input topic. 
You must provide a name for the table (more precisely, for the internal [state store](../architecture.html#streams_architecture_state) that backs the table). This is required for supporting [interactive queries](interactive-queries.html#streams-developer-guide-interactive-queries) against the table. When a name is not provided the table will not be queryable and an internal name will be provided for the state store. + + + import org.apache.kafka.common.serialization.Serdes; + import org.apache.kafka.streams.StreamsBuilder; + import org.apache.kafka.streams.kstream.GlobalKTable; + + StreamsBuilder builder = new StreamsBuilder(); + + GlobalKTable wordCounts = builder.globalTable( + "word-counts-input-topic", + Materialized.>as( + "word-counts-global-store" /* table/store name */) + .withKeySerde(Serdes.String()) /* key serde */ + .withValueSerde(Serdes.Long()) /* value serde */ + ); + +You **must specify Serdes explicitly** if the key or value types of the records in the Kafka input topics do not match the configured default Serdes. For information about configuring default Serdes, available Serdes, and implementing your own custom Serdes see [Data Types and Serialization](datatypes.html#streams-developer-guide-serdes). Several variants of `globalTable` exist to e.g. specify explicit Serdes. + +# Transform a stream + +The KStream and KTable interfaces support a variety of transformation operations. Each of these operations can be translated into one or more connected processors into the underlying processor topology. Since KStream and KTable are strongly typed, all of these transformation operations are defined as generic functions where users could specify the input and output data types. + +Some KStream transformations may generate one or more KStream objects, for example: \- `filter` and `map` on a KStream will generate another KStream \- `split` on KStream can generate multiple KStreams + +Some others may generate a KTable object, for example an aggregation of a KStream also yields a KTable. This allows Kafka Streams to continuously update the computed value upon arrivals of [out-of-order records](../core-concepts.html#streams_concepts_aggregations) after it has already been produced to the downstream transformation operators. + +All KTable transformation operations can only generate another KTable. However, the Kafka Streams DSL does provide a special function that converts a KTable representation into a KStream. All of these transformation methods can be chained together to compose a complex processor topology. + +These transformation operations are described in the following subsections: + + * Stateless transformations + * Stateful transformations + + + +# Stateless transformations + +Stateless transformations do not require state for processing and they do not require a state store associated with the stream processor. Kafka 0.11.0 and later allows you to materialize the result from a stateless `KTable` transformation. This allows the result to be queried through [interactive queries](interactive-queries.html#streams-developer-guide-interactive-queries). To materialize a `KTable`, each of the below stateless operations [can be augmented](interactive-queries.html#streams-developer-guide-interactive-queries-local-key-value-stores) with an optional `queryableStoreName` argument. + +Transformation | Description +---|--- +**Branch** + + * KStream -> BranchedKStream + +| Branch (or split) a `KStream` based on the supplied predicates into one or more `KStream` instances. 
([details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#split\(\))) Predicates are evaluated in order. A record is placed to one and only one output stream on the first match: if the n-th predicate evaluates to true, the record is placed to n-th stream. If a record does not match any predicates, it will be routed to the default branch, or dropped if no default branch is created. Branching is useful, for example, to route records to different downstream topics. + + + KStream stream = ...; + Map> branches = + stream.split(Named.as("Branch-")) + .branch((key, value) -> key.startsWith("A"), /* first predicate */ + Branched.as("A")) + .branch((key, value) -> key.startsWith("B"), /* second predicate */ + Branched.as("B")) + .defaultBranch(Branched.as("C")) /* default branch */ + ); + + // KStream branches.get("Branch-A") contains all records whose keys start with "A" + // KStream branches.get("Branch-B") contains all records whose keys start with "B" + // KStream branches.get("Branch-C") contains all other records + + +**Broadcast/Multicast** + + * no operator + +| Broadcasting a `KStream` into multiple downstream operators. A record is sent to more than one operator by applying multiple operators to the same `KStream` instance. + + + KStream stream = ...; + KStream<...> stream1 = stream.map(...); + KStream<...> stream2 = stream.mapValue(...); + KStream<...> stream3 = stream.flatMap(...); + + +Multicasting a `KStream` into multiple downstream operators. In contrast to **branching** , which sends each record to at most one downstream branch, a multicast may send a record to any number of downstream `KStream` instances. A multicast is implemented as a broadcast plus filters. + + + KStream stream = ...; + KStream<...> stream1 = stream.filter((key, value) -> key.startsWith("A")); // contains all records whose keys start with "A" + KStream<...> stream2 = stream.filter((key, value) -> key.startsWith("AB")); // contains all records whose keys start with "AB" (subset of stream1) + KStream<...> stream3 = stream.filter((key, value) -> key.contains("B")); // contains all records whose keys contains a "B" (superset of stream2) + + +**Filter** + + * KStream -> KStream + * KTable -> KTable + +| Evaluates a boolean function for each element and retains those for which the function returns true. ([KStream details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#filter-org.apache.kafka.streams.kstream.Predicate-), [KTable details](/41/javadoc/org/apache/kafka/streams/kstream/KTable.html#filter-org.apache.kafka.streams.kstream.Predicate-)) + + + KStream stream = ...; + + // A filter that selects (keeps) only positive numbers + KStream onlyPositives = stream.filter((key, value) -> value > 0); + + +**Inverse Filter** + + * KStream -> KStream + * KTable -> KTable + +| Evaluates a boolean function for each element and drops those for which the function returns true. ([KStream details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#filterNot-org.apache.kafka.streams.kstream.Predicate-), [KTable details](/41/javadoc/org/apache/kafka/streams/kstream/KTable.html#filterNot-org.apache.kafka.streams.kstream.Predicate-)) + + + KStream stream = ...; + + // An inverse filter that discards any negative numbers or zero + KStream onlyPositives = stream.filterNot((key, value) -> value <= 0); + + + +**FlatMap** + + * KStream -> KStream + +| Takes one record and produces zero, one, or more records. You can modify the record keys and values, including their types. 
([details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#flatMap-org.apache.kafka.streams.kstream.KeyValueMapper-)) **Marks the stream for data re-partitioning:** Applying a grouping or a join after `flatMap` will result in re-partitioning of the records. If possible use `flatMapValues` instead, which will not cause data re-partitioning. + + + KStream stream = ...; + KStream transformed = stream.flatMap( + // Here, we generate two output records for each input record. + // We also change the key and value types. + // Example: (345L, "Hello") -> ("HELLO", 1000), ("hello", 9000) + (key, value) -> { + List> result = new LinkedList<>(); + result.add(KeyValue.pair(value.toUpperCase(), 1000)); + result.add(KeyValue.pair(value.toLowerCase(), 9000)); + return result; + } + ); + + +**FlatMapValues** + + * KStream -> KStream + +| Takes one record and produces zero, one, or more records, while retaining the key of the original record. You can modify the record values and the value type. ([details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#flatMapValues-org.apache.kafka.streams.kstream.ValueMapper-)) `flatMapValues` is preferable to `flatMap` because it will not cause data re-partitioning. However, you cannot modify the key or key type like `flatMap` does. + + + // Split a sentence into words. + KStream sentences = ...; + KStream words = sentences.flatMapValues(value -> Arrays.asList(value.split("\s+"))); + + +**Foreach** + + * KStream -> void + * KStream -> void + * KTable -> void + +| **Terminal operation.** Performs a stateless action on each record. ([details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#foreach-org.apache.kafka.streams.kstream.ForeachAction-)) You would use `foreach` to cause _side effects_ based on the input data (similar to `peek`) and then _stop_ _further processing_ of the input data (unlike `peek`, which is not a terminal operation). **Note on processing guarantees:** Any side effects of an action (such as writing to external systems) are not trackable by Kafka, which means they will typically not benefit from Kafka's processing guarantees. + + + KStream stream = ...; + + // Print the contents of the KStream to the local console. + stream.foreach((key, value) -> System.out.println(key + " => " + value)); + + +**GroupByKey** + + * KStream -> KGroupedStream + +| Groups the records by the existing key. ([details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#groupByKey--)) Grouping is a prerequisite for aggregating a stream or a table and ensures that data is properly partitioned ("keyed") for subsequent operations. **When to set explicit Serdes:** Variants of `groupByKey` exist to override the configured default Serdes of your application, which **you** **must do** if the key and/or value types of the resulting `KGroupedStream` do not match the configured default Serdes. **Note** **Grouping vs. Windowing:** A related operation is windowing, which lets you control how to "sub-group" the grouped records _of the same key_ into so-called _windows_ for stateful operations such as windowed aggregations or windowed joins. **Causes data re-partitioning if and only if the stream was marked for re-partitioning.** `groupByKey` is preferable to `groupBy` because it re-partitions data only if the stream was already marked for re-partitioning. However, `groupByKey` does not allow you to modify the key or key type like `groupBy` does. 
+ + + KStream stream = ...; + + // Group by the existing key, using the application's configured + // default serdes for keys and values. + KGroupedStream groupedStream = stream.groupByKey(); + + // When the key and/or value types do not match the configured + // default serdes, we must explicitly specify serdes. + KGroupedStream groupedStream = stream.groupByKey( + Grouped.with( + Serdes.ByteArray(), /* key */ + Serdes.String()) /* value */ + ); + +**GroupBy** + + * KStream -> KGroupedStream + * KTable -> KGroupedTable + +| Groups the records by a _new_ key, which may be of a different key type. When grouping a table, you may also specify a new value and value type. `groupBy` is a shorthand for `selectKey(...).groupByKey()`. ([KStream details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#groupBy-org.apache.kafka.streams.kstream.KeyValueMapper-), [KTable details](/41/javadoc/org/apache/kafka/streams/kstream/KTable.html#groupBy-org.apache.kafka.streams.kstream.KeyValueMapper-)) Grouping is a prerequisite for aggregating a stream or a table and ensures that data is properly partitioned ("keyed") for subsequent operations. **When to set explicit Serdes:** Variants of `groupBy` exist to override the configured default Serdes of your application, which **you must** **do** if the key and/or value types of the resulting `KGroupedStream` or `KGroupedTable` do not match the configured default Serdes. **Note** **Grouping vs. Windowing:** A related operation is windowing, which lets you control how to "sub-group" the grouped records _of the same key_ into so-called _windows_ for stateful operations such as windowed aggregations or windowed joins. **Always causes data re-partitioning:** `groupBy` always causes data re-partitioning. If possible use `groupByKey` instead, which will re-partition data only if required. + + + KStream stream = ...; + KTable table = ...; + + // Group the stream by a new key and key type + KGroupedStream groupedStream = stream.groupBy( + (key, value) -> value, + Grouped.with( + Serdes.String(), /* key (note: type was modified) */ + Serdes.String()) /* value */ + ); + + // Group the table by a new key and key type, and also modify the value and value type. + KGroupedTable groupedTable = table.groupBy( + (key, value) -> KeyValue.pair(value, value.length()), + Grouped.with( + Serdes.String(), /* key (note: type was modified) */ + Serdes.Integer()) /* value (note: type was modified) */ + ); + + + +**Cogroup** + + * KGroupedStream -> CogroupedKStream + * CogroupedKStream -> CogroupedKStream + +| Cogrouping allows to aggregate multiple input streams in a single operation. The different (already grouped) input streams must have the same key type and may have different values types. [KGroupedStream#cogroup()](/41/javadoc/org/apache/kafka/streams/kstream/KGroupedStream.html#cogroup) creates a new cogrouped stream with a single input stream, while [CogroupedKStream#cogroup()](/41/javadoc/org/apache/kafka/streams/kstream/CogroupedKStream.html#cogroup) adds a grouped stream to an existing cogrouped stream. A `CogroupedKStream` may be [windowed](/41/javadoc/org/apache/kafka/streams/kstream/CogroupedKStream.html#windowedBy) before it is [aggregated](/41/javadoc/org/apache/kafka/streams/kstream/CogroupedKStream.html#aggregate). Cogroup does not cause a repartition as it has the prerequisite that the input streams are grouped. In the process of creating these groups they will have already been repartitioned if the stream was already marked for repartitioning. 
+ + + KStream stream = ...; + KStream stream2 = ...; + + // Group by the existing key, using the application's configured + // default serdes for keys and values. + KGroupedStream groupedStream = stream.groupByKey(); + KGroupedStream groupedStream2 = stream2.groupByKey(); + CogroupedKStream cogroupedStream = groupedStream.cogroup(aggregator1).cogroup(groupedStream2, aggregator2); + + KTable table = cogroupedStream.aggregate(initializer); + + KTable table2 = cogroupedStream.windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMillis(500))).aggregate(initializer); + +**Map** + + * KStream -> KStream + +| Takes one record and produces one record. You can modify the record key and value, including their types. ([details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#map-org.apache.kafka.streams.kstream.KeyValueMapper-)) **Marks the stream for data re-partitioning:** Applying a grouping or a join after `map` will result in re-partitioning of the records. If possible use `mapValues` instead, which will not cause data re-partitioning. + + + KStream stream = ...; + + // Note how we change the key and the key type (similar to `selectKey`) + // as well as the value and the value type. + KStream transformed = stream.map( + (key, value) -> KeyValue.pair(value.toLowerCase(), value.length())); + + +**Map (values only)** + + * KStream -> KStream + * KTable -> KTable + +| Takes one record and produces one record, while retaining the key of the original record. You can modify the record value and the value type. ([KStream details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#mapValues-org.apache.kafka.streams.kstream.ValueMapper-), [KTable details](/41/javadoc/org/apache/kafka/streams/kstream/KTable.html#mapValues-org.apache.kafka.streams.kstream.ValueMapper-)) `mapValues` is preferable to `map` because it will not cause data re-partitioning. However, it does not allow you to modify the key or key type like `map` does. + + + KStream stream = ...; + + KStream uppercased = stream.mapValues(value -> value.toUpperCase()); + + +**Merge** + + * KStream -> KStream + +| Merges records of two streams into one larger stream. ([details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#merge-org.apache.kafka.streams.kstream.KStream-)) There is no ordering guarantee between records from different streams in the merged stream. Relative order is preserved within each input stream though (ie, records within the same input stream are processed in order) + + + KStream stream1 = ...; + + KStream stream2 = ...; + + KStream merged = stream1.merge(stream2); + +**Peek** + + * KStream -> KStream + +| Performs a stateless action on each record, and returns an unchanged stream. ([details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#peek-org.apache.kafka.streams.kstream.ForeachAction-)) You would use `peek` to cause _side effects_ based on the input data (similar to `foreach`) and _continue_ _processing_ the input data (unlike `foreach`, which is a terminal operation). `peek` returns the input stream as-is; if you need to modify the input stream, use `map` or `mapValues` instead. `peek` is helpful for use cases such as logging or tracking metrics or for debugging and troubleshooting. **Note on processing guarantees:** Any side effects of an action (such as writing to external systems) are not trackable by Kafka, which means they will typically not benefit from Kafka's processing guarantees. 
+ + + KStream stream = ...; + + KStream unmodifiedStream = stream.peek( + (key, value) -> System.out.println("key=" + key + ", value=" + value)); + + +**Print** + + * KStream -> void + +| **Terminal operation.** Prints the records to `System.out`. See Javadocs for serde and `toString()` caveats. ([details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#print--)) Calling `print()` is the same as calling `foreach((key, value) -> System.out.println(key + ", " + value))` `print` is mainly for debugging/testing purposes, and it will try to flush on each record print. Hence it **should not** be used for production usage if performance requirements are concerned. + + + KStream stream = ...; + // print to sysout + stream.print(); + + // print to file with a custom label + stream.print(Printed.toFile("streams.out").withLabel("streams")); + +**SelectKey** + + * KStream -> KStream + +| Assigns a new key - possibly of a new key type - to each record. ([details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#selectKey-org.apache.kafka.streams.kstream.KeyValueMapper-)) Calling `selectKey(mapper)` is the same as calling `map((key, value) -> mapper(key, value), value)`. **Marks the stream for data re-partitioning:** Applying a grouping or a join after `selectKey` will result in re-partitioning of the records. + + + KStream stream = ...; + + // Derive a new record key from the record's value. Note how the key type changes, too. + KStream rekeyed = stream.selectKey((key, value) -> value.split(" ")[0]) + + +**Table to Stream** + + * KTable -> KStream + +| Get the changelog stream of this table. ([details](/41/javadoc/org/apache/kafka/streams/kstream/KTable.html#toStream--)) + + + KTable table = ...; + + // Also, a variant of `toStream` exists that allows you + // to select a new key for the resulting stream. + KStream stream = table.toStream(); + +**Stream to Table** + + * KStream -> KTable + +| Convert an event stream into a table, or say a changelog stream. ([details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#toTable--)) + + + KStream stream = ...; + + KTable table = stream.toTable(); + +**Repartition** + + * KStream -> KStream + +| Manually trigger repartitioning of the stream with desired number of partitions. ([details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#repartition--)) Kafka Streams will manage the topic for `repartition()`. Generated topic is treated as internal topic, as a result data will be purged automatically as any other internal repartition topic. In addition, you can specify the desired number of partitions, which allows to easily scale in/out downstream sub-topologies. `repartition()` operation always triggers repartitioning of the stream, as a result it can be used with embedded Processor API methods (like `process()` et al.) that do not trigger auto repartitioning when key changing operation is performed beforehand. + + + KStream stream = ... ; + KStream repartitionedStream = stream.repartition(Repartitioned.numberOfPartitions(10)); + +# Stateful transformations + +Stateful transformations depend on state for processing inputs and producing outputs and require a [state store](../architecture.html#streams_architecture_state) associated with the stream processor. For example, in aggregating operations, a windowing state store is used to collect the latest aggregation results per window. In join operations, a windowing state store is used to collect all of the records received so far within the defined window boundary. 
+ +**Note:** Following store types are used regardless of the possibly specified type (via the parameter `materialized`): + + * non-windowed aggregations and non-windowed KTables use [TimestampedKeyValueStore](/41/javadoc/org/apache/kafka/streams/state/TimestampedKeyValueStore.html)s or [VersionedKeyValueStore](/41/javadoc/org/apache/kafka/streams/state/VersionedKeyValueStore.html)s, depending on whether the parameter `materialized` is versioned + * time-windowed aggregations and KStream-KStream joins use [TimestampedWindowStore](/41/javadoc/org/apache/kafka/streams/state/TimestampedWindowStore.html)s + * session windowed aggregations use [SessionStore](/41/javadoc/org/apache/kafka/streams/state/SessionStore.html)s (there is no timestamped session store as of now) + + + +Note, that state stores are fault-tolerant. In case of failure, Kafka Streams guarantees to fully restore all state stores prior to resuming the processing. See [Fault Tolerance](../architecture.html#streams_architecture_recovery) for further information. + +Available stateful transformations in the DSL include: + + * Aggregating + * Joining + * Windowing (as part of aggregations and joins) + * Applying custom processors and transformers, which may be stateful, for Processor API integration + + + +The following diagram shows their relationships: + +![](/41/images/streams-stateful_operations.png) + +Stateful transformations in the DSL. + +Here is an example of a stateful application: the WordCount algorithm. + +WordCount example: + + + // Assume the record values represent lines of text. For the sake of this example, you can ignore + // whatever may be stored in the record keys. + KStream textLines = ...; + + KStream wordCounts = textLines + // Split each text line, by whitespace, into words. The text lines are the record + // values, i.e. you can ignore whatever data is in the record keys and thus invoke + // `flatMapValues` instead of the more generic `flatMap`. + .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\W+"))) + // Group the stream by word to ensure the key of the record is the word. + .groupBy((key, word) -> word) + // Count the occurrences of each word (record key). + // + // This will change the stream type from `KGroupedStream` to + // `KTable` (word -> count). + .count() + // Convert the `KTable` into a `KStream`. + .toStream(); + +## Aggregating + +After records are grouped by key via `groupByKey` or `groupBy` - and thus represented as either a `KGroupedStream` or a `KGroupedTable`, they can be aggregated via an operation such as `reduce`. Aggregations are key-based operations, which means that they always operate over records (notably record values) of the same key. You can perform aggregations on windowed or non-windowed data. + +Transformation | Description +---|--- +**Aggregate** + + * KGroupedStream -> KTable + * KGroupedTable -> KTable + +| **Rolling aggregation.** Aggregates the values of (non-windowed) records by the grouped key or cogrouped. Aggregating is a generalization of `reduce` and allows, for example, the aggregate value to have a different type than the input values. 
([KGroupedStream details](/41/javadoc/org/apache/kafka/streams/kstream/KGroupedStream.html), [KGroupedTable details](/41/javadoc/org/apache/kafka/streams/kstream/KGroupedTable.html) [KGroupedTable details](/41/javadoc/org/apache/kafka/streams/kstream/CogroupedKStream.html)) When aggregating a _grouped stream_ , you must provide an initializer (e.g., `aggValue = 0`) and an "adder" aggregator (e.g., `aggValue + curValue`). When aggregating a _grouped table_ , you must additionally provide a "subtractor" aggregator (think: `aggValue - oldValue`). When aggregating a _cogrouped stream_ , the actual aggregators are provided for each input stream in the prior `cogroup()`calls, and thus you only need to provide an initializer (e.g., `aggValue = 0`) Several variants of `aggregate` exist, see Javadocs for details. + + + KGroupedStream groupedStream = ...; + KGroupedTable groupedTable = ...; + + // Aggregating a KGroupedStream (note how the value type changes from String to Long) + KTable aggregatedStream = groupedStream.aggregate( + () -> 0L, /* initializer */ + (aggKey, newValue, aggValue) -> aggValue + newValue.length(), /* adder */ + Materialized.>as("aggregated-stream-store") /* state store name */ + .withValueSerde(Serdes.Long()); /* serde for aggregate value */ + + // Aggregating a KGroupedTable (note how the value type changes from String to Long) + KTable aggregatedTable = groupedTable.aggregate( + () -> 0L, /* initializer */ + (aggKey, newValue, aggValue) -> aggValue + newValue.length(), /* adder */ + (aggKey, oldValue, aggValue) -> aggValue - oldValue.length(), /* subtractor */ + Materialized.>as("aggregated-table-store") /* state store name */ + .withValueSerde(Serdes.Long()) /* serde for aggregate value */ + + +Detailed behavior of `KGroupedStream`: + + * Input records with `null` keys are ignored. + * When a record key is received for the first time, the initializer is called (and called before the adder). + * Whenever a record with a non-`null` value is received, the adder is called. + +Detailed behavior of `KGroupedTable`: + + * Input records with `null` keys are ignored. + * When a record key is received for the first time, the initializer is called (and called before the adder and subtractor). Note that, in contrast to `KGroupedStream`, over time the initializer may be called more than once for a key as a result of having received input tombstone records for that key (see below). + * When the first non-`null` value is received for a key (e.g., INSERT), then only the adder is called. + * When subsequent non-`null` values are received for a key (e.g., UPDATE), then (1) the subtractor is called with the old value as stored in the table and (2) the adder is called with the new value of the input record that was just received. The subtractor is guaranteed to be called before the adder if the extracted grouping key of the old and new value is the same. The detection of this case depends on the correct implementation of the equals() method of the extracted key type. Otherwise, the order of execution for the subtractor and adder is not defined. + * When a tombstone record - i.e. a record with a `null` value - is received for a key (e.g., DELETE), then only the subtractor is called. Note that, whenever the subtractor returns a `null` value itself, then the corresponding key is removed from the resulting `KTable`. If that happens, any next input record for that key will trigger the initializer again. 
+ +See the example at the bottom of this section for a visualization of the aggregation semantics. +**Aggregate (windowed)** + + * KGroupedStream -> KTable + +| **Windowed aggregation.** Aggregates the values of records, per window, by the grouped key. Aggregating is a generalization of `reduce` and allows, for example, the aggregate value to have a different type than the input values. ([TimeWindowedKStream details](/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedKStream.html), [SessionWindowedKStream details](/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedKStream.html)) You must provide an initializer (e.g., `aggValue = 0`), "adder" aggregator (e.g., `aggValue + curValue`), and a window. When windowing based on sessions, you must additionally provide a "session merger" aggregator (e.g., `mergedAggValue = leftAggValue + rightAggValue`). The windowed `aggregate` turns a `TimeWindowedKStream` or `SessionWindowedKStream` into a windowed `KTable, V>`. Several variants of `aggregate` exist, see Javadocs for details. + + + import java.time.Duration; + KGroupedStream groupedStream = ...; + + // Aggregating with time-based windowing (here: with 5-minute tumbling windows) + KTable, Long> timeWindowedAggregatedStream = groupedStream.windowedBy(Duration.ofMinutes(5)) + .aggregate( + () -> 0L, /* initializer */ + (aggKey, newValue, aggValue) -> aggValue + newValue, /* adder */ + Materialized.>as("time-windowed-aggregated-stream-store") /* state store name */ + .withValueSerde(Serdes.Long())); /* serde for aggregate value */ + + // Aggregating with time-based windowing (here: with 5-minute sliding windows and 30-minute grace period) + KTable, Long> timeWindowedAggregatedStream = groupedStream.windowedBy(SlidingWindows.ofTimeDifferenceAndGrace(Duration.ofMinutes(5), Duration.ofMinutes(30))) + .aggregate( + () -> 0L, /* initializer */ + (aggKey, newValue, aggValue) -> aggValue + newValue, /* adder */ + Materialized.>as("time-windowed-aggregated-stream-store") /* state store name */ + .withValueSerde(Serdes.Long())); /* serde for aggregate value */ + + // Aggregating with session-based windowing (here: with an inactivity gap of 5 minutes) + KTable, Long> sessionizedAggregatedStream = groupedStream.windowedBy(SessionWindows.ofInactivityGapWithNoGrace(Duration.ofMinutes(5)). + aggregate( + () -> 0L, /* initializer */ + (aggKey, newValue, aggValue) -> aggValue + newValue, /* adder */ + (aggKey, leftAggValue, rightAggValue) -> leftAggValue + rightAggValue, /* session merger */ + Materialized.>as("sessionized-aggregated-stream-store") /* state store name */ + .withValueSerde(Serdes.Long())); /* serde for aggregate value */ + + +Detailed behavior: + + * The windowed aggregate behaves similar to the rolling aggregate described above. The additional twist is that the behavior applies _per window_. + * Input records with `null` keys are ignored in general. + * When a record key is received for the first time for a given window, the initializer is called (and called before the adder). + * Whenever a record with a non-`null` value is received for a given window, the adder is called. + * When using session windows: the session merger is called whenever two sessions are being merged. + +See the example at the bottom of this section for a visualization of the aggregation semantics. +**Count** + + * KGroupedStream -> KTable + * KGroupedTable -> KTable + +| **Rolling aggregation.** Counts the number of records by the grouped key. 
([KGroupedStream details](/41/javadoc/org/apache/kafka/streams/kstream/KGroupedStream.html), [KGroupedTable details](/41/javadoc/org/apache/kafka/streams/kstream/KGroupedTable.html)) Several variants of `count` exist, see Javadocs for details. + + + KGroupedStream groupedStream = ...; + KGroupedTable groupedTable = ...; + + // Counting a KGroupedStream + KTable aggregatedStream = groupedStream.count(); + + // Counting a KGroupedTable + KTable aggregatedTable = groupedTable.count(); + +Detailed behavior for `KGroupedStream`: + + * Input records with `null` keys or values are ignored. + +Detailed behavior for `KGroupedTable`: + + * Input records with `null` keys are ignored. Records with `null` values are not ignored but interpreted as "tombstones" for the corresponding key, which indicate the deletion of the key from the table. + + +**Count (windowed)** + + * KGroupedStream -> KTable + +| **Windowed aggregation.** Counts the number of records, per window, by the grouped key. ([TimeWindowedKStream details](/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedKStream.html), [SessionWindowedKStream details](/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedKStream.html)) The windowed `count` turns a `TimeWindowedKStream` or `SessionWindowedKStream` into a windowed `KTable, V>`. Several variants of `count` exist, see Javadocs for details. + + + import java.time.Duration; + KGroupedStream groupedStream = ...; + + // Counting a KGroupedStream with time-based windowing (here: with 5-minute tumbling windows) + KTable, Long> aggregatedStream = groupedStream.windowedBy( + TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(5))) /* time-based window */ + .count(); + + // Counting a KGroupedStream with time-based windowing (here: with 5-minute sliding windows and 30-minute grace period) + KTable, Long> aggregatedStream = groupedStream.windowedBy( + SlidingWindows.ofTimeDifferenceAndGrace(Duration.ofMinutes(5), Duration.ofMinutes(30))) /* time-based window */ + .count(); + + // Counting a KGroupedStream with session-based windowing (here: with 5-minute inactivity gaps) + KTable, Long> aggregatedStream = groupedStream.windowedBy( + SessionWindows.ofInactivityGapWithNoGrace(Duration.ofMinutes(5))) /* session window */ + .count(); + +Detailed behavior: + + * Input records with `null` keys or values are ignored. + + +**Reduce** + + * KGroupedStream -> KTable + * KGroupedTable -> KTable + +| **Rolling aggregation.** Combines the values of (non-windowed) records by the grouped key. The current record value is combined with the last reduced value, and a new reduced value is returned. The result value type cannot be changed, unlike `aggregate`. ([KGroupedStream details](/41/javadoc/org/apache/kafka/streams/kstream/KGroupedStream.html), [KGroupedTable details](/41/javadoc/org/apache/kafka/streams/kstream/KGroupedTable.html)) When reducing a _grouped stream_ , you must provide an "adder" reducer (e.g., `aggValue + curValue`). When reducing a _grouped table_ , you must additionally provide a "subtractor" reducer (e.g., `aggValue - oldValue`). Several variants of `reduce` exist, see Javadocs for details. 
+
+
+    KGroupedStream<String, Long> groupedStream = ...;
+    KGroupedTable<String, Long> groupedTable = ...;
+
+    // Reducing a KGroupedStream
+    KTable<String, Long> aggregatedStream = groupedStream.reduce(
+        (aggValue, newValue) -> aggValue + newValue /* adder */);
+
+    // Reducing a KGroupedTable
+    KTable<String, Long> aggregatedTable = groupedTable.reduce(
+        (aggValue, newValue) -> aggValue + newValue, /* adder */
+        (aggValue, oldValue) -> aggValue - oldValue /* subtractor */);
+
+
+Detailed behavior for `KGroupedStream`:
+
+  * Input records with `null` keys are ignored in general.
+  * When a record key is received for the first time, then the value of that record is used as the initial aggregate value.
+  * Whenever a record with a non-`null` value is received, the adder is called.
+
+Detailed behavior for `KGroupedTable`:
+
+  * Input records with `null` keys are ignored in general.
+  * When a record key is received for the first time, then the value of that record is used as the initial aggregate value. Note that, in contrast to `KGroupedStream`, over time this initialization step may happen more than once for a key as a result of having received input tombstone records for that key (see below).
+  * When the first non-`null` value is received for a key (e.g., INSERT), then only the adder is called.
+  * When subsequent non-`null` values are received for a key (e.g., UPDATE), then (1) the subtractor is called with the old value as stored in the table and (2) the adder is called with the new value of the input record that was just received. The subtractor is guaranteed to be called before the adder if the extracted grouping key of the old and new value is the same. The detection of this case depends on the correct implementation of the `equals()` method of the extracted key type. Otherwise, the order of execution for the subtractor and adder is not defined.
+  * When a tombstone record - i.e. a record with a `null` value - is received for a key (e.g., DELETE), then only the subtractor is called. Note that, whenever the subtractor returns a `null` value itself, then the corresponding key is removed from the resulting `KTable`. If that happens, any next input record for that key will re-initialize its aggregate value.
+
+See the example at the bottom of this section for a visualization of the aggregation semantics.
+**Reduce (windowed)**
+
+  * KGroupedStream -> KTable
+
+| **Windowed aggregation.** Combines the values of records, per window, by the grouped key. The current record value is combined with the last reduced value, and a new reduced value is returned. Records with `null` key or value are ignored. The result value type cannot be changed, unlike `aggregate`. ([TimeWindowedKStream details](/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedKStream.html), [SessionWindowedKStream details](/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedKStream.html)) The windowed `reduce` turns a `TimeWindowedKStream` or a `SessionWindowedKStream` into a windowed `KTable<Windowed<K>, V>`. Several variants of `reduce` exist, see Javadocs for details.
+
+
+    import java.time.Duration;
+    KGroupedStream<String, Long> groupedStream = ...;
+
+    // Aggregating with time-based windowing (here: with 5-minute tumbling windows)
+    KTable<Windowed<String>, Long> timeWindowedAggregatedStream = groupedStream.windowedBy(
+        TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(5)) /* time-based window */)
+        .reduce(
+          (aggValue, newValue) -> aggValue + newValue /* adder */
+        );
+
+    // Aggregating with time-based windowing (here: with 5-minute sliding windows and 30-minute grace)
+    KTable<Windowed<String>, Long> timeWindowedAggregatedStream = groupedStream.windowedBy(
+        SlidingWindows.ofTimeDifferenceAndGrace(Duration.ofMinutes(5), Duration.ofMinutes(30))) /* time-based window */
+        .reduce(
+          (aggValue, newValue) -> aggValue + newValue /* adder */
+        );
+
+    // Aggregating with session-based windowing (here: with an inactivity gap of 5 minutes)
+    KTable<Windowed<String>, Long> sessionizedAggregatedStream = groupedStream.windowedBy(
+        SessionWindows.ofInactivityGapWithNoGrace(Duration.ofMinutes(5))) /* session window */
+        .reduce(
+          (aggValue, newValue) -> aggValue + newValue /* adder */
+        );
+
+
+Detailed behavior:
+
+  * The windowed reduce behaves similar to the rolling reduce described above. The additional twist is that the behavior applies _per window_.
+  * Input records with `null` keys are ignored in general.
+  * When a record key is received for the first time for a given window, then the value of that record is used as the initial aggregate value.
+  * Whenever a record with a non-`null` value is received for a given window, the adder is called.
+
+See the example at the bottom of this section for a visualization of the aggregation semantics.
+
+**Example of semantics for stream aggregations:** A `KGroupedStream` -> `KTable` example is shown below. The streams and the table are initially empty. Bold font is used in the column for "KTable `aggregated`" to highlight changed state. An entry such as `(hello, 1)` denotes a record with key `hello` and value `1`. To improve the readability of the semantics table you can assume that all records are processed in timestamp order.
+
+
+    // Key: word, value: count
+    KStream<String, Integer> wordCounts = ...;
+
+    KGroupedStream<String, Integer> groupedStream = wordCounts
+        .groupByKey(Grouped.with(Serdes.String(), Serdes.Integer()));
+
+    KTable<String, Integer> aggregated = groupedStream.aggregate(
+        () -> 0, /* initializer */
+        (aggKey, newValue, aggValue) -> aggValue + newValue, /* adder */
+        Materialized.as("aggregated-stream-store" /* state store name */)
+          .withKeySerde(Serdes.String()) /* key serde */
+          .withValueSerde(Serdes.Integer())); /* serde for aggregate value */
+
+**Note**
+
+**Impact of record caches** : For illustration purposes, the column "KTable `aggregated`" below shows the table's state changes over time in a very granular way. In practice, you would observe state changes in such a granular way only when [record caches](memory-mgmt.html#streams-developer-guide-memory-management-record-cache) are disabled (default: enabled). When record caches are enabled, what might happen for example is that the output results of the rows with timestamps 4 and 5 would be [compacted](memory-mgmt.html#streams-developer-guide-memory-management-record-cache), and there would only be a single state update for the key `kafka` in the KTable (here: from `(kafka, 1)` directly to `(kafka, 3)`). Typically, you should only disable record caches for testing or debugging purposes - under normal circumstances it is better to leave record caches enabled.
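+If you want to observe every intermediate update (for example, to reproduce the semantics table below in a test), record caching can effectively be disabled by setting the state store cache size to zero. A minimal sketch, assuming the `statestore.cache.max.bytes` setting of recent Kafka Streams releases; the application id and bootstrap server are placeholders:
+
+
+    import java.util.Properties;
+    import org.apache.kafka.streams.StreamsConfig;
+
+    Properties props = new Properties();
+    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "aggregation-semantics-demo"); // placeholder application id
+    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");          // placeholder bootstrap server
+    // A cache size of 0 disables record caching, so every input record results in
+    // an observable update to the KTable, as in the granular table below.
+    props.put(StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG, 0);
+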
+
+ | KStream `wordCounts` | KGroupedStream `groupedStream` | KTable `aggregated`
+---|---|---|---
+Timestamp | Input record | Grouping | Initializer | Adder | State
+1 | (hello, 1) | (hello, 1) | 0 (for hello) | (hello, 0 + 1) | **(hello, 1)**
+2 | (kafka, 1) | (kafka, 1) | 0 (for kafka) | (kafka, 0 + 1) | (hello, 1) **(kafka, 1)**
+3 | (streams, 1) | (streams, 1) | 0 (for streams) | (streams, 0 + 1) | (hello, 1) (kafka, 1) **(streams, 1)**
+4 | (kafka, 1) | (kafka, 1) | | (kafka, 1 + 1) | (hello, 1) (kafka, **2**) (streams, 1)
+5 | (kafka, 1) | (kafka, 1) | | (kafka, 2 + 1) | (hello, 1) (kafka, **3**) (streams, 1)
+6 | (streams, 1) | (streams, 1) | | (streams, 1 + 1) | (hello, 1) (kafka, 3) (streams, **2**)
+
+**Example of semantics for table aggregations:** A `KGroupedTable` -> `KTable` example is shown below. The tables are initially empty. Bold font is used in the column for "KTable `aggregated`" to highlight changed state. An entry such as `(hello, 1)` denotes a record with key `hello` and value `1`. To improve the readability of the semantics table you can assume that all records are processed in timestamp order.
+
+
+    // Key: username, value: user region (abbreviated to "E" for "Europe", "A" for "Asia")
+    KTable<String, String> userProfiles = ...;
+
+    // Re-group `userProfiles`. Don't read too much into what the grouping does:
+    // its prime purpose in this example is to show the *effects* of the grouping
+    // in the subsequent aggregation.
+    KGroupedTable<String, Integer> groupedTable = userProfiles
+        .groupBy((user, region) -> KeyValue.pair(region, user.length()), Grouped.with(Serdes.String(), Serdes.Integer()));
+
+    KTable<String, Integer> aggregated = groupedTable.aggregate(
+        () -> 0, /* initializer */
+        (aggKey, newValue, aggValue) -> aggValue + newValue, /* adder */
+        (aggKey, oldValue, aggValue) -> aggValue - oldValue, /* subtractor */
+        Materialized.as("aggregated-table-store" /* state store name */)
+          .withKeySerde(Serdes.String()) /* key serde */
+          .withValueSerde(Serdes.Integer())); /* serde for aggregate value */
+
+**Note**
+
+**Impact of record caches** : For illustration purposes, the column "KTable `aggregated`" below shows the table's state changes over time in a very granular way. In practice, you would observe state changes in such a granular way only when [record caches](memory-mgmt.html#streams-developer-guide-memory-management-record-cache) are disabled (default: enabled). When record caches are enabled, what might happen for example is that the output results of the rows with timestamps 4 and 5 would be [compacted](memory-mgmt.html#streams-developer-guide-memory-management-record-cache), and there would only be a single state update for the key `A` in the KTable (here: from `(A, 10)` directly to `(A, 8)`). Typically, you should only disable record caches for testing or debugging purposes - under normal circumstances it is better to leave record caches enabled.
+ + | KTable `userProfiles` | KGroupedTable `groupedTable` | KTable `aggregated` +---|---|---|--- +Timestamp | Input record | Interpreted as | Grouping | Initializer | Adder | Subtractor | State +1 | (alice, E) | INSERT alice | (E, 5) | 0 (for E) | (E, 0 + 5) | | **(E, 5)** +2 | (bob, A) | INSERT bob | (A, 3) | 0 (for A) | (A, 0 + 3) | | **(A, 3)** (E, 5) +3 | (charlie, A) | INSERT charlie | (A, 7) | | (A, 3 + 7) | | (A, **10**) (E, 5) +4 | (alice, A) | UPDATE alice | (A, 5) | | (A, 10 + 5) | (E, 5 - 5) | (A, **15**) (E, **0**) +5 | (charlie, null) | DELETE charlie | (null, 7) | | | (A, 15 - 7) | (A, **8**) (E, 0) +6 | (null, E) | _ignored_ | | | | | (A, 8) (E, 0) +7 | (bob, E) | UPDATE bob | (E, 3) | | (E, 0 + 3) | (A, 8 - 3) | (A, **5**) (E, **3**) + +## Joining + +Streams and tables can also be joined. Many stream processing applications in practice are coded as streaming joins. For example, applications backing an online shop might need to access multiple, updating database tables (e.g. sales prices, inventory, customer information) in order to enrich a new data record (e.g. customer transaction) with context information. That is, scenarios where you need to perform table lookups at very large scale and with a low processing latency. Here, a popular pattern is to make the information in the databases available in Kafka through so-called _change data capture_ in combination with [Kafka's Connect API](../../#connect), and then implementing applications that leverage the Streams API to perform very fast and efficient local joins of such tables and streams, rather than requiring the application to make a query to a remote database over the network for each record. In this example, the KTable concept in Kafka Streams would enable you to track the latest state (e.g., snapshot) of each table in a local state store, thus greatly reducing the processing latency as well as reducing the load of the remote databases when doing such streaming joins. + +The following join operations are supported, see also the diagram in the overview section of Stateful Transformations. Depending on the operands, joins are either windowed joins or non-windowed joins. + +Join operands | Type | (INNER) JOIN | LEFT JOIN | OUTER JOIN +---|---|---|---|--- +KStream-to-KStream | Windowed | Supported | Supported | Supported +KTable-to-KTable | Non-windowed | Supported | Supported | Supported +KTable-to-KTable Foreign-Key Join | Non-windowed | Supported | Supported | Not Supported +KStream-to-KTable | Non-windowed | Supported | Supported | Not Supported +KStream-to-GlobalKTable | Non-windowed | Supported | Supported | Not Supported +KTable-to-GlobalKTable | N/A | Not Supported | Not Supported | Not Supported + +Each case is explained in more detail in the subsequent sections. + +### Join co-partitioning requirements + +For equi-joins, input data must be co-partitioned when joining. This ensures that input records with the same key from both sides of the join, are delivered to the same stream task during processing. **It is your responsibility to ensure data co-partitioning when joining**. Co-partitioning is not required when performing KTable-KTable Foreign-Key joins and Global KTable joins. + +The requirements for data co-partitioning are: + + * The input topics of the join (left side and right side) must have the **same number of partitions**. + * All applications that _write_ to the input topics must have the **same partitioning strategy** so that records with the same key are delivered to same partition number. 
In other words, the keyspace of the input data must be distributed across partitions in the same manner. This means that, for example, applications that use Kafka's [Java Producer API](../../#producerapi) must use the same partitioner (cf. the producer setting `"partitioner.class"` aka `ProducerConfig.PARTITIONER_CLASS_CONFIG`), and applications that use Kafka's Streams API must use the same `StreamPartitioner` for operations such as `KStream#to()`. The good news is that, if you happen to use the default partitioner-related settings across all applications, you do not need to worry about the partitioning strategy.
+
+
+
+Why is data co-partitioning required? Because KStream-KStream, KTable-KTable, and KStream-KTable joins are performed based on the keys of records (e.g., `leftRecord.key == rightRecord.key`), it is required that the input streams/tables of a join are co-partitioned by key.
+
+There are two exceptions where co-partitioning is not required. For KStream-GlobalKTable joins, co-partitioning is not required because _all_ partitions of the `GlobalKTable`'s underlying changelog stream are made available to each `KafkaStreams` instance. That is, each instance has a full copy of the changelog stream. Further, a `KeyValueMapper` allows for non-key based joins from the `KStream` to the `GlobalKTable`. KTable-KTable Foreign-Key joins also do not require co-partitioning. Kafka Streams internally ensures co-partitioning for Foreign-Key joins.
+
+**Note**
+
+**Kafka Streams partly verifies the co-partitioning requirement:** During the partition assignment step, i.e. at runtime, Kafka Streams verifies whether the number of partitions for both sides of a join are the same. If they are not, a `TopologyBuilderException` (runtime exception) is thrown. Note that Kafka Streams cannot verify whether the partitioning strategy matches between the input streams/tables of a join - it is up to the user to ensure that this is the case.
+
+**Ensuring data co-partitioning:** If the inputs of a join are not co-partitioned yet, you must ensure this manually. You may follow a procedure such as the one outlined below. It is recommended to repartition the topic with fewer partitions to match the larger partition number, to avoid bottlenecks. Technically it would also be possible to repartition the topic with more partitions down to the smaller partition number. For stream-table joins, it's recommended to repartition the KStream because repartitioning a KTable might result in a second state store. For table-table joins, you might also consider the size of the KTables and repartition the smaller KTable.
+
+ 1. Identify the input KStream/KTable in the join whose underlying Kafka topic has the smaller number of partitions. Let's call this stream/table "SMALLER", and the other side of the join "LARGER". To learn about the number of partitions of a Kafka topic you can use, for example, the CLI tool `bin/kafka-topics` with the `--describe` option.
+
+ 2. Within your application, re-partition the data of "SMALLER". You must ensure that, when repartitioning the data with `repartition`, the same partitioner is used as for "LARGER".
+
+> * If "SMALLER" is a KStream: `KStream#repartition(Repartitioned.numberOfPartitions(...))`.
+> * If "SMALLER" is a KTable: `KTable#toStream#repartition(Repartitioned.numberOfPartitions(...)).toTable()`.
+ + + + +### KStream-KStream Join + +KStream-KStream joins are always windowed joins, because otherwise the size of the internal state store used to perform the join - e.g., a sliding window or "buffer" - would grow indefinitely. For stream-stream joins it's important to highlight that a new input record on one side will produce a join output _for each_ matching record on the other side, and there can be _multiple_ such matching records in a given join window (cf. the row with timestamp 15 in the join semantics table below, for example). + +Join output records are effectively created as follows, leveraging the user-supplied `ValueJoiner`: + + + KeyValue leftRecord = ...; + KeyValue rightRecord = ...; + ValueJoiner joiner = ...; + + KeyValue joinOutputRecord = KeyValue.pair( + leftRecord.key, /* by definition, leftRecord.key == rightRecord.key */ + joiner.apply(leftRecord.value, rightRecord.value) + ); + +Transformation | Description +---|--- +**Inner Join (windowed)** + + * (KStream, KStream) -> KStream + +| Performs an INNER JOIN of this stream with another stream. Even though this operation is windowed, the joined stream will be of type `KStream` rather than `KStream, ...>`. [(details)](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#join-org.apache.kafka.streams.kstream.KStream-org.apache.kafka.streams.kstream.ValueJoiner-org.apache.kafka.streams.kstream.JoinWindows-) **Data must be co-partitioned** : The input data for both sides must be co-partitioned. **Causes data re-partitioning of a stream if and only if the stream was marked for re-partitioning (if both are marked, both are re-partitioned).** Several variants of `join` exists, see the Javadocs for details. + + + import java.time.Duration; + KStream left = ...; + KStream right = ...; + + KStream joined = left.join(right, + (leftValue, rightValue) -> "left=" + leftValue + ", right=" + rightValue, /* ValueJoiner */ + JoinWindows.ofTimeDifferenceWithNoGrace(Duration.ofMinutes(5)), + Joined.with( + Serdes.String(), /* key */ + Serdes.Long(), /* left value */ + Serdes.Double()) /* right value */ + ); + + +Detailed behavior: + + * The join is _key-based_ , i.e. with the join predicate `leftRecord.key == rightRecord.key`, and _window-based_ , i.e. two input records are joined if and only if their timestamps are "close" to each other as defined by the user-supplied `JoinWindows`, i.e. the window defines an additional join predicate over the record timestamps. + * The join will be triggered under the conditions listed below whenever new input is received. When it is triggered, the user-supplied `ValueJoiner` will be called to produce join output records. + +> * Input records with a `null` key or a `null` value are ignored and do not trigger the join. + + +See the semantics overview at the bottom of this section for a detailed description. +**Left Join (windowed)** + + * (KStream, KStream) -> KStream + +| Performs a LEFT JOIN of this stream with another stream. Even though this operation is windowed, the joined stream will be of type `KStream` rather than `KStream, ...>`. [(details)](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#leftJoin-org.apache.kafka.streams.kstream.KStream-org.apache.kafka.streams.kstream.ValueJoiner-org.apache.kafka.streams.kstream.JoinWindows-) **Data must be co-partitioned** : The input data for both sides must be co-partitioned. 
**Causes data re-partitioning of a stream if and only if the stream was marked for re-partitioning (if both are marked, both are re-partitioned).** Several variants of `leftJoin` exist, see the Javadocs for details.
+
+
+    import java.time.Duration;
+    KStream<String, Long> left = ...;
+    KStream<String, Double> right = ...;
+
+    KStream<String, String> joined = left.leftJoin(right,
+        (leftValue, rightValue) -> "left=" + leftValue + ", right=" + rightValue, /* ValueJoiner */
+        JoinWindows.ofTimeDifferenceWithNoGrace(Duration.ofMinutes(5)),
+        Joined.with(
+          Serdes.String(), /* key */
+          Serdes.Long(),   /* left value */
+          Serdes.Double()) /* right value */
+      );
+
+
+Detailed behavior:
+
+  * The join is _key-based_ , i.e. with the join predicate `leftRecord.key == rightRecord.key`, and _window-based_ , i.e. two input records are joined if and only if their timestamps are "close" to each other as defined by the user-supplied `JoinWindows`, i.e. the window defines an additional join predicate over the record timestamps.
+  * The join will be triggered under the conditions listed below whenever new input is received. When it is triggered, the user-supplied `ValueJoiner` will be called to produce join output records.
+
+> * Input records with a `null` value are ignored and do not trigger the join.
+
+  * For each input record on the left side that does not have any match on the right side, the `ValueJoiner` will be called with `ValueJoiner#apply(leftRecord.value, null)`; this explains the rows with timestamp=60 and timestamp=80 in the table below, which list `[E, null]` and `[F, null]` in the LEFT JOIN column. Note that these left results are emitted after the specified grace period has passed. **Caution:** using the deprecated `JoinWindows.of(...).grace(...)` API might result in eagerly emitted spurious left results.
+
+See the semantics overview at the bottom of this section for a detailed description.
+**Outer Join (windowed)**
+
+  * (KStream, KStream) -> KStream
+
+| Performs an OUTER JOIN of this stream with another stream. Even though this operation is windowed, the joined stream will be of type `KStream<K, ...>` rather than `KStream<Windowed<K>, ...>`. [(details)](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#outerJoin-org.apache.kafka.streams.kstream.KStream-org.apache.kafka.streams.kstream.ValueJoiner-org.apache.kafka.streams.kstream.JoinWindows-) **Data must be co-partitioned** : The input data for both sides must be co-partitioned. **Causes data re-partitioning of a stream if and only if the stream was marked for re-partitioning (if both are marked, both are re-partitioned).** Several variants of `outerJoin` exist, see the Javadocs for details.
+
+
+    import java.time.Duration;
+    KStream<String, Long> left = ...;
+    KStream<String, Double> right = ...;
+
+    KStream<String, String> joined = left.outerJoin(right,
+        (leftValue, rightValue) -> "left=" + leftValue + ", right=" + rightValue, /* ValueJoiner */
+        JoinWindows.ofTimeDifferenceWithNoGrace(Duration.ofMinutes(5)),
+        Joined.with(
+          Serdes.String(), /* key */
+          Serdes.Long(),   /* left value */
+          Serdes.Double()) /* right value */
+      );
+
+
+Detailed behavior:
+
+  * The join is _key-based_ , i.e. with the join predicate `leftRecord.key == rightRecord.key`, and _window-based_ , i.e. two input records are joined if and only if their timestamps are "close" to each other as defined by the user-supplied `JoinWindows`, i.e. the window defines an additional join predicate over the record timestamps.
+  * The join will be triggered under the conditions listed below whenever new input is received.
When it is triggered, the user-supplied `ValueJoiner` will be called to produce join output records. + +> * Input records with a `null` value are ignored and do not trigger the join. + + * For each input record on one side that does not have any match on the other side, the `ValueJoiner` will be called with `ValueJoiner#apply(leftRecord.value, null)` or `ValueJoiner#apply(null, rightRecord.value)`, respectively; this explains the row with timestamp=60, timestamp=80, and timestamp=100 in the table below, which lists `[E, null]`, `[F, null]`, and `[null, f]` in the OUTER JOIN column. Note that these left and right results are emitted after the specified grace period passed. **Caution:** using the deprecated `JoinWindows.of(...).grace(...)` API might result in eagerly emitted spurious left/right results. + +See the semantics overview at the bottom of this section for a detailed description. + +**Semantics of stream-stream joins:** The semantics of the various stream-stream join variants are explained below. To improve the readability of the table, assume that (1) all records have the same key (and thus the key in the table is omitted), and (2) all records are processed in timestamp order. We assume a join window size of 15 seconds with a grace period of 5 seconds. + +**Note:** If you use the old and now deprecated API to specify the grace period, i.e., `JoinWindows.of(...).grace(...)`, left/outer join results are emitted eagerly, and the observed result might differ from the result shown below. + +The columns INNER JOIN, LEFT JOIN, and OUTER JOIN denote what is passed as arguments to the user-supplied [ValueJoiner](/41/javadoc/org/apache/kafka/streams/kstream/ValueJoiner.html) for the `join`, `leftJoin`, and `outerJoin` methods, respectively, whenever a new input record is received on either side of the join. An empty table cell denotes that the `ValueJoiner` is not called at all. + +Timestamp | Left (KStream) | Right (KStream) | (INNER) JOIN | LEFT JOIN | OUTER JOIN +---|---|---|---|---|--- +1 | null | | | | +2 | | null | | | +3 | A | | | | +4 | | a | [A, a] | [A, a] | [A, a] +5 | B | | [B, a] | [B, a] | [B, a] +6 | | b | [A, b], [B, b] | [A, b], [B, b] | [A, b], [B, b] +7 | null | | | | +8 | | null | | | +9 | C | | [C, a], [C, b] | [C, a], [C, b] | [C, a], [C, b] +10 | | c | [A, c], [B, c], [C, c] | [A, c], [B, c], [C, c] | [A, c], [B, c], [C, c] +11 | | null | | | +12 | null | | | | +13 | | null | | | +14 | | d | [A, d], [B, d], [C, d] | [A, d], [B, d], [C, d] | [A, d], [B, d], [C, d] +15 | D | | [D, a], [D, b], [D, c], [D, d] | [D, a], [D, b], [D, c], [D, d] | [D, a], [D, b], [D, c], [D, d] +... | | | | | +40 | E | | | | +... | | | | | +60 | F | | | [E, null] | [E, null] +... | | | | | +80 | | f | | [F, null] | [F, null] +... | | | | | +100 | G | | | | [null, f] + +### KTable-KTable Equi-Join + +KTable-KTable equi-joins are always _non-windowed_ joins. They are designed to be consistent with their counterparts in relational databases. The changelog streams of both KTables are materialized into local state stores to represent the latest snapshot of their table duals. The join result is a new KTable that represents the changelog stream of the join operation. 
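+Because the join result is itself a KTable, it can also be materialized into a named state store, for example to expose it to interactive queries. A minimal sketch; the store name, value types, and serdes are illustrative assumptions, not part of the examples below:
+
+
+    import org.apache.kafka.common.serialization.Serdes;
+    import org.apache.kafka.common.utils.Bytes;
+    import org.apache.kafka.streams.kstream.KTable;
+    import org.apache.kafka.streams.kstream.Materialized;
+    import org.apache.kafka.streams.state.KeyValueStore;
+
+    KTable<String, Long> left = ...;
+    KTable<String, Double> right = ...;
+
+    // Inner join whose result is materialized under the (hypothetical) store name "joined-store"
+    KTable<String, String> joined = left.join(right,
+        (leftValue, rightValue) -> "left=" + leftValue + ", right=" + rightValue, /* ValueJoiner */
+        Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("joined-store")
+            .withKeySerde(Serdes.String())
+            .withValueSerde(Serdes.String()));
+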
+ +Join output records are effectively created as follows, leveraging the user-supplied `ValueJoiner`: + + + KeyValue leftRecord = ...; + KeyValue rightRecord = ...; + ValueJoiner joiner = ...; + + KeyValue joinOutputRecord = KeyValue.pair( + leftRecord.key, /* by definition, leftRecord.key == rightRecord.key */ + joiner.apply(leftRecord.value, rightRecord.value) + ); + +Transformation | Description +---|--- +**Inner Join** + + * (KTable, KTable) -> KTable + +| Performs an INNER JOIN of this table with another table. The result is an ever-updating KTable that represents the "current" result of the join. [(details)](/41/javadoc/org/apache/kafka/streams/kstream/KTable.html#join-org.apache.kafka.streams.kstream.KTable-org.apache.kafka.streams.kstream.ValueJoiner-) **Data must be co-partitioned** : The input data for both sides must be co-partitioned. + + + KTable left = ...; + KTable right = ...; + + KTable joined = left.join(right, + (leftValue, rightValue) -> "left=" + leftValue + ", right=" + rightValue /* ValueJoiner */ + ); + + +Detailed behavior: + + * The join is _key-based_ , i.e. with the join predicate `leftRecord.key == rightRecord.key`. + * The join will be triggered under the conditions listed below whenever new input is received. When it is triggered, the user-supplied `ValueJoiner` will be called to produce join output records. + +> * Input records with a `null` key are ignored and do not trigger the join. +> * Input records with a `null` value are interpreted as _tombstones_ for the corresponding key, which indicate the deletion of the key from the table. Tombstones do not trigger the join. When an input tombstone is received, then an output tombstone is forwarded directly to the join result KTable if required (i.e. only if the corresponding key actually exists already in the join result KTable). +> * When joining versioned tables, out-of-order input records, i.e., those for which another record from the same table, with the same key and a larger timestamp, has already been processed, are ignored and do not trigger the join. + + +See the semantics overview at the bottom of this section for a detailed description. +**Left Join** + + * (KTable, KTable) -> KTable + +| Performs a LEFT JOIN of this table with another table. [(details)](/41/javadoc/org/apache/kafka/streams/kstream/KTable.html#leftJoin-org.apache.kafka.streams.kstream.KTable-org.apache.kafka.streams.kstream.ValueJoiner-) **Data must be co-partitioned** : The input data for both sides must be co-partitioned. + + + KTable left = ...; + KTable right = ...; + + KTable joined = left.leftJoin(right, + (leftValue, rightValue) -> "left=" + leftValue + ", right=" + rightValue /* ValueJoiner */ + ); + + +Detailed behavior: + + * The join is _key-based_ , i.e. with the join predicate `leftRecord.key == rightRecord.key`. + * The join will be triggered under the conditions listed below whenever new input is received. When it is triggered, the user-supplied `ValueJoiner` will be called to produce join output records. + +> * Input records with a `null` key are ignored and do not trigger the join. +> * Input records with a `null` value are interpreted as _tombstones_ for the corresponding key, which indicate the deletion of the key from the table. Right-tombstones trigger the join, but left-tombstones don't: when an input tombstone is received, an output tombstone is forwarded directly to the join result KTable if required (i.e. only if the corresponding key actually exists already in the join result KTable). 
+> * When joining versioned tables, out-of-order input records, i.e., those for which another record from the same table, with the same key and a larger timestamp, has already been processed, are ignored and do not trigger the join. + + * For each input record on the left side that does not have any match on the right side, the `ValueJoiner` will be called with `ValueJoiner#apply(leftRecord.value, null)`; this explains the row with timestamp=3 in the table below, which lists `[A, null]` in the LEFT JOIN column. + +See the semantics overview at the bottom of this section for a detailed description. +**Outer Join** + + * (KTable, KTable) -> KTable + +| Performs an OUTER JOIN of this table with another table. [(details)](/41/javadoc/org/apache/kafka/streams/kstream/KTable.html#outerJoin-org.apache.kafka.streams.kstream.KTable-org.apache.kafka.streams.kstream.ValueJoiner-) **Data must be co-partitioned** : The input data for both sides must be co-partitioned. + + + KTable left = ...; + KTable right = ...; + + KTable joined = left.outerJoin(right, + (leftValue, rightValue) -> "left=" + leftValue + ", right=" + rightValue /* ValueJoiner */ + ); + + +Detailed behavior: + + * The join is _key-based_ , i.e. with the join predicate `leftRecord.key == rightRecord.key`. + * The join will be triggered under the conditions listed below whenever new input is received. When it is triggered, the user-supplied `ValueJoiner` will be called to produce join output records. + +> * Input records with a `null` key are ignored and do not trigger the join. +> * Input records with a `null` value are interpreted as _tombstones_ for the corresponding key, which indicate the deletion of the key from the table. Tombstones may trigger joins, depending on the content in the left and right tables. When an input tombstone is received, an output tombstone is forwarded directly to the join result KTable if required (i.e. only if the corresponding key actually exists already in the join result KTable). +> * When joining versioned tables, out-of-order input records, i.e., those for which another record from the same table, with the same key and a larger timestamp, has already been processed, are ignored and do not trigger the join. + + * For each input record on one side that does not have any match on the other side, the `ValueJoiner` will be called with `ValueJoiner#apply(leftRecord.value, null)` or `ValueJoiner#apply(null, rightRecord.value)`, respectively; this explains the rows with timestamp=3 and timestamp=7 in the table below, which list `[A, null]` and `[null, b]`, respectively, in the OUTER JOIN column. + +See the semantics overview at the bottom of this section for a detailed description. + +**Semantics of table-table equi-joins:** The semantics of the various table-table equi-join variants are explained below. To improve the readability of the table, you can assume that (1) all records have the same key (and thus the key in the table is omitted) and that (2) all records are processed in timestamp order. The columns INNER JOIN, LEFT JOIN, and OUTER JOIN denote what is passed as arguments to the user-supplied [ValueJoiner](/41/javadoc/org/apache/kafka/streams/kstream/ValueJoiner.html) for the `join`, `leftJoin`, and `outerJoin` methods, respectively, whenever a new input record is received on either side of the join. An empty table cell denotes that the `ValueJoiner` is not called at all. 
+ +Timestamp | Left (KTable) | Right (KTable) | (INNER) JOIN | LEFT JOIN | OUTER JOIN +---|---|---|---|---|--- +1 | null | | | | +2 | | null | | | +3 | A | | | [A, null] | [A, null] +4 | | a | [A, a] | [A, a] | [A, a] +5 | B | | [B, a] | [B, a] | [B, a] +6 | | b | [B, b] | [B, b] | [B, b] +7 | null | | null | null | [null, b] +8 | | null | | | null +9 | C | | | [C, null] | [C, null] +10 | | c | [C, c] | [C, c] | [C, c] +11 | | null | null | [C, null] | [C, null] +12 | null | | | null | null +13 | | null | | | +14 | | d | | | [null, d] +15 | D | | [D, d] | [D, d] | [D, d] + +### KTable-KTable Foreign-Key Join + +KTable-KTable foreign-key joins are always _non-windowed_ joins. Foreign-key joins are analogous to joins in SQL. As a rough example: + +` SELECT ... FROM {this KTable} JOIN {other KTable} ON {other.key} = {result of foreignKeyExtractor(this.value)} ... ` + +The output of the operation is a new KTable containing the join result. + +The changelog streams of both KTables are materialized into local state stores to represent the latest snapshot of their table duals. A foreign-key extractor function is applied to the left record, with a new intermediate record created and is used to lookup and join with the corresponding primary key on the right hand side table. The result is a new KTable that represents the changelog stream of the join operation. + +The left KTable can have multiple records which map to the same key on the right KTable. An update to a single left KTable entry may result in a single output event, provided the corresponding key exists in the right KTable. Consequently, a single update to a right KTable entry will result in an update for each record in the left KTable that has the same foreign key. + + + +Transformation | Description +---|--- +**Inner Join** + + * (KTable, KTable) -> KTable + +| Performs a foreign-key INNER JOIN of this table with another table. The result is an ever-updating KTable that represents the "current" result of the join. [(details)](/%7B%7Bversion%7D%7D/javadoc/org/apache/kafka/streams/kstream/KTable.html#join-org.apache.kafka.streams.kstream.KTable-org.apache.kafka.streams.kstream.ValueJoiner-) + + + KTable left = ...; + KTable right = ...; + //This foreignKeyExtractor simply uses the left-value to map to the right-key. + Function foreignKeyExtractor = (v) -> v; + //Alternative: with access to left table key + BiFunction foreignKeyExtractor = (k, v) -> v; + + KTable joined = left.join(right, foreignKeyExtractor, + (leftValue, rightValue) -> "left=" + leftValue + ", right=" + rightValue /* ValueJoiner */ + ); + +Detailed behavior: + + * The join is _key-based_ , i.e. with the join predicate: + + foreignKeyExtractor.apply(leftRecord.value) == rightRecord.key + + * The join will be triggered under the conditions listed below whenever new input is received. When it is triggered, the user-supplied `ValueJoiner` will be called to produce join output records. + +> * Records for which the `foreignKeyExtractor` produces `null` are ignored and do not trigger a join. If you want to join with `null` foreign keys, use a suitable sentinel value to do so (i.e. `"NULL"` for a String field, or `-1` for an auto-incrementing integer field). +> * Input records with a `null` value are interpreted as _tombstones_ for the corresponding key, which indicate the deletion of the key from the table. Tombstones do not trigger the join. When an input tombstone is received, then an output tombstone is forwarded directly to the join result KTable if required (i.e. 
only if the corresponding key actually exists already in the join result KTable). +> * When joining versioned tables, out-of-order input records, i.e., those for which another record from the same table, with the same key and a larger timestamp, has already been processed, are ignored and do not trigger the join. + + +See the semantics overview at the bottom of this section for a detailed description. +**Left Join** + + * (KTable, KTable) -> KTable + +| Performs a foreign-key LEFT JOIN of this table with another table. [(details)](/%7B%7Bversion%7D%7D/javadoc/org/apache/kafka/streams/kstream/KTable.html#leftJoin-org.apache.kafka.streams.kstream.KTable-org.apache.kafka.streams.kstream.ValueJoiner-) + + + KTable left = ...; + KTable right = ...; + //This foreignKeyExtractor simply uses the left-value to map to the right-key. + Function foreignKeyExtractor = (v) -> v; + //Alternative: with access to left table key + BiFunction foreignKeyExtractor = (k, v) -> v; + + KTable joined = left.leftJoin(right, foreignKeyExtractor, + (leftValue, rightValue) -> "left=" + leftValue + ", right=" + rightValue /* ValueJoiner */ + ); + +Detailed behavior: + + * The join is _key-based_ , i.e. with the join predicate: + + foreignKeyExtractor.apply(leftRecord.value) == rightRecord.key + + * The join will be triggered under the conditions listed below whenever new input is received. When it is triggered, the user-supplied `ValueJoiner` will be called to produce join output records. + +> * Input records with a `null` value are interpreted as _tombstones_ for the corresponding key, which indicate the deletion of the key from the table. Right-tombstones trigger the join, but left-tombstones don't: when an input tombstone is received, then an output tombstone is forwarded directly to the join result KTable if required (i.e. only if the corresponding key actually exists already in the join result KTable). +> * When joining versioned tables, out-of-order input records, i.e., those for which another record from the same table, with the same key and a larger timestamp, has already been processed, are ignored and do not trigger the join. + + * For each input record on the left side that does not have any match on the right side, the `ValueJoiner` will be called with `ValueJoiner#apply(leftRecord.value, null)`; this explains the row with timestamp=7 & 8 in the table below, which lists `(q,10,null) and (r,10,null)` in the LEFT JOIN column. + +See the semantics overview at the bottom of this section for a detailed description. + +**Semantics of table-table foreign-key joins:** The semantics of the table-table foreign-key INNER and LEFT JOIN variants are demonstrated below. The key is shown alongside the value for each record. Records are processed in incrementing offset order. The columns INNER JOIN and LEFT JOIN denote what is passed as arguments to the user-supplied [ValueJoiner](/%7B%7Bversion%7D%7D/javadoc/org/apache/kafka/streams/kstream/ValueJoiner.html) for the `join` and `leftJoin` methods, respectively, whenever a new input record is received on either side of the join. An empty table cell denotes that the `ValueJoiner` is not called at all. For the purpose of this example, `Function foreignKeyExtractor` simply uses the left-value as the output. 
+
+Record Offset | Left KTable (K, extracted-FK) | Right KTable (FK, VR) | (INNER) JOIN | LEFT JOIN
+---|---|---|---|---
+1 | (k,1) | (1,foo) | (k,1,foo) | (k,1,foo)
+2 | (k,2) | | (k,null) | (k,2,null)
+3 | (k,3) | | (k,null) | (k,3,null)
+4 | | (3,bar) | (k,3,bar) | (k,3,bar)
+5 | (k,null) | | (k,null) | (k,null,null)
+6 | (k,1) | | (k,1,foo) | (k,1,foo)
+7 | (q,10) | | | (q,10,null)
+8 | (r,10) | | | (r,10,null)
+9 | | (10,baz) | (q,10,baz), (r,10,baz) | (q,10,baz), (r,10,baz)
+
+### KStream-KTable Join
+
+KStream-KTable joins are always _non-windowed_ joins. They allow you to perform _table lookups_ against a KTable (changelog stream) upon receiving a new record from the KStream (record stream). An example use case would be to enrich a stream of user activities (KStream) with the latest user profile information (KTable).
+
+Join output records are effectively created as follows, leveraging the user-supplied `ValueJoiner`:
+
+
+    KeyValue leftRecord = ...;
+    KeyValue rightRecord = ...;
+    ValueJoiner joiner = ...;
+
+    KeyValue joinOutputRecord = KeyValue.pair(
+        leftRecord.key, /* by definition, leftRecord.key == rightRecord.key */
+        joiner.apply(leftRecord.value, rightRecord.value)
+      );
+
+Transformation | Description
+---|---
+**Inner Join**
+
+  * (KStream, KTable) -> KStream
+
+| Performs an INNER JOIN of this stream with the table, effectively doing a table lookup. [(details)](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#join-org.apache.kafka.streams.kstream.KTable-org.apache.kafka.streams.kstream.ValueJoiner-) **Data must be co-partitioned** : The input data for both sides must be co-partitioned. **Causes data re-partitioning of the stream if and only if the stream was marked for re-partitioning.** Several variants of `join` exist, see the Javadocs for details.
+
+
+    KStream<String, Long> left = ...;
+    KTable<String, Double> right = ...;
+
+    KStream<String, String> joined = left.join(right,
+        (leftValue, rightValue) -> "left=" + leftValue + ", right=" + rightValue, /* ValueJoiner */
+        Joined.keySerde(Serdes.String()) /* key */
+          .withValueSerde(Serdes.Long()) /* left value */
+          .withGracePeriod(Duration.ZERO) /* grace period */
+      );
+
+
+Detailed behavior:
+
+  * The join is _key-based_ , i.e. with the join predicate `leftRecord.key == rightRecord.key`.
+  * The join will be triggered under the conditions listed below whenever new input is received. When it is triggered, the user-supplied `ValueJoiner` will be called to produce join output records.
+
+> * Only input records for the left side (stream) trigger the join. Input records for the right side (table) update only the internal right-side join state.
+> * Input records for the stream with a `null` key or a `null` value are ignored and do not trigger the join.
+> * Input records for the table with a `null` value are interpreted as _tombstones_ for the corresponding key, which indicate the deletion of the key from the table. Tombstones do not trigger the join.
+
+  * When the table is versioned, the table record to join with is determined by performing a timestamped lookup, i.e., the table record which is joined will be the latest-by-timestamp record with timestamp less than or equal to the stream record timestamp. If the stream record timestamp is older than the table's history retention, then the record is dropped.
+  * To use the grace period, the table needs to be versioned. This will cause the stream to buffer for the specified grace period before trying to find a matching record with the right timestamp in the table.
The case where the grace period would be used for is if a record in the table has a timestamp less than or equal to the stream record timestamp but arrives after the stream record. If the table record arrives within the grace period the join will still occur. If the table record does not arrive before the grace period the join will continue as normal. + +See the semantics overview at the bottom of this section for a detailed description. +**Left Join** + + * (KStream, KTable) -> KStream + +| Performs a LEFT JOIN of this stream with the table, effectively doing a table lookup. [(details)](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#leftJoin-org.apache.kafka.streams.kstream.KTable-org.apache.kafka.streams.kstream.ValueJoiner-) **Data must be co-partitioned** : The input data for both sides must be co-partitioned. **Causes data re-partitioning of the stream if and only if the stream was marked for re-partitioning.** Several variants of `leftJoin` exists, see the Javadocs for details. + + + KStream left = ...; + KTable right = ...; + + KStream joined = left.leftJoin(right, + (leftValue, rightValue) -> "left=" + leftValue + ", right=" + rightValue, /* ValueJoiner */ + Joined.keySerde(Serdes.String()) /* key */ + .withValueSerde(Serdes.Long()) /* left value */ + .withGracePeriod(Duration.ZERO) /* grace period */ + ); + + +Detailed behavior: + + * The join is _key-based_ , i.e. with the join predicate `leftRecord.key == rightRecord.key`. + * The join will be triggered under the conditions listed below whenever new input is received. When it is triggered, the user-supplied `ValueJoiner` will be called to produce join output records. + +> * Only input records for the left side (stream) trigger the join. Input records for the right side (table) update only the internal right-side join state. +> * Input records for the stream with a `null` value are ignored and do not trigger the join. +> * Input records for the table with a `null` value are interpreted as _tombstones_ for the corresponding key, which indicate the deletion of the key from the table. Tombstones do not trigger the join. + + * For each input record on the left side that does not have any match on the right side, the `ValueJoiner` will be called with `ValueJoiner#apply(leftRecord.value, null)`; this explains the row with timestamp=3 in the table below, which lists `[A, null]` in the LEFT JOIN column. + * When the table is versioned, the table record to join with is determined by performing a timestamped lookup, i.e., the table record which is joined will be the latest-by-timestamp record with timestamp less than or equal to the stream record timestamp. If the stream record timestamp is older than the table's history retention, then the record that is joined will be `null`. + * To use the grace period, the table needs to be versioned. This will cause the stream to buffer for the specified grace period before trying to find a matching record with the right timestamp in the table. The case where the grace period would be used for is if a record in the table has a timestamp less than or equal to the stream record timestamp but arrives after the stream record. If the table record arrives within the grace period the join will still occur. If the table record does not arrive before the grace period the join will continue as normal. + +See the semantics overview at the bottom of this section for a detailed description. 
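+Both variants above note that the table must be versioned in order to use a join grace period. A minimal sketch of wiring this up, assuming the versioned key-value store supplier (`Stores.persistentVersionedKeyValueStore`) available in recent releases; the topic names, serdes, and retention/grace values are illustrative:
+
+
+    import java.time.Duration;
+    import org.apache.kafka.common.serialization.Serdes;
+    import org.apache.kafka.streams.StreamsBuilder;
+    import org.apache.kafka.streams.kstream.Consumed;
+    import org.apache.kafka.streams.kstream.Joined;
+    import org.apache.kafka.streams.kstream.KStream;
+    import org.apache.kafka.streams.kstream.KTable;
+    import org.apache.kafka.streams.kstream.Materialized;
+    import org.apache.kafka.streams.state.Stores;
+
+    StreamsBuilder builder = new StreamsBuilder();
+
+    // A versioned table: keeps timestamped versions per key for a 30-minute history retention.
+    KTable<String, Double> right = builder.table("profile-topic", /* placeholder topic */
+        Consumed.with(Serdes.String(), Serdes.Double()),
+        Materialized.as(Stores.persistentVersionedKeyValueStore("versioned-profiles", Duration.ofMinutes(30))));
+
+    KStream<String, Long> left = builder.stream("activity-topic", /* placeholder topic */
+        Consumed.with(Serdes.String(), Serdes.Long()));
+
+    // Stream records are buffered for up to 1 minute, so a matching table record that
+    // arrives late (but carries an older or equal timestamp) can still be joined.
+    KStream<String, String> joined = left.join(right,
+        (leftValue, rightValue) -> "left=" + leftValue + ", right=" + rightValue, /* ValueJoiner */
+        Joined.with(Serdes.String(), Serdes.Long(), Serdes.Double()).withGracePeriod(Duration.ofMinutes(1)));
+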
+ +**Semantics of stream-table joins:** The semantics of the various stream-table join variants are explained below. To improve the readability of the table we assume that (1) all records have the same key (and thus we omit the key in the table) and that (2) all records are processed in timestamp order. The columns INNER JOIN and LEFT JOIN denote what is passed as arguments to the user-supplied [ValueJoiner](/41/javadoc/org/apache/kafka/streams/kstream/ValueJoiner.html) for the `join` and `leftJoin` methods, respectively, whenever a new input record is received on either side of the join. An empty table cell denotes that the `ValueJoiner` is not called at all. + +Timestamp | Left (KStream) | Right (KTable) | (INNER) JOIN | LEFT JOIN +---|---|---|---|--- +1 | null | | | +2 | | null | | +3 | A | | | [A, null] +4 | | a | | +5 | B | | [B, a] | [B, a] +6 | | b | | +7 | null | | | +8 | | null | | +9 | C | | | [C, null] +10 | | c | | +11 | | null | | +12 | null | | | +13 | | null | | +14 | | d | | +15 | D | | [D, d] | [D, d] + +### KStream-GlobalKTable Join + +KStream-GlobalKTable joins are always _non-windowed_ joins. They allow you to perform _table lookups_ against a GlobalKTable (entire changelog stream) upon receiving a new record from the KStream (record stream). An example use case would be "star queries" or "star joins", where you would enrich a stream of user activities (KStream) with the latest user profile information (GlobalKTable) and further context information (further GlobalKTables). However, because GlobalKTables have no notion of time, a KStream-GlobalKTable join is not a temporal join, and there is no event-time synchronization between updates to a GlobalKTable and processing of KStream records. + +At a high-level, KStream-GlobalKTable joins are very similar to KStream-KTable joins. However, global tables provide you with much more flexibility at the some expense when compared to partitioned tables: + + * They do not require data co-partitioning. + * They allow for efficient "star joins"; i.e., joining a large-scale "facts" stream against "dimension" tables + * They allow for joining against foreign keys; i.e., you can lookup data in the table not just by the keys of records in the stream, but also by data in the record values. + * They make many use cases feasible where you must work on heavily skewed data and thus suffer from hot partitions. + * They are often more efficient than their partitioned KTable counterpart when you need to perform multiple joins in succession. + + + +Join output records are effectively created as follows, leveraging the user-supplied `ValueJoiner`: + + + KeyValue leftRecord = ...; + KeyValue rightRecord = ...; + ValueJoiner joiner = ...; + + KeyValue joinOutputRecord = KeyValue.pair( + leftRecord.key, /* by definition, leftRecord.key == rightRecord.key */ + joiner.apply(leftRecord.value, rightRecord.value) + ); + +Transformation | Description +---|--- +**Inner Join** + + * (KStream, GlobalKTable) -> KStream + +| Performs an INNER JOIN of this stream with the global table, effectively doing a table lookup. [(details)](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#join-org.apache.kafka.streams.kstream.GlobalKTable-org.apache.kafka.streams.kstream.KeyValueMapper-org.apache.kafka.streams.kstream.ValueJoiner-) The `GlobalKTable` is fully bootstrapped upon (re)start of a `KafkaStreams` instance, which means the table is fully populated with all the data in the underlying topic that is available at the time of the startup. 
The actual data processing begins only once the bootstrapping has completed. **Causes data re-partitioning of the stream if and only if the stream was marked for re-partitioning.** + + + KStream left = ...; + GlobalKTable right = ...; + + KStream joined = left.join(right, + (leftKey, leftValue) -> leftKey.length(), /* derive a (potentially) new key by which to lookup against the table */ + (leftValue, rightValue) -> "left=" + leftValue + ", right=" + rightValue /* ValueJoiner */ + ); + + +Detailed behavior: + + * The join is indirectly _key-based_ , i.e. with the join predicate `KeyValueMapper#apply(leftRecord.key, leftRecord.value) == rightRecord.key`. + * The join will be triggered under the conditions listed below whenever new input is received. When it is triggered, the user-supplied `ValueJoiner` will be called to produce join output records. + +> * Only input records for the left side (stream) trigger the join. Input records for the right side (table) update only the internal right-side join state. +> * Input records for the stream with a `null` key or a `null` value are ignored and do not trigger the join. +> * Input records for the table with a `null` value are interpreted as _tombstones_ , which indicate the deletion of a record key from the table. Tombstones do not trigger the join. + + + +**Left Join** + + * (KStream, GlobalKTable) -> KStream + +| Performs a LEFT JOIN of this stream with the global table, effectively doing a table lookup. [(details)](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#leftJoin-org.apache.kafka.streams.kstream.GlobalKTable-org.apache.kafka.streams.kstream.KeyValueMapper-org.apache.kafka.streams.kstream.ValueJoiner-) The `GlobalKTable` is fully bootstrapped upon (re)start of a `KafkaStreams` instance, which means the table is fully populated with all the data in the underlying topic that is available at the time of the startup. The actual data processing begins only once the bootstrapping has completed. **Causes data re-partitioning of the stream if and only if the stream was marked for re-partitioning.** + + + KStream left = ...; + GlobalKTable right = ...; + + KStream joined = left.leftJoin(right, + (leftKey, leftValue) -> leftKey.length(), /* derive a (potentially) new key by which to lookup against the table */ + (leftValue, rightValue) -> "left=" + leftValue + ", right=" + rightValue /* ValueJoiner */ + ); + + +Detailed behavior: + + * The join is indirectly _key-based_ , i.e. with the join predicate `KeyValueMapper#apply(leftRecord.key, leftRecord.value) == rightRecord.key`. + * The join will be triggered under the conditions listed below whenever new input is received. When it is triggered, the user-supplied `ValueJoiner` will be called to produce join output records. + +> * Only input records for the left side (stream) trigger the join. Input records for the right side (table) update only the internal right-side join state. +> * Input records for the stream with a `null` value are ignored and do not trigger the join. +> * Input records for the table with a `null` value are interpreted as _tombstones_ , which indicate the deletion of a record key from the table. Tombstones do not trigger the join. + + * For each input record on the left side that does not have any match on the right side, the `ValueJoiner` will be called with `ValueJoiner#apply(leftRecord.value, null)`. + + + +**Semantics of stream-global-table joins:** The join semantics are different to KStream-KTable joins because it's not a temporal join. 
Another difference is that, for KStream-GlobalKTable joins, the left input record is first "mapped" with a user-supplied `KeyValueMapper` into the table's keyspace prior to the table lookup. + +## Windowing + +Windowing lets you control how to group records that have the same key for stateful operations such as aggregations or joins into so-called windows. Windows are tracked per record key. + +**Note** + +A related operation is grouping, which groups all records that have the same key to ensure that data is properly partitioned ("keyed") for subsequent operations. Once grouped, windowing allows you to further sub-group the records of a key. + +For example, in join operations, a windowing state store is used to store all the records received so far within the defined window boundary. In aggregating operations, a windowing state store is used to store the latest aggregation results per window. Old records in the state store are purged after the specified [window retention period](../core-concepts.html#streams_concepts_windowing). Kafka Streams guarantees to keep a window for at least this specified time; the default value is one day and can be changed via `Materialized#withRetention()`. + +The DSL supports the following types of windows: + +Window name | Behavior | Short description +---|---|--- +Hopping time window | Time-based | Fixed-size, overlapping windows +Tumbling time window | Time-based | Fixed-size, non-overlapping, gap-less windows +Sliding time window | Time-based | Fixed-size, overlapping windows that work on differences between record timestamps +Session window | Session-based | Dynamically-sized, non-overlapping, data-driven windows + +### Hopping time windows + +Hopping time windows are windows based on time intervals. They model fixed-sized, (possibly) overlapping windows. A hopping window is defined by two properties: the window's _size_ and its _advance interval_ (aka "hop"). The advance interval specifies by how much a window moves forward relative to the previous one. For example, you can configure a hopping window with a size 5 minutes and an advance interval of 1 minute. Since hopping windows can overlap - and in general they do - a data record may belong to more than one such windows. + +**Note** + +**Hopping windows vs. sliding windows:** Hopping windows are sometimes called "sliding windows" in other stream processing tools. Kafka Streams follows the terminology in academic literature, where the semantics of sliding windows are different to those of hopping windows. + +The following code defines a hopping window with a size of 5 minutes and an advance interval of 1 minute: + + + import java.time.Duration; + import org.apache.kafka.streams.kstream.TimeWindows; + + // A hopping time window with a size of 5 minutes and an advance interval of 1 minute. + // The window's name -- the string parameter -- is used to e.g. name the backing state store. + Duration windowSize = Duration.ofMinutes(5); + Duration advance = Duration.ofMinutes(1); + TimeWindows.ofSizeWithNoGrace(windowSize).advanceBy(advance); + +![](/41/images/streams-time-windows-hopping.png) + +This diagram shows windowing a stream of data records with hopping windows. In this diagram the time numbers represent minutes; e.g. t=5 means "at the five-minute mark". In reality, the unit of time in Kafka Streams is milliseconds, which means the time numbers would need to be multiplied with 60 * 1,000 to convert from minutes to milliseconds (e.g. t=5 would become t=300,000). 
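+In practice, a window definition like the one above is passed to `windowedBy()` on a grouped stream. A minimal sketch, assuming a `KGroupedStream<String, Long> groupedStream` as in the earlier aggregation examples:
+
+
+    import java.time.Duration;
+    import org.apache.kafka.streams.kstream.KGroupedStream;
+    import org.apache.kafka.streams.kstream.KTable;
+    import org.apache.kafka.streams.kstream.TimeWindows;
+    import org.apache.kafka.streams.kstream.Windowed;
+
+    KGroupedStream<String, Long> groupedStream = ...;
+
+    // Count per key and per 5-minute hopping window advancing by 1 minute.
+    // Because the windows overlap, each record contributes to five windows.
+    KTable<Windowed<String>, Long> hoppingCounts = groupedStream.windowedBy(
+        TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(5)).advanceBy(Duration.ofMinutes(1)))
+        .count();
+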
+ +Hopping time windows are _aligned to the epoch_ , with the lower interval bound being inclusive and the upper bound being exclusive. "Aligned to the epoch" means that the first window starts at timestamp zero. For example, hopping windows with a size of 5000ms and an advance interval ("hop") of 3000ms have predictable window boundaries `[0;5000),[3000;8000),...` -- and **not** `[1000;6000),[4000;9000),...` or even something "random" like `[1452;6452),[4452;9452),...`. + +Unlike non-windowed aggregates that we have seen previously, windowed aggregates return a _windowed KTable_ whose keys type is `Windowed`. This is to differentiate aggregate values with the same key from different windows. The corresponding window instance and the embedded key can be retrieved as `Windowed#window()` and `Windowed#key()`, respectively. + +### Tumbling time windows + +Tumbling time windows are a special case of hopping time windows and, like the latter, are windows based on time intervals. They model fixed-size, non-overlapping, gap-less windows. A tumbling window is defined by a single property: the window's _size_. A tumbling window is a hopping window whose window size is equal to its advance interval. Since tumbling windows never overlap, a data record will belong to one and only one window. + +![](/41/images/streams-time-windows-tumbling.png) + +This diagram shows windowing a stream of data records with tumbling windows. Windows do not overlap because, by definition, the advance interval is identical to the window size. In this diagram the time numbers represent minutes; e.g. t=5 means "at the five-minute mark". In reality, the unit of time in Kafka Streams is milliseconds, which means the time numbers would need to be multiplied with 60 * 1,000 to convert from minutes to milliseconds (e.g. t=5 would become t=300,000). + +Tumbling time windows are _aligned to the epoch_ , with the lower interval bound being inclusive and the upper bound being exclusive. "Aligned to the epoch" means that the first window starts at timestamp zero. For example, tumbling windows with a size of 5000ms have predictable window boundaries `[0;5000),[5000;10000),...` -- and **not** `[1000;6000),[6000;11000),...` or even something "random" like `[1452;6452),[6452;11452),...`. + +The following code defines a tumbling window with a size of 5 minutes: + + + import java.time.Duration; + import org.apache.kafka.streams.kstream.TimeWindows; + + // A tumbling time window with a size of 5 minutes (and, by definition, an implicit + // advance interval of 5 minutes), and grace period of 1 minute. + Duration windowSize = Duration.ofMinutes(5); + Duration gracePeriod = Duration.ofMinutes(1); + TimeWindows.ofSizeAndGrace(windowSize, gracePeriod); + + // The above is equivalent to the following code: + TimeWindows.ofSizeAndGrace(windowSize, gracePeriod).advanceBy(windowSize); + +### Sliding time windows + +Sliding windows are actually quite different from hopping and tumbling windows. In Kafka Streams, sliding windows are used for join operations, specified by using the `JoinWindows` class, and windowed aggregations, specified by using the `SlidingWindows` class. + +A sliding window models a fixed-size window that slides continuously over the time axis. In this model, two data records are said to be included in the same window if (in the case of symmetric windows) the difference of their timestamps is within the window size. 
As a sliding window moves along the time axis, records may fall into multiple snapshots of the sliding window, but each unique combination of records appears only in one sliding window snapshot. + +The following code defines a sliding window with a time difference of 10 minutes and a grace period of 30 minutes: + + + import java.time.Duration; + import org.apache.kafka.streams.kstream.SlidingWindows; + + // A sliding time window with a time difference of 10 minutes and grace period of 30 minutes + Duration timeDifference = Duration.ofMinutes(10); + Duration gracePeriod = Duration.ofMinutes(30); + SlidingWindows.ofTimeDifferenceAndGrace(timeDifference, gracePeriod); + +![](/41/images/streams-sliding-windows.png) + +This diagram shows windowing a stream of data records with sliding windows. The overlap of the sliding window snapshots varies depending on the record times. In this diagram, the time numbers represent milliseconds. For example, t=5 means "at the five millisecond mark". + +Sliding windows are aligned to the data record timestamps, not to the epoch. In contrast to hopping and tumbling windows, the lower and upper window time interval bounds of sliding windows are both inclusive. + +### Session Windows + +Session windows are used to aggregate key-based events into so-called _sessions_ , the process of which is referred to as _sessionization_. Sessions represent a **period of activity** separated by a defined **gap of inactivity** (or "idleness"). Any events processed that fall within the inactivity gap of any existing sessions are merged into the existing sessions. If an event falls outside of the session gap, then a new session will be created. + +Session windows are different from the other window types in that: + + * all windows are tracked independently across keys - e.g. windows of different keys typically have different start and end times + * their window sizes vary - even windows for the same key typically have different sizes + + + +The prime area of application for session windows is **user behavior analysis**. Session-based analyses can range from simple metrics (e.g. count of user visits on a news website or social platform) to more complex metrics (e.g. customer conversion funnel and event flows). + +The following code defines a session window with an inactivity gap of 5 minutes: + + + import java.time.Duration; + import org.apache.kafka.streams.kstream.SessionWindows; + + // A session window with an inactivity gap of 5 minutes. + SessionWindows.ofInactivityGapWithNoGrace(Duration.ofMinutes(5)); + +Given the previous session window example, here's what would happen on an input stream of six records. When the first three records arrive (upper part of the diagram below), we'd have three sessions (see lower part) after having processed those records: two for the green record key, with one session starting and ending at the 0-minute mark (only due to the illustration it looks as if the session goes from 0 to 1), and another starting and ending at the 6-minute mark; and one session for the blue record key, starting and ending at the 2-minute mark. + +![](/41/images/streams-session-windows-01.png) + +Detected sessions after having received three input records: two records for the green record key at t=0 and t=6, and one record for the blue record key at t=2. In this diagram the time numbers represent minutes; e.g. t=5 means "at the five-minute mark".
In reality, the unit of time in Kafka Streams is milliseconds, which means the time numbers would need to be multiplied with 60 * 1,000 to convert from minutes to milliseconds (e.g. t=5 would become t=300,000). + +If we then receive three additional records (including two out-of-order records), what would happen is that the two existing sessions for the green record key will be merged into a single session starting at time 0 and ending at time 6, consisting of a total of three records. The existing session for the blue record key will be extended to end at time 5, consisting of a total of two records. And, finally, there will be a new session for the blue key starting and ending at time 11. + +![](/41/images/streams-session-windows-02.png) + +Detected sessions after having received six input records. Note the two out-of-order data records at t=4 (green) and t=5 (blue), which lead to a merge of sessions and an extension of a session, respectively. + +### Window Final Results + +In Kafka Streams, windowed computations update their results continuously. As new data arrives for a window, freshly computed results are emitted downstream. For many applications, this is ideal, since fresh results are always available. and Kafka Streams is designed to make programming continuous computations seamless. However, some applications need to take action **only** on the final result of a windowed computation. Common examples of this are sending alerts or delivering results to a system that doesn't support updates. + +Suppose that you have an hourly windowed count of events per user. If you want to send an alert when a user has _less than_ three events in an hour, you have a real challenge. All users would match this condition at first, until they accrue enough events, so you cannot simply send an alert when someone matches the condition; you have to wait until you know you won't see any more events for a particular window and _then_ send the alert. + +Kafka Streams offers a clean way to define this logic: after defining your windowed computation, you can suppress the intermediate results, emitting the final count for each user when the window is **closed**. + +For example: + + + KGroupedStream grouped = ...; + grouped + .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofHours(1), Duration.ofMinutes(10))) + .count() + .suppress(Suppressed.untilWindowCloses(unbounded())) + .filter((windowedUserId, count) -> count < 3) + .toStream() + .foreach((windowedUserId, count) -> sendAlert(windowedUserId.window(), windowedUserId.key(), count)); + +The key parts of this program are: + +`ofSizeAndGrace(Duration.ofHours(1), Duration.ofMinutes(10))` + The specified grace period of 10 minutes (i.e., the `Duration.ofMinutes(10)` argument) allows us to bound the lateness of events the window will accept. For example, the 09:00 to 10:00 window will accept out-of-order records until 10:10, at which point, the window is **closed**. +`.suppress(Suppressed.untilWindowCloses(...))` + This configures the suppression operator to emit nothing for a window until it closes, and then emit the final result. For example, if user `U` gets 10 events between 09:00 and 10:10, the `filter` downstream of the suppression will get no events for the windowed key `U@09:00-10:00` until 10:10, and then it will get exactly one with the value `10`. This is the final result of the windowed count. +`unbounded()` + This configures the buffer used for storing events until their windows close. 
Production code is able to put a cap on the amount of memory to use for the buffer, but this simple example creates a buffer with no upper bound. + +One thing to note is that suppression is just like any other Kafka Streams operator, so you can build a topology with two branches emerging from the `count`, one suppressed, and one not, or even multiple differently configured suppressions. This allows you to apply suppressions where they are needed and otherwise rely on the default continuous update behavior. + +For more detailed information, see the JavaDoc on the `Suppressed` config object and [KIP-328](https://cwiki.apache.org/confluence/x/sQU0BQ "KIP-328"). + +Applying processors (Processor API integration) + +Beyond the aforementioned stateless and stateful transformations, you may also leverage the Processor API from the DSL. There are a number of scenarios where this may be helpful: + + * **Customization:** You need to implement special, customized logic that is not or not yet available in the DSL. + * **Combining ease-of-use with full flexibility where it's needed:** Even though you generally prefer to use the expressiveness of the DSL, there are certain steps in your processing that require more flexibility and tinkering than the DSL provides. For example, only the Processor API provides access to a record's metadata such as its topic, partition, and offset information. However, you don't want to switch completely to the Processor API just because of that; and + * **Migrating from other tools:** You are migrating from other stream processing technologies that provide an imperative API, and migrating some of your legacy code to the Processor API was faster and/or easier than to migrate completely to the DSL right away. + + + +## Operations and concepts + + * `KStream#process`: Process all records in a stream, one record at a time, by applying a `Processor` (provided by a given `ProcessorSupplier`); + * `KStream#processValues`: Process all records in a stream, one record at a time, by applying a `FixedKeyProcessor` (provided by a given `FixedKeyProcessorSupplier`) [**CAUTION:** If you are deploying a new Kafka Streams application, and you are using the "merge repartition topics" optimization, you should enable the fix for [KAFKA-19668](https://issues.apache.org/jira/browse/KAFKA-19668) to avoid compatibility issues for future upgrades to newer versions of Kafka Streams; For more details, see the migration guide below]; + * `Processor`: A processor of key-value pair records; + * `ContextualProcessor`: An abstract implementation of `Processor` that manages the `ProcessorContext` instance; + * `FixedKeyProcessor`: A processor of key-value pair records where keys are immutable; + * `ContextualFixedKeyProcessor`: An abstract implementation of `FixedKeyProcessor` that manages the `FixedKeyProcessorContext` instance; + * `ProcessorSupplier`: A processor supplier that can create one or more `Processor` instances; and + * `FixedKeyProcessorSupplier`: A processor supplier that can create one or more `FixedKeyProcessor` instances. + + + +## Examples + +Follow the examples below to learn how to apply `process` and `processValues` to your `KStream`. 
+ +Example | Operation | State Type +---|---|--- +Categorizing Logs by Severity | `process` | Stateless +Replacing Slang in Text Messages | `processValues` | Stateless +Cumulative Discounts for a Loyalty Program | `process` | Stateful +Traffic Radar Monitoring Car Count | `processValues` | Stateful + +### Categorizing Logs by Severity + + * **Idea:** You have a stream of log messages. Each message contains a severity level (e.g., INFO, WARN, ERROR) in the value. The processor filters messages, routing ERROR messages to a dedicated topic and discarding INFO messages. The rest (WARN) are forwarded to a dedicated topic too. + * **Real-World Context:** In a production monitoring system, categorizing logs by severity ensures ERROR logs are sent to a critical incident management system, WARN logs are analyzed for potential risks, and INFO logs are stored for basic reporting purposes. + + + + + public class CategorizingLogsBySeverityExample { + private static final String ERROR_LOGS_TOPIC = "error-logs-topic"; + private static final String INPUT_LOGS_TOPIC = "input-logs-topic"; + private static final String UNKNOWN_LOGS_TOPIC = "unknown-logs-topic"; + private static final String WARN_LOGS_TOPIC = "warn-logs-topic"; + + public static void categorizeWithProcess(final StreamsBuilder builder) { + final KStream logStream = builder.stream(INPUT_LOGS_TOPIC); + logStream.process(LogSeverityProcessor::new) + .to((key, value, recordContext) -> { + // Determine the target topic dynamically + if ("ERROR".equals(key)) return ERROR_LOGS_TOPIC; + if ("WARN".equals(key)) return WARN_LOGS_TOPIC; + return UNKNOWN_LOGS_TOPIC; + }); + } + + private static class LogSeverityProcessor extends ContextualProcessor { + @Override + public void process(final Record record) { + if (record.value() == null) { + return; // Skip null values + } + + // Assume the severity is the first word in the log message + // For example: "ERROR: Disk not found" -> "ERROR" + final int colonIndex = record.value().indexOf(':'); + final String severity = colonIndex > 0 ? record.value().substring(0, colonIndex).trim() : "UNKNOWN"; + + // Route logs based on severity + switch (severity) { + case "ERROR": + context().forward(record.withKey(ERROR_LOGS_TOPIC)); + break; + case "WARN": + context().forward(record.withKey(WARN_LOGS_TOPIC)); + break; + case "INFO": + // INFO logs are ignored + break; + default: + // Forward to an "unknown" topic for logs with unrecognized severities + context().forward(record.withKey(UNKNOWN_LOGS_TOPIC)); + } + } + } + } + +### Replacing Slang in Text Messages + + * **Idea:** A messaging stream contains user-generated content, and you want to replace slang words with their formal equivalents (e.g., "u" becomes "you", "brb" becomes "be right back"). The operation only modifies the message value and keeps the key intact. + * **Real-World Context:** In customer support chat systems, normalizing text by replacing slang with formal equivalents ensures that automated sentiment analysis tools work accurately and provide reliable insights. 
+ + + + + public class ReplacingSlangTextInMessagesExample { + private static final Map SLANG_DICTIONARY = Map.of( + "u", "you", + "brb", "be right back", + "omg", "oh my god", + "btw", "by the way" + ); + private static final String INPUT_MESSAGES_TOPIC = "input-messages-topic"; + private static final String OUTPUT_MESSAGES_TOPIC = "output-messages-topic"; + + public static void replaceWithProcessValues(final StreamsBuilder builder) { + KStream messageStream = builder.stream(INPUT_MESSAGES_TOPIC); + messageStream.processValues(SlangReplacementProcessor::new).to(OUTPUT_MESSAGES_TOPIC); + } + + private static class SlangReplacementProcessor extends ContextualFixedKeyProcessor { + @Override + public void process(final FixedKeyRecord record) { + if (record.value() == null) { + return; // Skip null values + } + + // Replace slang words in the message + final String[] words = record.value().split("\s+"); + for (final String word : words) { + String replacedWord = SLANG_DICTIONARY.getOrDefault(word, word); + context().forward(record.withValue(replacedWord)); + } + } + } + } + +### Cumulative Discounts for a Loyalty Program + + * **Idea:** A stream of purchase events contains user IDs and transaction amounts. Use a state store to accumulate the total spending of each user. When their total crosses a threshold, apply a discount on their next transaction and update their accumulated total. + * **Real-World Context:** In a retail loyalty program, tracking cumulative customer spending enables dynamic rewards, such as issuing a discount when a customer's total purchases exceed a predefined limit. + + + + + public class CumulativeDiscountsForALoyaltyProgramExample { + private static final double DISCOUNT_THRESHOLD = 100.0; + private static final String CUSTOMER_SPENDING_STORE = "customer-spending-store"; + private static final String DISCOUNT_NOTIFICATION_MESSAGE = + "Discount applied! 
You have received a reward for your purchases."; + private static final String DISCOUNT_NOTIFICATIONS_TOPIC = "discount-notifications-topic"; + private static final String PURCHASE_EVENTS_TOPIC = "purchase-events-topic"; + + public static void applyDiscountWithProcess(final StreamsBuilder builder) { + // Define the state store for tracking cumulative spending + builder.addStateStore( + Stores.keyValueStoreBuilder( + Stores.inMemoryKeyValueStore(CUSTOMER_SPENDING_STORE), + Serdes.String(), + Serdes.Double() + ) + ); + final KStream purchaseStream = builder.stream(PURCHASE_EVENTS_TOPIC); + // Apply the Processor with the state store + final KStream notificationStream = + purchaseStream.process(CumulativeDiscountProcessor::new, CUSTOMER_SPENDING_STORE); + // Send the notifications to the output topic + notificationStream.to(DISCOUNT_NOTIFICATIONS_TOPIC); + } + + private static class CumulativeDiscountProcessor implements Processor { + private KeyValueStore spendingStore; + private ProcessorContext context; + + @Override + public void init(final ProcessorContext context) { + this.context = context; + // Retrieve the state store for cumulative spending + spendingStore = context.getStateStore(CUSTOMER_SPENDING_STORE); + } + + @Override + public void process(final Record record) { + if (record.value() == null) { + return; // Skip null purchase amounts + } + + // Get the current spending total for the customer + Double currentSpending = spendingStore.get(record.key()); + if (currentSpending == null) { + currentSpending = 0.0; + } + // Update the cumulative spending + currentSpending += record.value(); + spendingStore.put(record.key(), currentSpending); + + // Check if the customer qualifies for a discount + if (currentSpending >= DISCOUNT_THRESHOLD) { + // Reset the spending after applying the discount + spendingStore.put(record.key(), currentSpending - DISCOUNT_THRESHOLD); + // Send a discount notification + context.forward(record.withValue(DISCOUNT_NOTIFICATION_MESSAGE)); + } + } + } + } + +### Traffic Radar Monitoring Car Count + + * **Idea:** A radar monitors cars passing along a road stretch. A system counts the cars for each day, maintaining a cumulative total for the current day in a state store. At the end of the day, the count is emitted and the state is cleared for the next day. + * **Real-World Context:** A car counting system can be useful for determining measures for widening or controlling traffic depending on the number of cars passing through the monitored stretch. 
+ + + + + public class TrafficRadarMonitoringCarCountExample { + private static final String DAILY_COUNT_STORE = "price-state-store"; + private static final String DAILY_COUNT_TOPIC = "price-state-topic"; + private static final String RADAR_COUNT_TOPIC = "car-radar-topic"; + + public static void countWithProcessValues(final StreamsBuilder builder) { + // Define a state store for tracking daily car counts + builder.addStateStore( + Stores.keyValueStoreBuilder( + Stores.inMemoryKeyValueStore(DAILY_COUNT_STORE), + Serdes.String(), + Serdes.Long() + ) + ); + final KStream radarStream = builder.stream(RADAR_COUNT_TOPIC); + // Apply the FixedKeyProcessor with the state store + radarStream.processValues(DailyCarCountProcessor::new, DAILY_COUNT_STORE) + .to(DAILY_COUNT_TOPIC); + } + + private static class DailyCarCountProcessor implements FixedKeyProcessor { + private FixedKeyProcessorContext context; + private KeyValueStore stateStore; + private static final DateTimeFormatter DATE_FORMATTER = + DateTimeFormatter.ofPattern("yyyy-MM-dd").withZone(ZoneId.systemDefault()); + + @Override + public void init(final FixedKeyProcessorContext context) { + this.context = context; + stateStore = context.getStateStore(DAILY_COUNT_STORE); + } + + @Override + public void process(final FixedKeyRecord record) { + if (record.value() == null) { + return; // Skip null events + } + + // Derive the current day from the event timestamp + final long timestamp = System.currentTimeMillis(); // Use system time for simplicity + final String currentDay = DATE_FORMATTER.format(Instant.ofEpochMilli(timestamp)); + // Retrieve the current count for the day + Long dailyCount = stateStore.get(currentDay); + if (dailyCount == null) { + dailyCount = 0L; + } + // Increment the count + dailyCount++; + stateStore.put(currentDay, dailyCount); + + // Emit the current day's count + context.forward(record.withValue(String.format("Day: %s, Car Count: %s", currentDay, dailyCount))); + } + } + } + +## Keynotes + + * **Type Safety and Flexibility:** The process and processValues APIs utilize `ProcessorContext` and `Record` or `FixedKeyRecord` objects for better type safety and flexibility of custom processing logic. + * **Clear State and Logic Management:** Implementations for `Processor` or `FixedKeyProcessor` should manage state and logic clearly. Use `context().forward()` for emitting records downstream. + * **Unified API:** Consolidates multiple methods into a single, versatile API. + * **Future-Proof:** Ensures compatibility with the latest Kafka Streams releases. + + + +Transformers removal and migration to processors + +As of Kafka 4.0, several deprecated methods in the Kafka Streams API, such as `transform`, `flatTransform`, `transformValues`, `flatTransformValues`, and `process` have been removed. These methods have been replaced with the more versatile Processor API. This guide provides detailed steps for migrating existing code to use the new Processor API and explains the benefits of the changes. + +The following deprecated methods are no longer available in Kafka Streams: + + * `KStream#transform` + * `KStream#flatTransform` + * `KStream#transformValues` + * `KStream#flatTransformValues` + * `KStream#process` + + + +The Processor API now serves as a unified replacement for all these methods. It simplifies the API surface while maintaining support for both stateless and stateful operations. 
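+ +As a quick orientation before the caution and the detailed migration examples below, the following minimal sketch shows the shape of the two unified entry points; the `String` key and value types and the in-line processors are placeholder assumptions made purely for illustration: + + + import org.apache.kafka.streams.kstream.KStream; + import org.apache.kafka.streams.processor.api.ContextualFixedKeyProcessor; + import org.apache.kafka.streams.processor.api.ContextualProcessor; + import org.apache.kafka.streams.processor.api.FixedKeyRecord; + import org.apache.kafka.streams.processor.api.Record; + + KStream<String, String> stream = ...; + + // process() may change both key and value; state stores can be attached by name. + KStream<String, String> processed = stream.process(() -> new ContextualProcessor<String, String, String, String>() { + @Override + public void process(final Record<String, String> record) { + context().forward(record.withValue(record.value().toUpperCase())); + } + }); + + // processValues() may only change the value; the key stays fixed. + KStream<String, String> trimmed = stream.processValues(() -> new ContextualFixedKeyProcessor<String, String, String>() { + @Override + public void process(final FixedKeyRecord<String, String> record) { + context().forward(record.withValue(record.value().trim())); + } + }); + +Both methods accept optional state store names as additional arguments; because `processValues` cannot change the key, Kafka Streams can avoid an unnecessary repartition step downstream.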
+ +**CAUTION:** If you are using `KStream.transformValues()` and you have the "merge repartition topics" optimization enabled, rewriting your program to `KStream.processValues()` might not be safe due to [KAFKA-19668](https://issues.apache.org/jira/browse/KAFKA-19668). In this case, you should not upgrade to Kafka Streams 4.0.0 or 4.1.0, but use Kafka Streams 4.0.1 instead, which contains a fix. Note that the fix is not enabled by default for backward compatibility reasons; you need to enable it by setting the config `__enable.process.processValue.fix__ = true` and passing it into the `StreamsBuilder()` constructor. + + + final Properties properties = new Properties(); + properties.put(StreamsConfig.APPLICATION_ID_CONFIG, ...); + properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, ...); + properties.put(TopologyConfig.InternalConfig.ENABLE_PROCESS_PROCESSVALUE_FIX, true); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(properties))); + +It is recommended that you compare the output of `Topology.describe()` for the old and new topology to verify that the rewrite to `processValues()` is correct and does not introduce any incompatibilities. You should also test the upgrade in a non-production environment. + +## Migration Examples + +To migrate from the deprecated `transform`, `transformValues`, `flatTransform`, and `flatTransformValues` methods to the Processor API (PAPI) in Kafka Streams, let's revisit the previous examples. The new `process` and `processValues` methods enable a more flexible and reusable approach by requiring implementations of the `Processor` or `FixedKeyProcessor` interfaces. + +Example | Migrating from | Migrating to | State Type +---|---|---|--- +Categorizing Logs by Severity | `flatTransform` | `process` | Stateless +Replacing Slang in Text Messages | `flatTransformValues` | `processValues` | Stateless +Cumulative Discounts for a Loyalty Program | `transform` | `process` | Stateful +Traffic Radar Monitoring Car Count | `transformValues` | `processValues` | Stateful + +### Categorizing Logs by Severity + +Below, methods `categorizeWithFlatTransform` and `categorizeWithProcess` show how you can migrate from `flatTransform` to `process`.
+ + + public class CategorizingLogsBySeverityExample { + private static final String ERROR_LOGS_TOPIC = "error-logs-topic"; + private static final String INPUT_LOGS_TOPIC = "input-logs-topic"; + private static final String UNKNOWN_LOGS_TOPIC = "unknown-logs-topic"; + private static final String WARN_LOGS_TOPIC = "warn-logs-topic"; + + public static void categorizeWithFlatTransform(final StreamsBuilder builder) { + final KStream logStream = builder.stream(INPUT_LOGS_TOPIC); + logStream.flatTransform(LogSeverityTransformer::new) + .to((key, value, recordContext) -> { + // Determine the target topic dynamically + if ("ERROR".equals(key)) return ERROR_LOGS_TOPIC; + if ("WARN".equals(key)) return WARN_LOGS_TOPIC; + return UNKNOWN_LOGS_TOPIC; + }); + } + + public static void categorizeWithProcess(final StreamsBuilder builder) { + final KStream logStream = builder.stream(INPUT_LOGS_TOPIC); + logStream.process(LogSeverityProcessor::new) + .to((key, value, recordContext) -> { + // Determine the target topic dynamically + if ("ERROR".equals(key)) return ERROR_LOGS_TOPIC; + if ("WARN".equals(key)) return WARN_LOGS_TOPIC; + return UNKNOWN_LOGS_TOPIC; + }); + } + + private static class LogSeverityTransformer implements Transformer>> { + @Override + public void init(org.apache.kafka.streams.processor.ProcessorContext context) { + } + + @Override + public Iterable> transform(String key, String value) { + if (value == null) { + return Collections.emptyList(); // Skip null values + } + + // Assume the severity is the first word in the log message + // For example: "ERROR: Disk not found" -> "ERROR" + int colonIndex = value.indexOf(':'); + String severity = colonIndex > 0 ? value.substring(0, colonIndex).trim() : "UNKNOWN"; + + // Create appropriate KeyValue pair based on severity + return switch (severity) { + case "ERROR" -> List.of(new KeyValue<>("ERROR", value)); + case "WARN" -> List.of(new KeyValue<>("WARN", value)); + case "INFO" -> Collections.emptyList(); // INFO logs are ignored + default -> List.of(new KeyValue<>("UNKNOWN", value)); + }; + } + + @Override + public void close() { + } + } + + private static class LogSeverityProcessor extends ContextualProcessor { + @Override + public void process(final Record record) { + if (record.value() == null) { + return; // Skip null values + } + + // Assume the severity is the first word in the log message + // For example: "ERROR: Disk not found" -> "ERROR" + final int colonIndex = record.value().indexOf(':'); + final String severity = colonIndex > 0 ? record.value().substring(0, colonIndex).trim() : "UNKNOWN"; + + // Route logs based on severity + switch (severity) { + case "ERROR": + context().forward(record.withKey(ERROR_LOGS_TOPIC)); + break; + case "WARN": + context().forward(record.withKey(WARN_LOGS_TOPIC)); + break; + case "INFO": + // INFO logs are ignored + break; + default: + // Forward to an "unknown" topic for logs with unrecognized severities + context().forward(record.withKey(UNKNOWN_LOGS_TOPIC)); + } + } + } + } + +### Replacing Slang in Text Messages + +Below, methods `replaceWithFlatTransformValues` and `replaceWithProcessValues` show how you can migrate from `flatTransformValues` to `processValues`. 
+ + + public class ReplacingSlangTextInMessagesExample { + private static final Map SLANG_DICTIONARY = Map.of( + "u", "you", + "brb", "be right back", + "omg", "oh my god", + "btw", "by the way" + ); + private static final String INPUT_MESSAGES_TOPIC = "input-messages-topic"; + private static final String OUTPUT_MESSAGES_TOPIC = "output-messages-topic"; + + public static void replaceWithFlatTransformValues(final StreamsBuilder builder) { + KStream messageStream = builder.stream(INPUT_MESSAGES_TOPIC); + messageStream.flatTransformValues(SlangReplacementTransformer::new).to(OUTPUT_MESSAGES_TOPIC); + } + + public static void replaceWithProcessValues(final StreamsBuilder builder) { + KStream messageStream = builder.stream(INPUT_MESSAGES_TOPIC); + messageStream.processValues(SlangReplacementProcessor::new).to(OUTPUT_MESSAGES_TOPIC); + } + + private static class SlangReplacementTransformer implements ValueTransformer> { + + @Override + public void init(final org.apache.kafka.streams.processor.ProcessorContext context) { + } + + @Override + public Iterable transform(final String value) { + if (value == null) { + return Collections.emptyList(); // Skip null values + } + + // Replace slang words in the message + final String[] words = value.split("\s+"); + return Arrays.asList( + Arrays.stream(words) + .map(word -> SLANG_DICTIONARY.getOrDefault(word, word)) + .toArray(String[]::new) + ); + } + + @Override + public void close() { + } + } + + private static class SlangReplacementProcessor extends ContextualFixedKeyProcessor { + @Override + public void process(final FixedKeyRecord record) { + if (record.value() == null) { + return; // Skip null values + } + + // Replace slang words in the message + final String[] words = record.value().split("\s+"); + for (final String word : words) { + String replacedWord = SLANG_DICTIONARY.getOrDefault(word, word); + context().forward(record.withValue(replacedWord)); + } + } + } + } + +### Cumulative Discounts for a Loyalty Program + + + public class CumulativeDiscountsForALoyaltyProgramExample { + private static final double DISCOUNT_THRESHOLD = 100.0; + private static final String CUSTOMER_SPENDING_STORE = "customer-spending-store"; + private static final String DISCOUNT_NOTIFICATION_MESSAGE = + "Discount applied! 
You have received a reward for your purchases."; + private static final String DISCOUNT_NOTIFICATIONS_TOPIC = "discount-notifications-topic"; + private static final String PURCHASE_EVENTS_TOPIC = "purchase-events-topic"; + + public static void applyDiscountWithTransform(final StreamsBuilder builder) { + // Define the state store for tracking cumulative spending + builder.addStateStore( + Stores.keyValueStoreBuilder( + Stores.inMemoryKeyValueStore(CUSTOMER_SPENDING_STORE), + Serdes.String(), + Serdes.Double() + ) + ); + final KStream purchaseStream = builder.stream(PURCHASE_EVENTS_TOPIC); + // Apply the Transformer with the state store + final KStream notificationStream = + purchaseStream.transform(CumulativeDiscountTransformer::new, CUSTOMER_SPENDING_STORE); + // Send the notifications to the output topic + notificationStream.to(DISCOUNT_NOTIFICATIONS_TOPIC); + } + + public static void applyDiscountWithProcess(final StreamsBuilder builder) { + // Define the state store for tracking cumulative spending + builder.addStateStore( + Stores.keyValueStoreBuilder( + Stores.inMemoryKeyValueStore(CUSTOMER_SPENDING_STORE), + org.apache.kafka.common.serialization.Serdes.String(), + org.apache.kafka.common.serialization.Serdes.Double() + ) + ); + final KStream purchaseStream = builder.stream(PURCHASE_EVENTS_TOPIC); + // Apply the Processor with the state store + final KStream notificationStream = + purchaseStream.process(CumulativeDiscountProcessor::new, CUSTOMER_SPENDING_STORE); + // Send the notifications to the output topic + notificationStream.to(DISCOUNT_NOTIFICATIONS_TOPIC); + } + + private static class CumulativeDiscountTransformer implements Transformer> { + private KeyValueStore spendingStore; + + @Override + public void init(final org.apache.kafka.streams.processor.ProcessorContext context) { + // Retrieve the state store for cumulative spending + spendingStore = context.getStateStore(CUSTOMER_SPENDING_STORE); + } + + @Override + public KeyValue transform(final String key, final Double value) { + if (value == null) { + return null; // Skip null purchase amounts + } + + // Get the current spending total for the customer + Double currentSpending = spendingStore.get(key); + if (currentSpending == null) { + currentSpending = 0.0; + } + // Update the cumulative spending + currentSpending += value; + spendingStore.put(key, currentSpending); + + // Check if the customer qualifies for a discount + if (currentSpending >= DISCOUNT_THRESHOLD) { + // Reset the spending after applying the discount + spendingStore.put(key, currentSpending - DISCOUNT_THRESHOLD); + // Return a notification message + return new KeyValue<>(key, DISCOUNT_NOTIFICATION_MESSAGE); + } + return null; // No discount, so no output for this record + } + + @Override + public void close() { + } + } + + private static class CumulativeDiscountProcessor implements Processor { + private KeyValueStore spendingStore; + private ProcessorContext context; + + @Override + public void init(final ProcessorContext context) { + this.context = context; + // Retrieve the state store for cumulative spending + spendingStore = context.getStateStore(CUSTOMER_SPENDING_STORE); + } + + @Override + public void process(final Record record) { + if (record.value() == null) { + return; // Skip null purchase amounts + } + + // Get the current spending total for the customer + Double currentSpending = spendingStore.get(record.key()); + if (currentSpending == null) { + currentSpending = 0.0; + } + // Update the cumulative spending + currentSpending += record.value(); 
+ spendingStore.put(record.key(), currentSpending); + + // Check if the customer qualifies for a discount + if (currentSpending >= DISCOUNT_THRESHOLD) { + // Reset the spending after applying the discount + spendingStore.put(record.key(), currentSpending - DISCOUNT_THRESHOLD); + // Send a discount notification + context.forward(record.withValue(DISCOUNT_NOTIFICATION_MESSAGE)); + } + } + } + } + +### Traffic Radar Monitoring Car Count + +Below, methods `countWithTransformValues` and `countWithProcessValues` show how you can migrate from `transformValues` to `processValues`. + + + public class TrafficRadarMonitoringCarCountExample { + private static final String DAILY_COUNT_STORE = "price-state-store"; + private static final String DAILY_COUNT_TOPIC = "price-state-topic"; + private static final String RADAR_COUNT_TOPIC = "car-radar-topic"; + + public static void countWithTransformValues(final StreamsBuilder builder) { + // Define a state store for tracking daily car counts + builder.addStateStore( + Stores.keyValueStoreBuilder( + Stores.inMemoryKeyValueStore(DAILY_COUNT_STORE), + org.apache.kafka.common.serialization.Serdes.String(), + org.apache.kafka.common.serialization.Serdes.Long() + ) + ); + final KStream radarStream = builder.stream(RADAR_COUNT_TOPIC); + // Apply the ValueTransformer with the state store + radarStream.transformValues(DailyCarCountTransformer::new, DAILY_COUNT_STORE) + .to(DAILY_COUNT_TOPIC); + } + + public static void countWithProcessValues(final StreamsBuilder builder) { + // Define a state store for tracking daily car counts + builder.addStateStore( + Stores.keyValueStoreBuilder( + Stores.inMemoryKeyValueStore(DAILY_COUNT_STORE), + org.apache.kafka.common.serialization.Serdes.String(), + org.apache.kafka.common.serialization.Serdes.Long() + ) + ); + final KStream radarStream = builder.stream(RADAR_COUNT_TOPIC); + // Apply the FixedKeyProcessor with the state store + radarStream.processValues(DailyCarCountProcessor::new, DAILY_COUNT_STORE) + .to(DAILY_COUNT_TOPIC); + } + + private static class DailyCarCountTransformer implements ValueTransformerWithKey { + private KeyValueStore stateStore; + private static final DateTimeFormatter DATE_FORMATTER = + DateTimeFormatter.ofPattern("yyyy-MM-dd").withZone(ZoneId.systemDefault()); + + @Override + public void init(final org.apache.kafka.streams.processor.ProcessorContext context) { + // Access the state store + stateStore = context.getStateStore(DAILY_COUNT_STORE); + } + + @Override + public String transform(Void readOnlyKey, String value) { + if (value == null) { + return null; // Skip null events + } + + // Derive the current day from the event timestamp + final long timestamp = System.currentTimeMillis(); // Use system time for simplicity + final String currentDay = DATE_FORMATTER.format(Instant.ofEpochMilli(timestamp)); + // Retrieve the current count for the day + Long dailyCount = stateStore.get(currentDay); + if (dailyCount == null) { + dailyCount = 0L; + } + // Increment the count + dailyCount++; + stateStore.put(currentDay, dailyCount); + + // Return the current day's count + return String.format("Day: %s, Car Count: %s", currentDay, dailyCount); + } + + @Override + public void close() { + } + } + + private static class DailyCarCountProcessor implements FixedKeyProcessor { + private FixedKeyProcessorContext context; + private KeyValueStore stateStore; + private static final DateTimeFormatter DATE_FORMATTER = + DateTimeFormatter.ofPattern("yyyy-MM-dd").withZone(ZoneId.systemDefault()); + + @Override + public void 
init(final FixedKeyProcessorContext context) { + this.context = context; + stateStore = context.getStateStore(DAILY_COUNT_STORE); + } + + @Override + public void process(final FixedKeyRecord record) { + if (record.value() == null) { + return; // Skip null events + } + + // Derive the current day from the event timestamp + final long timestamp = System.currentTimeMillis(); // Use system time for simplicity + final String currentDay = DATE_FORMATTER.format(Instant.ofEpochMilli(timestamp)); + // Retrieve the current count for the day + Long dailyCount = stateStore.get(currentDay); + if (dailyCount == null) { + dailyCount = 0L; + } + // Increment the count + dailyCount++; + stateStore.put(currentDay, dailyCount); + + // Emit the current day's count + context.forward(record.withValue(String.format("Day: %s, Car Count: %s", currentDay, dailyCount))); + } + } + } + +## Keynotes + + * **Type Safety and Flexibility:** The process and processValues APIs utilize `ProcessorContext` and `Record` or `FixedKeyRecord` objects for better type safety and flexibility of custom processing logic. + * **Clear State and Logic Management:** Implementations for `Processor` or `FixedKeyProcessor` should manage state and logic clearly. Use `context().forward()` for emitting records downstream. + * **Unified API:** Consolidates multiple methods into a single, versatile API. + * **Future-Proof:** Ensures compatibility with the latest Kafka Streams releases. + + + +## Removal of Old `process` Method + +It is worth mentioning that, in addition to the methods mentioned above, the `process` method, which integrated the 'old' Processor API (i.e., `Processor` as opposed to the new `api.Processor`) into the DSL, has also been removed. The following example shows how to migrate to the new `process`. + +### Example + + * **Idea:** The system monitors page views for a website in real-time. When a page reaches a predefined popularity threshold (e.g., 1000 views), the system automatically sends an email alert to the site administrator or marketing team to notify them of the page's success. This helps teams quickly identify high-performing content and act on it, such as promoting the page further or analyzing the traffic source. + + * **Real-World Context:** In a content management system (CMS) for a news or blogging platform, it's crucial to track the popularity of articles or posts. For example: + + * **Marketing Teams:** Use the notification to highlight trending content on social media or email newsletters. + * **Operations Teams:** Use the alert to ensure the site can handle increased traffic for popular pages. + * **Ad Managers:** Identify pages where additional ad placements might maximize revenue. + +By automating the detection of popular pages, the system eliminates the need for manual monitoring and ensures timely actions to capitalize on the content's performance. 
+ + + + + + public class PopularPageEmailAlertExample { + private static final String ALERTS_EMAIL = "alerts@yourcompany.com"; + private static final String PAGE_VIEWS_TOPIC = "page-views-topic"; + + public static void alertWithOldProcess(StreamsBuilder builder) { + KStream pageViews = builder.stream(PAGE_VIEWS_TOPIC); + // Filter pages with exactly 1000 views and process them using the old API + pageViews.filter((pageId, viewCount) -> viewCount == 1000) + .process(PopularPageEmailAlertOld::new); + } + + public static void alertWithNewProcess(StreamsBuilder builder) { + KStream pageViews = builder.stream(PAGE_VIEWS_TOPIC); + // Filter pages with exactly 1000 views and process them using the new API + pageViews.filter((pageId, viewCount) -> viewCount == 1000) + .process(PopularPageEmailAlertNew::new); + } + + private static class PopularPageEmailAlertOld extends AbstractProcessor { + @Override + public void init(org.apache.kafka.streams.processor.ProcessorContext context) { + super.init(context); + System.out.println("Initialized email client for: " + ALERTS_EMAIL); + } + + @Override + public void process(String key, Long value) { + if (value == null) return; + + if (value == 1000) { + // Send an email alert + System.out.printf("ALERT (Old API): Page %s has reached 1000 views. Sending email to %s%n", key, ALERTS_EMAIL); + } + } + + @Override + public void close() { + System.out.println("Tearing down email client for: " + ALERTS_EMAIL); + } + } + + private static class PopularPageEmailAlertNew implements Processor { + @Override + public void init(ProcessorContext context) { + System.out.println("Initialized email client for: " + ALERTS_EMAIL); + } + + @Override + public void process(Record record) { + if (record.value() == null) return; + + if (record.value() == 1000) { + // Send an email alert + System.out.printf("ALERT (New API): Page %s has reached 1000 views. Sending email to %s%n", record.key(), ALERTS_EMAIL); + } + } + + @Override + public void close() { + System.out.println("Tearing down email client for: " + ALERTS_EMAIL); + } + } + } + +Naming Operators in a Streams DSL application Kafka Streams allows you to [name processors](dsl-topology-naming.html) created via the Streams DSL + +# Controlling KTable emit rate + +A KTable is logically a continuously updated table. These updates make their way to downstream operators whenever new data is available, ensuring that the whole computation is as fresh as possible. Logically speaking, most programs describe a series of transformations, and the update rate is not a factor in the program behavior. In these cases, the rate of update is more of a performance concern. Operators are able to optimize both the network traffic (to the Kafka brokers) and the disk traffic (to the local state stores) by adjusting commit interval and batch size configurations. + +However, some applications need to take other actions, such as calling out to external systems, and therefore need to exercise some control over the rate of invocations, for example of `KStream#foreach`. + +Rather than achieving this as a side-effect of the [KTable record cache](memory-mgmt.html#streams-developer-guide-memory-management-record-cache), you can directly impose a rate limit via the `KTable#suppress` operator. 
+ +For example: + + + KGroupedTable groupedTable = ...; + groupedTable + .count() + .suppress(untilTimeLimit(ofMinutes(5), maxBytes(1_000_000L).emitEarlyWhenFull())) + .toStream() + .foreach((key, count) -> updateCountsDatabase(key, count)); + +This configuration ensures that `updateCountsDatabase` gets events for each `key` no more than once every 5 minutes. Note that the latest state for each key has to be buffered in memory for that 5-minute period. You have the option to control the maximum amount of memory to use for this buffer (in this case, 1MB). There is also an option to impose a limit in terms of number of records (or to leave both limits unspecified). + +Additionally, it is possible to choose what happens if the buffer fills up. This example takes a relaxed approach and just emits the oldest records before their 5-minute time limit to bring the buffer back down to size. Alternatively, you can choose to stop processing and shut the application down. This may seem extreme, but it gives you a guarantee that the 5-minute time limit will be absolutely enforced. After the application shuts down, you could allocate more memory for the buffer and resume processing. Emitting early is preferable for most applications. + +For more detailed information, see the JavaDoc on the `Suppressed` config object and [KIP-328](https://cwiki.apache.org/confluence/x/sQU0BQ "KIP-328"). + +# Using timestamp-based semantics for table processors + +By default, tables in Kafka Streams use offset-based semantics. When multiple records arrive for the same key, the one with the largest record offset is considered the latest record for the key, and is the record that appears in aggregation and join results computed on the table. This is true even in the event of [out-of-order data](/41/streams/core-concepts.html#streams_out_of_ordering). The record with the largest offset is considered to be the latest record for the key, even if this record does not have the largest timestamp. + +An alternative to offset-based semantics is timestamp-based semantics. With timestamp-based semantics, the record with the largest timestamp is considered the latest record, even if there is another record with a larger offset (and smaller timestamp). If there is no out-of-order data (per key), then offset-based semantics and timestamp-based semantics are equivalent; the difference only appears when there is out-of-order data. + +Starting with Kafka Streams 3.5, Kafka Streams supports timestamp-based semantics through the use of [versioned state stores](/41/streams/developer-guide/processor-api.html#versioned-state-stores). When a table is materialized with a versioned state store, it is a versioned table and will result in different processor semantics in the presence of out-of-order data. + + * When performing a stream-table join, stream-side records will join with the latest-by-timestamp table record which has a timestamp less than or equal to the stream record's timestamp. This is in contrast to joining a stream to an unversioned table, in which case the latest-by-offset table record will be joined, even if the stream-side record is out-of-order and has a lower timestamp. + * Aggregations computed on the table will include the latest-by-timestamp record for each key, instead of the latest-by-offset record. Out-of-order updates (per key) will not trigger a new aggregation result. This is true for `count` and `reduce` operations as well, in addition to `aggregate` operations. 
+ * Table joins will use the latest-by-timestamp record for each key, instead of the latest-by-offset record. Out-of-order updates (per key) will not trigger a new join result. This is true for both primary-key table-table joins and also foreign-key table-table joins. If a versioned table is joined with an unversioned table, the result will be the join of the latest-by-timestamp record from the versioned table with the latest-by-offset record from the unversioned table. + * Table filter operations will no longer suppress consecutive tombstones, so users may observe more `null` records downstream of the filter than compared to when filtering an unversioned table. This is done in order to preserve a complete version history downstream, in the event of out-of-order data. + * `suppress` operations are not allowed on versioned tables, as this would collapse the version history and lead to undefined behavior. + + + +Once a table is materialized with a versioned store, downstream tables are also considered versioned until any of the following occurs: + + * A downstream table is explicitly materialized, either with an unversioned store supplier or with no store supplier (all stores are unversioned by default, including the default store supplier) + * Any stateful transformation occurs, including aggregations and joins + * A table is converted to a stream and back. + + + +The results of certain processors should not be materialized with versioned stores, as these processors do not produce a complete older version history, and therefore materialization as a versioned table would lead to unpredictable results: + + * Aggregate processors, for both table and stream aggregations. This includes `aggregate`, `count` and `reduce` operations. + * Table-table join processors, including both primary-key and foreign-key joins. + + + +For more on versioned stores and how to start using them in your application, see [here](/41/streams/developer-guide/processor-api.html#versioned-state-stores). + +# Writing streams back to Kafka + +Any streams and tables may be (continuously) written back to a Kafka topic. As we will describe in more detail below, the output data might be re-partitioned on its way to Kafka, depending on the situation. + +Writing to Kafka | Description +---|--- +**To** + + * KStream -> void + +| **Terminal operation.** Write the records to Kafka topic(s). ([KStream details](/41/javadoc/org/apache/kafka/streams/kstream/KStream.html#to\(java.lang.String\))) When to provide serdes explicitly: + + * If you do not specify Serdes explicitly, the default Serdes from the [configuration](config-streams.html#streams-developer-guide-configuration) are used. + * You **must specify Serdes explicitly** via the `Produced` class if the key and/or value types of the `KStream` do not match the configured default Serdes. + * See [Data Types and Serialization](datatypes.html#streams-developer-guide-serdes) for information about configuring default Serdes, available Serdes, and implementing your own custom Serdes. + +A variant of `to` exists that enables you to specify how the data is produced by using a `Produced` instance to specify, for example, a `StreamPartitioner` that gives you control over how output records are distributed across the partitions of the output topic. Another variant of `to` exists that enables you to dynamically choose which topic to send to for each record via a `TopicNameExtractor` instance. 
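+ +As an illustration of the dynamic-topic variant mentioned above, here is a minimal sketch (the `String` key and value types and the `"events-"` topic naming scheme are assumptions made purely for illustration); the basic forms with default and explicit serdes follow below: + + + KStream<String, String> stream = ...; + + // Route every record to a topic derived from its key via a TopicNameExtractor lambda. + stream.to((key, value, recordContext) -> "events-".concat(key.toLowerCase()));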
+ + + KStream stream = ...; + + // Write the stream to the output topic, using the configured default key + // and value serdes. + stream.to("my-stream-output-topic"); + + // Write the stream to the output topic, using explicit key and value serdes, + // (thus overriding the defaults in the config properties). + stream.to("my-stream-output-topic", Produced.with(Serdes.String(), Serdes.Long())); + +**Causes data re-partitioning if any of the following conditions is true:** + + 1. If the output topic has a different number of partitions than the stream/table. + 2. If the `KStream` was marked for re-partitioning. + 3. If you provide a custom `StreamPartitioner` to explicitly control how to distribute the output records across the partitions of the output topic. + 4. If the key of an output record is `null`. + + + +**Note** + +**When you want to write to systems other than Kafka:** Besides writing the data back to Kafka, you can also apply a custom processor as a stream sink at the end of the processing to, for example, write to external databases. Doing so is not a recommended pattern - we strongly suggest using the [Kafka Connect API](../../connect/index.html#kafka-connect) instead. However, if you do use such a sink processor, please be aware that it is now your responsibility to guarantee message delivery semantics when talking to such external systems (e.g., to retry on delivery failure or to prevent message duplication). + +# Testing a Streams application + +Kafka Streams comes with a `test-utils` module to help you [test your application](testing.html). + +# Kafka Streams DSL for Scala + +The Kafka Streams DSL Java APIs are based on the Builder design pattern, which allows users to incrementally build the target functionality using lower level compositional fluent APIs. These APIs can be called from Scala, but there are several issues: + + 1. **Additional type annotations** \- The Java APIs use Java generics in a way that is not fully compatible with the type inferencer of the Scala compiler. Hence the user has to add type annotations to the Scala code, which seems rather non-idiomatic in Scala. + 2. **Verbosity** \- In some cases the Java APIs appear too verbose compared to idiomatic Scala. + 3. **Type Unsafety** \- The Java APIs offer some options where the compile time type safety is sometimes subverted and can result in runtime errors. This stems from the fact that the Serdes defined as part of config are not type-checked at compile time. Hence any missing Serdes can result in runtime errors. + + + +The Kafka Streams DSL for Scala library is a wrapper over the existing Java APIs for Kafka Streams DSL that addresses the concerns raised above. It does not attempt to provide idiomatic Scala APIs that one would implement in a Scala library developed from scratch. The intention is to make the Java APIs more usable in Scala through better type inference, enhanced expressiveness, and less boilerplate. + +The library wraps Java Stream DSL APIs in Scala thereby providing: + + 1. Better type inference in Scala. + 2. Less boilerplate in application code. + 3. The usual builder-style composition that developers get with the original Java API. + 4. Implicit serializers and de-serializers leading to better abstraction and less verbosity. + 5. Better type safety at compile time. + + + +All functionality provided by the Kafka Streams DSL for Scala is under the root package name of `org.apache.kafka.streams.scala`.
+ +Many of the public facing types from the Java API are wrapped. The following Scala abstractions are available to the user: + + * `org.apache.kafka.streams.scala.StreamsBuilder` + * `org.apache.kafka.streams.scala.kstream.KStream` + * `org.apache.kafka.streams.scala.kstream.KTable` + * `org.apache.kafka.streams.scala.kstream.KGroupedStream` + * `org.apache.kafka.streams.scala.kstream.KGroupedTable` + * `org.apache.kafka.streams.scala.kstream.SessionWindowedKStream` + * `org.apache.kafka.streams.scala.kstream.TimeWindowedKStream` + + + +The library also has several utility abstractions and modules that the user needs to use for proper semantics. + + * `org.apache.kafka.streams.scala.ImplicitConversions`: Module that brings into scope the implicit conversions between the Scala and Java classes. + * `org.apache.kafka.streams.scala.serialization.Serdes`: Module that contains all primitive Serdes that can be imported as implicits and a helper to create custom Serdes. + + + +The library is cross-built with Scala 2.12 and 2.13. To reference the library compiled against Scala 2.13, add the following to your Maven `pom.xml`: + + + <dependency> + <groupId>org.apache.kafka</groupId> + <artifactId>kafka-streams-scala_2.13</artifactId> + <version>4.1.0</version> + </dependency> + +To use the library compiled against Scala 2.12, replace the `artifactId` with `kafka-streams-scala_2.12`. + +When using SBT, you can reference the correct library as follows: + + + libraryDependencies += "org.apache.kafka" %% "kafka-streams-scala" % "4.1.0" + +# Sample Usage + +The library works by wrapping the original Java abstractions of Kafka Streams within a Scala wrapper object and then using implicit conversions between them. All the Scala abstractions are named identically to the corresponding Java abstraction, but they reside in a different package of the library, e.g. the Scala class `org.apache.kafka.streams.scala.StreamsBuilder` is a wrapper around `org.apache.kafka.streams.StreamsBuilder`, `org.apache.kafka.streams.scala.kstream.KStream` is a wrapper around `org.apache.kafka.streams.kstream.KStream`, and so on. + +Here's an example of the classic WordCount program that uses the Scala `StreamsBuilder` to build an instance of `KStream`, which is a wrapper around the Java `KStream`. Then we reify to a table and get a `KTable`, which, again, is a wrapper around the Java `KTable`. + +The net result is that the following code is structured just like using the Java API, but with Scala and with far fewer type annotations compared to using the Java API directly from Scala. The difference in type annotation usage is more obvious when given an example. Below is an example WordCount implementation that will be used to demonstrate the differences between the Scala and Java API.
+ + + import java.time.Duration + import java.util.Properties + + import org.apache.kafka.streams.kstream.Materialized + import org.apache.kafka.streams.scala.ImplicitConversions._ + import org.apache.kafka.streams.scala._ + import org.apache.kafka.streams.scala.kstream._ + import org.apache.kafka.streams.{KafkaStreams, StreamsConfig} + + object WordCountApplication extends App { + import Serdes._ + + val props: Properties = { + val p = new Properties() + p.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-application") + p.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka-broker1:9092") + p + } + + val builder: StreamsBuilder = new StreamsBuilder + val textLines: KStream[String, String] = builder.stream[String, String]("TextLinesTopic") + val wordCounts: KTable[String, Long] = textLines + .flatMapValues(textLine => textLine.toLowerCase.split("\W+")) + .groupBy((_, word) => word) + .count(Materialized.as("counts-store")) + wordCounts.toStream.to("WordsWithCountsTopic") + + val streams: KafkaStreams = new KafkaStreams(builder.build(), props) + streams.start() + + sys.ShutdownHookThread { + streams.close(Duration.ofSeconds(10)) + } + } + +In the above code snippet, we don't have to provide any Serdes, `Grouped`, `Produced`, `Consumed` or `Joined` explicitly. They will also not be dependent on any Serdes specified in the config. **In fact all Serdes specified in the config will be ignored by the Scala APIs**. All Serdes and `Grouped`, `Produced`, `Consumed` or `Joined` will be handled through implicit Serdes as discussed later in the Implicit Serdes section. The complete independence from configuration based Serdes is what makes this library completely typesafe. Any missing instances of Serdes, `Grouped`, `Produced`, `Consumed` or `Joined` will be flagged as a compile time error. + +# Implicit Serdes + +One of the common complaints of Scala users with the Java API has been the repetitive usage of the Serdes in API invocations. Many of the APIs need to take the Serdes through abstractions like `Grouped`, `Produced`, `Repartitioned`, `Consumed` or `Joined`. And the user has to supply them every time through the with function of these classes. + +The library uses the power of [Scala implicit parameters](https://docs.scala-lang.org/tour/implicit-parameters.html) to alleviate this concern. As a user you can provide implicit Serdes or implicit values of `Grouped`, `Produced`, `Repartitioned`, `Consumed` or `Joined` once and make your code less verbose. In fact you can just have the implicit Serdes in scope and the library will make the instances of `Grouped`, `Produced`, `Consumed` or `Joined` available in scope. + +The library also bundles all implicit Serdes of the commonly used primitive types in a Scala module - so just import the module vals and have all Serdes in scope. A similar strategy of modular implicits can be adopted for any user-defined Serdes as well (User-defined Serdes are discussed in the next section). + +Here's an example: + + + // DefaultSerdes brings into scope implicit Serdes (mostly for primitives) + // that will set up all Grouped, Produced, Consumed and Joined instances. 
+ // So all APIs below that accept Grouped, Produced, Consumed or Joined will + // get these instances automatically + import Serdes._ + + val builder = new StreamsBuilder() + + val userClicksStream: KStream[String, Long] = builder.stream(userClicksTopic) + + val userRegionsTable: KTable[String, String] = builder.table(userRegionsTopic) + + // The following code fragment does not have a single instance of Grouped, + // Produced, Consumed or Joined supplied explicitly. + // All of them are taken care of by the implicit Serdes imported by DefaultSerdes + val clicksPerRegion: KTable[String, Long] = + userClicksStream + .leftJoin(userRegionsTable)((clicks, region) => (if (region == null) "UNKNOWN" else region, clicks)) + .map((_, regionWithClicks) => regionWithClicks) + .groupByKey + .reduce(_ + _) + + clicksPerRegion.toStream.to(outputTopic) + +Quite a few things are going on in the above code snippet that may warrant a few lines of elaboration: + + 1. The code snippet does not depend on any config defined Serdes. In fact any Serdes defined as part of the config will be ignored. + 2. All Serdes are picked up from the implicits in scope. And `import Serdes._` brings all necessary Serdes in scope. + 3. This is an example of compile time type safety that we don't have in the Java APIs. + 4. The code looks less verbose and more focused towards the actual transformation that it does on the data stream. + + + +# User-Defined Serdes + +When the default primitive Serdes are not enough and we need to define custom Serdes, the usage is exactly the same as above. Just define the implicit Serdes and start building the stream transformation. Here's an example with `AvroSerde`: + + + // domain object as a case class + case class UserClicks(clicks: Long) + + // An implicit Serde implementation for the values we want to + // serialize as avro + implicit val userClicksSerde: Serde[UserClicks] = new AvroSerde + + // Primitive Serdes + import Serdes._ + + // And then business as usual .. + + val userClicksStream: KStream[String, UserClicks] = builder.stream(userClicksTopic) + + val userRegionsTable: KTable[String, String] = builder.table(userRegionsTopic) + + // Compute the total per region by summing the individual click counts per region. + val clicksPerRegion: KTable[String, Long] = + userClicksStream + + // Join the stream against the table. + .leftJoin(userRegionsTable)((clicks, region) => (if (region == null) "UNKNOWN" else region, clicks.clicks)) + + // Change the stream from -> to -> + .map((_, regionWithClicks) => regionWithClicks) + + // Compute the total per region by summing the individual click counts per region. + .groupByKey + .reduce(_ + _) + + // Write the (continuously updating) results to the output topic. + clicksPerRegion.toStream.to(outputTopic) + +A complete example of user-defined Serdes can be found in a test class within the library. 
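+
+For illustration, here is a minimal sketch of how such an implicit Serde could be declared without Avro, using the `fromFn` helper on `org.apache.kafka.streams.scala.serialization.Serdes` (the encoding below is a deliberately naive stand-in for demonstration, not a recommended serialization format):
+
+
+    import java.nio.charset.StandardCharsets
+
+    import org.apache.kafka.common.serialization.Serde
+    import org.apache.kafka.streams.scala.serialization.Serdes
+
+    // Same domain object as in the example above
+    case class UserClicks(clicks: Long)
+
+    // A hand-rolled Serde for illustration only; a real application would
+    // typically use Avro, JSON, or Protobuf instead.
+    implicit val userClicksSerde: Serde[UserClicks] =
+      Serdes.fromFn[UserClicks](
+        (clicks: UserClicks) => clicks.clicks.toString.getBytes(StandardCharsets.UTF_8),
+        (bytes: Array[Byte]) => Option(bytes).map(b => UserClicks(new String(b, StandardCharsets.UTF_8).toLong))
+      )
+
+With this implicit in scope (together with `import Serdes._` for the primitive Serdes), the stream definitions shown above compile without any explicit `Consumed`, `Produced`, or `Grouped` arguments.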
+
+[Previous](/41/streams/developer-guide/config-streams) [Next](/41/streams/developer-guide/processor-api)
+
+  * [Documentation](/documentation)
+  * [Kafka Streams](/streams)
+  * [Developer Guide](/streams/developer-guide/)
+
+
diff --git a/content/en/41/streams/developer-guide/dsl-topology-naming.md b/content/en/41/streams/developer-guide/dsl-topology-naming.md
new file mode 100644
index 000000000..2b37d1125
--- /dev/null
+++ b/content/en/41/streams/developer-guide/dsl-topology-naming.md
@@ -0,0 +1,195 @@
+---
+title: Naming Operators in a Streams DSL application
+description:
+weight: 5
+tags: ['kafka', 'docs']
+aliases:
+keywords:
+type: docs
+---
+
+# Developer Guide for Kafka Streams
+
+# Naming Operators in a Kafka Streams DSL Application
+
+You can now give names to processors when using the Kafka Streams DSL. In the Processor API (PAPI) there are `Processors` and `State Stores`, and you are required to explicitly name each one.
+
+At the DSL layer, there are operators. A single DSL operator may compile down to multiple `Processors` and `State Stores`, and, if required, repartition topics. But with the Kafka Streams DSL, all these names are generated for you. There is a relationship between the generated processor names, state store names (and hence changelog topic names), and repartition topic names. Note that the names of state stores and changelog/repartition topics are "stateful" while processor names are "stateless".
+
+This distinction of stateful vs. stateless names has important implications when updating your topology. While the internal naming makes creating a topology with the DSL much more straightforward, there are a couple of trade-offs. The first trade-off is what we could consider a readability issue. The other, more severe trade-off is the shifting of names due to the relationship between the DSL operator and the generated `Processors`, `State Stores`, changelog topics, and repartition topics.
+
+# Readability Issues
+
+By saying there is a readability trade-off, we are referring to viewing a description of the topology. When you render the string description of your topology via the `Topology#describe()` method, you can see what the processor is, but you don't have any context for its business purpose. For example, consider the following simple topology:
+
+
+
+    KStream<String, String> stream = builder.stream("input");
+    stream.filter((k,v) -> !v.equals("invalid_txn"))
+          .mapValues((v) -> v.substring(0,5))
+          .to("output");
+
+Running `Topology#describe()` yields this string:
+
+
+    Topologies:
+       Sub-topology: 0
+        Source: KSTREAM-SOURCE-0000000000 (topics: [input])
+          --> KSTREAM-FILTER-0000000001
+        Processor: KSTREAM-FILTER-0000000001 (stores: [])
+          --> KSTREAM-MAPVALUES-0000000002
+          <-- KSTREAM-SOURCE-0000000000
+        Processor: KSTREAM-MAPVALUES-0000000002 (stores: [])
+          --> KSTREAM-SINK-0000000003
+          <-- KSTREAM-FILTER-0000000001
+        Sink: KSTREAM-SINK-0000000003 (topic: output)
+          <-- KSTREAM-MAPVALUES-0000000002
+
+From this report, you can see what the different operators are, but what is the broader context here? For example, consider `KSTREAM-FILTER-0000000001`: we can see that it's a filter operation, which means that records that don't match the given predicate are dropped. But what is the meaning of the predicate? Additionally, you can see the topic names of the source and sink nodes, but what if the topics aren't named in a meaningful way? Then you're left to guess the business purpose behind these topics.
+
+Also notice the numbering here: the source node is suffixed with `0000000000`, indicating it's the first processor in the topology. The filter is suffixed with `0000000001`, indicating it's the second processor in the topology. In Kafka Streams, there are now overloaded methods for both `KStream` and `KTable` that accept a new parameter, `Named`. By using the `Named` class, DSL users can provide meaningful names to the processors in their topology.
+
+Now let's take a look at your topology with all the processors named:
+
+
+    KStream<String, String> stream =
+      builder.stream("input", Consumed.as("Customer_transactions_input_topic"));
+    stream.filter((k,v) -> !v.equals("invalid_txn"), Named.as("filter_out_invalid_txns"))
+          .mapValues((v) -> v.substring(0,5), Named.as("Map_values_to_first_6_characters"))
+          .to("output", Produced.as("Mapped_transactions_output_topic"));
+
+
+    Topologies:
+       Sub-topology: 0
+        Source: Customer_transactions_input_topic (topics: [input])
+          --> filter_out_invalid_txns
+        Processor: filter_out_invalid_txns (stores: [])
+          --> Map_values_to_first_6_characters
+          <-- Customer_transactions_input_topic
+        Processor: Map_values_to_first_6_characters (stores: [])
+          --> Mapped_transactions_output_topic
+          <-- filter_out_invalid_txns
+        Sink: Mapped_transactions_output_topic (topic: output)
+          <-- Map_values_to_first_6_characters
+
+Now you can look at the topology description and easily understand what role each processor plays in the topology. But there's another reason for naming your processor nodes: it matters when you have stateful parts that remain between restarts of your Kafka Streams application - state stores, changelog topics, and repartition topics.
+
+# Changing Names
+
+Generated names are numbered by the position at which the operator is built in the topology. The name generation strategy is `KSTREAM|KTABLE-<operator name>-<number suffix>`. The number is a globally incrementing number that represents the operator's order in the topology. The generated number is prefixed with a varying number of "0"s to create a string that is consistently 10 characters long. This means that if you add/remove or shift the order of operations, the position of the processor shifts, which shifts the name of the processor. Since **most** processors exist in memory only, this name shifting presents no issue for many topologies. But the name shifting does have implications for topologies with stateful operators or repartition topics. Here's a different topology with some state:
+
+
+    KStream<String, String> stream = builder.stream("input");
+    stream.groupByKey()
+          .count()
+          .toStream()
+          .to("output");
+
+This topology description yields the following:
+
+
+    Topologies:
+       Sub-topology: 0
+        Source: KSTREAM-SOURCE-0000000000 (topics: [input])
+          --> KSTREAM-AGGREGATE-0000000002
+        Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001])
+          --> KTABLE-TOSTREAM-0000000003
+          <-- KSTREAM-SOURCE-0000000000
+        Processor: KTABLE-TOSTREAM-0000000003 (stores: [])
+          --> KSTREAM-SINK-0000000004
+          <-- KSTREAM-AGGREGATE-0000000002
+        Sink: KSTREAM-SINK-0000000004 (topic: output)
+          <-- KTABLE-TOSTREAM-0000000003
+
+You can see from the topology description above that the state store is named `KSTREAM-AGGREGATE-STATE-STORE-0000000001`. Here's what happens when you add a filter to keep some of the records out of the aggregation:
+
+
+    KStream<String, String> stream = builder.stream("input");
+    stream.filter((k,v) -> v != null && v.length() >= 6)
+          .groupByKey()
+          .count()
+          .toStream()
+          .to("output");
+
+And the corresponding topology:
+
+
+    Topologies:
+       Sub-topology: 0
+        Source: KSTREAM-SOURCE-0000000000 (topics: [input])
+          --> KSTREAM-FILTER-0000000001
+        Processor: KSTREAM-FILTER-0000000001 (stores: [])
+          --> KSTREAM-AGGREGATE-0000000003
+          <-- KSTREAM-SOURCE-0000000000
+        Processor: KSTREAM-AGGREGATE-0000000003 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000002])
+          --> KTABLE-TOSTREAM-0000000004
+          <-- KSTREAM-FILTER-0000000001
+        Processor: KTABLE-TOSTREAM-0000000004 (stores: [])
+          --> KSTREAM-SINK-0000000005
+          <-- KSTREAM-AGGREGATE-0000000003
+        Sink: KSTREAM-SINK-0000000005 (topic: output)
+          <-- KTABLE-TOSTREAM-0000000004
+
+Notice that since you've added an operation _before_ the `count` operation, the state store (and the changelog topic) names have changed. This name change means you can't do a rolling re-deployment of your updated topology. Also, you must use the [Streams Reset Tool](/41/streams/developer-guide/app-reset-tool) to re-calculate the aggregations, because the changelog topic name has changed, and the new changelog topic will contain no data on start-up. Fortunately, there's an easy solution to remedy this situation. Give the state store a user-defined name instead of relying on the generated one, so you don't have to worry about topology changes shifting the name of the state store. You've had the ability to name repartition topics with the `Joined`, `StreamJoined`, and `Grouped` classes, and to name state store and changelog topics with `Materialized`. But it's worth reiterating the importance of naming these DSL topology operations again. Here's how your DSL code looks now, giving a specific name to your state store:
+
+
+    KStream<String, String> stream = builder.stream("input");
+    stream.filter((k, v) -> v != null && v.length() >= 6)
+          .groupByKey()
+          .count(Materialized.as("Purchase_count_store"))
+          .toStream()
+          .to("output");
+
+And here's the topology:
+
+
+    Topologies:
+       Sub-topology: 0
+        Source: KSTREAM-SOURCE-0000000000 (topics: [input])
+          --> KSTREAM-FILTER-0000000001
+        Processor: KSTREAM-FILTER-0000000001 (stores: [])
+          --> KSTREAM-AGGREGATE-0000000002
+          <-- KSTREAM-SOURCE-0000000000
+        Processor: KSTREAM-AGGREGATE-0000000002 (stores: [Purchase_count_store])
+          --> KTABLE-TOSTREAM-0000000003
+          <-- KSTREAM-FILTER-0000000001
+        Processor: KTABLE-TOSTREAM-0000000003 (stores: [])
+          --> KSTREAM-SINK-0000000004
+          <-- KSTREAM-AGGREGATE-0000000002
+        Sink: KSTREAM-SINK-0000000004 (topic: output)
+          <-- KTABLE-TOSTREAM-0000000003
+
+Now, even though you've added processors before your state store, the store name and its changelog topic names don't change. This makes your topology more robust and resilient to changes made by adding or removing processors.
+
+# Conclusion
+
+It's a good practice to name your processing nodes when using the DSL, and it's even more important to do this when you have "stateful" processors in your application, such as repartition topics and state stores (and the accompanying changelog topics).
+
+Here are a couple of points to remember when naming your DSL topology:
+
+  1. If you have an _existing topology_ and you _haven't_ named your state stores (and changelog topics) and repartition topics, we recommend that you do so. But this will be a topology-breaking change, so you'll need to shut down all application instances, make the changes, and run the [Streams Reset Tool](/41/streams/developer-guide/app-reset-tool). Although this may be inconvenient at first, it's worth the effort to protect your application from unexpected errors due to topology changes.
+  2. If you have a _new topology_, make sure you name the persistent parts of your topology: state stores (changelog topics) and repartition topics. This way, when you deploy your application, you're protected from topology changes that otherwise would break your Kafka Streams application. If you don't want to add names to stateless processors at first, that's fine as you can always go back and add the names later.
+
+Here's a quick reference on naming the critical parts of your Kafka Streams application to prevent topology name changes from breaking your application:
+
+Operation| Naming Class
+---|---
+Aggregation repartition topics| Grouped
+KStream-KStream Join repartition topics| StreamJoined
+KStream-KTable Join repartition topic| Joined
+KStream-KStream Join state stores| StreamJoined
+State Stores (for aggregations and KTable-KTable joins)| Materialized
+Stream/Table non-stateful operations| Named
+
+To further enforce best practices, Kafka Streams provides a configuration option, `ensure.explicit.internal.resource.naming`:
+
+
+    Properties props = new Properties();
+    props.put(StreamsConfig.ENSURE_EXPLICIT_INTERNAL_RESOURCE_NAMING_CONFIG, true);
+
+
+This parameter ensures that all internal topics, state stores, and changelog topics have explicitly defined names. When this configuration is enabled, a Kafka Streams application will not start if any of these components rely on auto-generated names. This guarantees stability across topology updates, as manually defined names remain unchanged even when new processors or transformations are added. Enforcing explicit naming is particularly important in production environments, where consistency and backward compatibility are essential for maintaining reliable stream processing applications.
+
+  * [Documentation](/documentation)
+  * [Kafka Streams](/streams)
+  * [Developer Guide](/streams/developer-guide/)
+
+
diff --git a/content/en/41/streams/developer-guide/interactive-queries.md b/content/en/41/streams/developer-guide/interactive-queries.md
new file mode 100644
index 000000000..cf1ef3164
--- /dev/null
+++ b/content/en/41/streams/developer-guide/interactive-queries.md
@@ -0,0 +1,433 @@
+---
+title: Interactive Queries
+description:
+weight: 8
+tags: ['kafka', 'docs']
+aliases:
+keywords:
+type: docs
+---
+
+# Interactive Queries
+
+Interactive queries allow you to leverage the state of your application from outside your application. Kafka Streams enables your application to be queryable.
+
+**Table of Contents**
+
+  * Querying local state stores for an app instance
+    * Querying local key-value stores
+    * Querying local window stores
+    * Querying local custom state stores
+  * Querying remote state stores for the entire app
+    * Adding an RPC layer to your application
+    * Exposing the RPC endpoints of your application
+    * Discovering and accessing application instances and their local state stores
+  * Demo applications
+
+
+
+The full state of your application is typically [split across many distributed instances of your application](../architecture.html#streams_architecture_state), and across many state stores that are managed locally by these application instances.
+ +![](/41/images/streams-interactive-queries-03.png) + +There are local and remote components to interactively querying the state of your application. + +Local state + An application instance can query the locally managed portion of the state and directly query its own local state stores. You can use the corresponding local data in other parts of your application code, as long as it doesn't require calling the Kafka Streams API. Querying state stores is always read-only to guarantee that the underlying state stores will never be mutated out-of-band (e.g., you cannot add new entries). State stores should only be mutated by the corresponding processor topology and the input data it operates on. For more information, see Querying local state stores for an app instance. +Remote state + + +To query the full state of your application, you must connect the various fragments of the state, including: + + * query local state stores + * discover all running instances of your application in the network and their state stores + * communicate with these instances over the network (e.g., an RPC layer) + + + +Connecting these fragments enables communication between instances of the same app and communication from other applications for interactive queries. For more information, see Querying remote state stores for the entire app. + +Kafka Streams natively provides all of the required functionality for interactively querying the state of your application, except if you want to expose the full state of your application via interactive queries. To allow application instances to communicate over the network, you must add a Remote Procedure Call (RPC) layer to your application (e.g., REST API). + +This table shows the Kafka Streams native communication support for various procedures. + +Procedure | Application instance | Entire application +---|---|--- +Query local state stores of an app instance | Supported | Supported +Make an app instance discoverable to others | Supported | Supported +Discover all running app instances and their state stores | Supported | Supported +Communicate with app instances over the network (RPC) | Supported | Not supported (you must configure) + +# Querying local state stores for an app instance + +A Kafka Streams application typically runs on multiple instances. The state that is locally available on any given instance is only a subset of the [application's entire state](../architecture.html#streams-architecture-state). Querying the local stores on an instance will only return data locally available on that particular instance. + +The method `KafkaStreams#store(...)` finds an application instance's local state stores by name and type. Note that interactive queries are not supported for [versioned state stores](processor-api.html#streams-developer-guide-state-store-versioned) at this time. + +![](/41/images/streams-interactive-queries-api-01.png) + +Every application instance can directly query any of its local state stores. + +The _name_ of a state store is defined when you create the store. You can create the store explicitly by using the Processor API or implicitly by using stateful operations in the DSL. + +The _type_ of a state store is defined by `QueryableStoreType`. You can access the built-in types via the class `QueryableStoreTypes`. Kafka Streams currently has two built-in types: + + * A key-value store `QueryableStoreTypes#keyValueStore()`, see Querying local key-value stores. + * A window store `QueryableStoreTypes#windowStore()`, see Querying local window stores. 
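+
+Depending on the Kafka Streams version, the store name and the queryable store type are combined into a `StoreQueryParameters` object when calling `KafkaStreams#store(...)`. A minimal lookup sketch, reusing the "CountsKeyValueStore" name from the example below (purely illustrative):
+
+
+    // Combine the store name and the queryable store type, then fetch the store.
+    StoreQueryParameters<ReadOnlyKeyValueStore<String, Long>> storeQueryParameters =
+        StoreQueryParameters.fromNameAndType("CountsKeyValueStore", QueryableStoreTypes.keyValueStore());
+    ReadOnlyKeyValueStore<String, Long> keyValueStore = streams.store(storeQueryParameters);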
+ + + +You can also implement your own QueryableStoreType as described in section Querying local custom state stores. + +**Note** + +Kafka Streams materializes one state store per stream partition. This means your application will potentially manage many underlying state stores. The API enables you to query all of the underlying stores without having to know which partition the data is in. + +# Querying local key-value stores + +To query a local key-value store, you must first create a topology with a key-value store. This example creates a key-value store named "CountsKeyValueStore". This store will hold the latest count for any word that is found on the topic "word-count-input". + + + Properties props = ...; + StreamsBuilder builder = ...; + KStream textLines = ...; + + // Define the processing topology (here: WordCount) + KGroupedStream groupedByWord = textLines + .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\W+"))) + .groupBy((key, word) -> word, Grouped.with(stringSerde, stringSerde)); + + // Create a key-value store named "CountsKeyValueStore" for the all-time word counts + groupedByWord.count(Materialized.as("CountsKeyValueStore")); + + // Start an instance of the topology + KafkaStreams streams = new KafkaStreams(builder, props); + streams.start(); + +After the application has started, you can get access to "CountsKeyValueStore" and then query it via the [ReadOnlyKeyValueStore](https://github.com/apache/kafka/blob/4.1/streams/src/main/java/org/apache/kafka/streams/state/ReadOnlyKeyValueStore.java) API: + + + // Get the key-value store CountsKeyValueStore + ReadOnlyKeyValueStore keyValueStore = + streams.store("CountsKeyValueStore", QueryableStoreTypes.keyValueStore()); + + // Get value by key + System.out.println("count for hello:" + keyValueStore.get("hello")); + + // Get the values for a range of keys available in this application instance + KeyValueIterator range = keyValueStore.range("all", "streams"); + while (range.hasNext()) { + KeyValue next = range.next(); + System.out.println("count for " + next.key + ": " + next.value); + } + + // Get the values for all of the keys available in this application instance + KeyValueIterator range = keyValueStore.all(); + while (range.hasNext()) { + KeyValue next = range.next(); + System.out.println("count for " + next.key + ": " + next.value); + } + +You can also materialize the results of stateless operators by using the overloaded methods that take a `queryableStoreName` as shown in the example below: + + + StreamsBuilder builder = ...; + KTable regionCounts = ...; + + // materialize the result of filtering corresponding to odd numbers + // the "queryableStoreName" can be subsequently queried. + KTable oddCounts = numberLines.filter((region, count) -> (count % 2 != 0), + Materialized.as("queryableStoreName")); + + // do not materialize the result of filtering corresponding to even numbers + // this means that these results will not be materialized and cannot be queried. + KTable oddCounts = numberLines.filter((region, count) -> (count % 2 == 0)); + +# Querying local window stores + +A window store will potentially have many results for any given key because the key can be present in multiple windows. However, there is only one result per window for a given key. + +To query a local window store, you must first create a topology with a window store. This example creates a window store named "CountsWindowStore" that contains the counts for words in 1-minute windows. 
+ + + StreamsBuilder builder = ...; + KStream textLines = ...; + + // Define the processing topology (here: WordCount) + KGroupedStream groupedByWord = textLines + .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\W+"))) + .groupBy((key, word) -> word, Grouped.with(stringSerde, stringSerde)); + + // Create a window state store named "CountsWindowStore" that contains the word counts for every minute + groupedByWord.windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofSeconds(60))) + .count(Materialized.as("CountsWindowStore")); + +After the application has started, you can get access to "CountsWindowStore" and then query it via the [ReadOnlyWindowStore](https://github.com/apache/kafka/blob/4.1/streams/src/main/java/org/apache/kafka/streams/state/ReadOnlyWindowStore.java) API: + + + // Get the window store named "CountsWindowStore" + ReadOnlyWindowStore windowStore = + streams.store("CountsWindowStore", QueryableStoreTypes.windowStore()); + + // Fetch values for the key "world" for all of the windows available in this application instance. + // To get *all* available windows we fetch windows from the beginning of time until now. + Instant timeFrom = Instant.ofEpochMilli(0); // beginning of time = oldest available + Instant timeTo = Instant.now(); // now (in processing-time) + WindowStoreIterator iterator = windowStore.fetch("world", timeFrom, timeTo); + while (iterator.hasNext()) { + KeyValue next = iterator.next(); + long windowTimestamp = next.key; + System.out.println("Count of 'world' @ time " + windowTimestamp + " is " + next.value); + } + +# Querying local custom state stores + +**Note** + +Only the [Processor API](processor-api.html#streams-developer-guide-processor-api) supports custom state stores. + +Before querying the custom state stores you must implement these interfaces: + + * Your custom state store must implement `StateStore`. + * You must have an interface to represent the operations available on the store. + * You must provide an implementation of `StoreBuilder` for creating instances of your store. + * It is recommended that you provide an interface that restricts access to read-only operations. This prevents users of this API from mutating the state of your running Kafka Streams application out-of-band. + + + +The class/interface hierarchy for your custom store might look something like: + + + public class MyCustomStore implements StateStore, MyWriteableCustomStore { + // implementation of the actual store + } + + // Read-write interface for MyCustomStore + public interface MyWriteableCustomStore extends MyReadableCustomStore { + void write(K Key, V value); + } + + // Read-only interface for MyCustomStore + public interface MyReadableCustomStore { + V read(K key); + } + + public class MyCustomStoreBuilder implements StoreBuilder { + // implementation of the supplier for MyCustomStore + } + +To make this store queryable you must: + + * Provide an implementation of [QueryableStoreType](https://github.com/apache/kafka/blob/4.1/streams/src/main/java/org/apache/kafka/streams/state/QueryableStoreType.java). + * Provide a wrapper class that has access to all of the underlying instances of the store and is used for querying. 
+ + + +Here is how to implement `QueryableStoreType`: + + + public class MyCustomStoreType implements QueryableStoreType> { + + // Only accept StateStores that are of type MyCustomStore + public boolean accepts(final StateStore stateStore) { + return stateStore instanceOf MyCustomStore; + } + + public MyReadableCustomStore create(final StateStoreProvider storeProvider, final String storeName) { + return new MyCustomStoreTypeWrapper(storeProvider, storeName, this); + } + + } + +A wrapper class is required because each instance of a Kafka Streams application may run multiple stream tasks and manage multiple local instances of a particular state store. The wrapper class hides this complexity and lets you query a "logical" state store by name without having to know about all of the underlying local instances of that state store. + +When implementing your wrapper class you must use the [StateStoreProvider](https://github.com/apache/kafka/blob/4.1/streams/src/main/java/org/apache/kafka/streams/state/internals/StateStoreProvider.java) interface to get access to the underlying instances of your store. `StateStoreProvider#stores(String storeName, QueryableStoreType queryableStoreType)` returns a `List` of state stores with the given storeName and of the type as defined by `queryableStoreType`. + +Here is an example implementation of the wrapper: + + + // We strongly recommended implementing a read-only interface + // to restrict usage of the store to safe read operations! + public class MyCustomStoreTypeWrapper implements MyReadableCustomStore { + + private final QueryableStoreType> customStoreType; + private final String storeName; + private final StateStoreProvider provider; + + public CustomStoreTypeWrapper(final StateStoreProvider provider, + final String storeName, + final QueryableStoreType> customStoreType) { + + // ... assign fields ... + } + + // Implement a safe read method + @Override + public V read(final K key) { + // Get all the stores with storeName and of customStoreType + final List> stores = provider.getStores(storeName, customStoreType); + // Try and find the value for the given key + final Optional value = stores.stream().filter(store -> store.read(key) != null).findFirst(); + // Return the value if it exists + return value.orElse(null); + } + + } + +You can now find and query your custom store: + + + Topology topology = ...; + ProcessorSupplier processorSuppler = ...; + + // Create CustomStoreSupplier for store name the-custom-store + MyCustomStoreBuilder customStoreBuilder = new MyCustomStoreBuilder("the-custom-store") //...; + // Add the source topic + topology.addSource("input", "inputTopic"); + // Add a custom processor that reads from the source topic + topology.addProcessor("the-processor", processorSupplier, "input"); + // Connect your custom state store to the custom processor above + topology.addStateStore(customStoreBuilder, "the-processor"); + + KafkaStreams streams = new KafkaStreams(topology, config); + streams.start(); + + // Get access to the custom store + MyReadableCustomStore store = streams.store("the-custom-store", new MyCustomStoreType()); + // Query the store + String value = store.read("key"); + +# Querying remote state stores for the entire app + +To query remote states for the entire app, you must expose the application's full state to other applications, including applications that are running on different machines. 
+ +For example, you have a Kafka Streams application that processes user events in a multi-player video game, and you want to retrieve the latest status of each user directly and display it in a mobile app. Here are the required steps to make the full state of your application queryable: + + 1. Add an RPC layer to your application so that the instances of your application can be interacted with via the network (e.g., a REST API, Thrift, a custom protocol, and so on). The instances must respond to interactive queries. You can follow the reference examples provided to get started. + 2. Expose the RPC endpoints of your application's instances via the `application.server` configuration setting of Kafka Streams. Because RPC endpoints must be unique within a network, each instance has its own value for this configuration setting. This makes an application instance discoverable by other instances. + 3. In the RPC layer, discover remote application instances and their state stores and query locally available state stores to make the full state of your application queryable. The remote application instances can forward queries to other app instances if a particular instance lacks the local data to respond to a query. The locally available state stores can directly respond to queries. + + + +![](/41/images/streams-interactive-queries-api-02.png) + +Discover any running instances of the same application as well as the respective RPC endpoints they expose for interactive queries + +# Adding an RPC layer to your application + +There are many ways to add an RPC layer. The only requirements are that the RPC layer is embedded within the Kafka Streams application and that it exposes an endpoint that other application instances and applications can connect to. + +# Exposing the RPC endpoints of your application + +To enable remote state store discovery in a distributed Kafka Streams application, you must set the [configuration property](config-streams.html#streams-developer-guide-required-configs) in the config properties. The `application.server` property defines a unique `host:port` pair that points to the RPC endpoint of the respective instance of a Kafka Streams application. The value of this configuration property will vary across the instances of your application. When this property is set, Kafka Streams will keep track of the RPC endpoint information for every instance of an application, its state stores, and assigned stream partitions through instances of [StreamsMetadata](/41/javadoc/org/apache/kafka/streams/state/StreamsMetadata.html). + +**Tip** + +Consider leveraging the exposed RPC endpoints of your application for further functionality, such as piggybacking additional inter-application communication that goes beyond interactive queries. + +This example shows how to configure and run a Kafka Streams application that supports the discovery of its state stores. + + + Properties props = new Properties(); + // Set the unique RPC endpoint of this application instance through which it + // can be interactively queried. In a real application, the value would most + // probably not be hardcoded but derived dynamically. + String rpcEndpoint = "host1:4460"; + props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, rpcEndpoint); + // ... further settings may follow here ... 
+ + StreamsBuilder builder = new StreamsBuilder(); + + KStream textLines = builder.stream(stringSerde, stringSerde, "word-count-input"); + + final KGroupedStream groupedByWord = textLines + .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\W+"))) + .groupBy((key, word) -> word, Grouped.with(stringSerde, stringSerde)); + + // This call to `count()` creates a state store named "word-count". + // The state store is discoverable and can be queried interactively. + groupedByWord.count(Materialized.as("word-count")); + + // Start an instance of the topology + KafkaStreams streams = new KafkaStreams(builder, props); + streams.start(); + + // Then, create and start the actual RPC service for remote access to this + // application instance's local state stores. + // + // This service should be started on the same host and port as defined above by + // the property `StreamsConfig.APPLICATION_SERVER_CONFIG`. The example below is + // fictitious, but we provide end-to-end demo applications (such as KafkaMusicExample) + // that showcase how to implement such a service to get you started. + MyRPCService rpcService = ...; + rpcService.listenAt(rpcEndpoint); + +# Discovering and accessing application instances and their local state stores + +The following methods return [StreamsMetadata](/41/javadoc/org/apache/kafka/streams/state/StreamsMetadata.html) objects, which provide meta-information about application instances such as their RPC endpoint and locally available state stores. + + * `KafkaStreams#allMetadata()`: find all instances of this application + * `KafkaStreams#allMetadataForStore(String storeName)`: find those applications instances that manage local instances of the state store "storeName" + * `KafkaStreams#metadataForKey(String storeName, K key, Serializer keySerializer)`: using the default stream partitioning strategy, find the one application instance that holds the data for the given key in the given state store + * `KafkaStreams#metadataForKey(String storeName, K key, StreamPartitioner partitioner)`: using `partitioner`, find the one application instance that holds the data for the given key in the given state store + + + +Attention + +If `application.server` is not configured for an application instance, then the above methods will not find any [StreamsMetadata](/41/javadoc/org/apache/kafka/streams/state/StreamsMetadata.html) for it. + +For example, we can now find the `StreamsMetadata` for the state store named "word-count" that we defined in the code example shown in the previous section: + + + KafkaStreams streams = ...; + // Find all the locations of local instances of the state store named "word-count" + Collection wordCountHosts = streams.allMetadataForStore("word-count"); + + // For illustrative purposes, we assume using an HTTP client to talk to remote app instances. + HttpClient http = ...; + + // Get the word count for word (aka key) 'alice': Approach 1 + // + // We first find the one app instance that manages the count for 'alice' in its local state stores. + StreamsMetadata metadata = streams.metadataForKey("word-count", "alice", Serdes.String().serializer()); + // Then, we query only that single app instance for the latest count of 'alice'. + // Note: The RPC URL shown below is fictitious and only serves to illustrate the idea. Ultimately, + // the URL (or, in general, the method of communication) will depend on the RPC layer you opted to + // implement. 
Again, we provide end-to-end demo applications (such as KafkaMusicExample) that showcase + // how to implement such an RPC layer. + Long result = http.getLong("http://" + metadata.host() + ":" + metadata.port() + "/word-count/alice"); + + // Get the word count for word (aka key) 'alice': Approach 2 + // + // Alternatively, we could also choose (say) a brute-force approach where we query every app instance + // until we find the one that happens to know about 'alice'. + Optional result = streams.allMetadataForStore("word-count") + .stream() + .map(streamsMetadata -> { + // Construct the (fictituous) full endpoint URL to query the current remote application instance + String url = "http://" + streamsMetadata.host() + ":" + streamsMetadata.port() + "/word-count/alice"; + // Read and return the count for 'alice', if any. + return http.getLong(url); + }) + .filter(s -> s != null) + .findFirst(); + +At this point the full state of the application is interactively queryable: + + * You can discover the running instances of the application and the state stores they manage locally. + * Through the RPC layer that was added to the application, you can communicate with these application instances over the network and query them for locally available state. + * The application instances are able to serve such queries because they can directly query their own local state stores and respond via the RPC layer. + * Collectively, this allows us to query the full state of the entire application. + + + +To see an end-to-end application with interactive queries, review the demo applications. + +[Previous](/41/streams/developer-guide/testing) [Next](/41/streams/developer-guide/memory-mgmt) + + * [Documentation](/documentation) + * [Kafka Streams](/streams) + * [Developer Guide](/streams/developer-guide/) + + diff --git a/content/en/41/streams/developer-guide/manage-topics.md b/content/en/41/streams/developer-guide/manage-topics.md new file mode 100644 index 000000000..4fd6d12f1 --- /dev/null +++ b/content/en/41/streams/developer-guide/manage-topics.md @@ -0,0 +1,63 @@ +--- +title: Managing Streams Application Topics +description: +weight: 11 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Managing Streams Application Topics + +A Kafka Streams application continuously reads from Kafka topics, processes the read data, and then writes the processing results back into Kafka topics. The application may also auto-create other Kafka topics in the Kafka brokers, for example state store changelogs topics. This section describes the differences these topic types and how to manage the topics and your applications. + +Kafka Streams distinguishes between user topics and internal topics. + +# User topics + +User topics exist externally to an application and are read from or written to by the application, including: + +Input topics + Topics that are specified via source processors in the application's topology; e.g. via `StreamsBuilder#stream()`, `StreamsBuilder#table()` and `Topology#addSource()`. +Output topics + Topics that are specified via sink processors in the application's topology; e.g. via `KStream#to()`, `KTable.to()` and `Topology#addSink()`. + +User topics must be created and manually managed ahead of time (e.g., via the [topic tools](../../kafka/post-deployment.html#kafka-operations-admin)). If user topics are shared among multiple applications for reading and writing, the application users must coordinate topic management. 
If user topics are centrally managed, then application users would not need to manage topics themselves but simply obtain access to them.
+
+Note
+
+You should not use the auto-create topic feature on the brokers to create user topics, because:
+
+  * Auto-creation of topics may be disabled in your Kafka cluster.
+  * Auto-creation automatically applies the default topic settings such as the replication factor. These default settings might not be what you want for certain output topics (e.g., `auto.create.topics.enable=true` in the [Kafka broker configuration](http://kafka.apache.org/0100/documentation.html#brokerconfigs)).
+
+
+
+# Internal topics
+
+Internal topics are used internally by the Kafka Streams application while executing, for example, the changelog topics for state stores. These topics are created by the application and are only used by that stream application.
+
+If security is enabled on the Kafka brokers, you must grant the underlying clients admin permissions so that they can create internal topics. For more information, see [Streams Security](security.html#streams-developer-guide-security).
+
+Note
+
+The internal topics follow the naming convention `<application.id>-<operatorName>-<suffix>`, but this convention is not guaranteed for future releases.
+
+The following settings apply to the default configuration for internal topics:
+
+  * For all internal topics, `message.timestamp.type` is set to `CreateTime`.
+  * For internal repartition topics, the compaction policy is `delete` and the retention time is `-1` (infinite).
+  * For internal changelog topics for key-value stores, the compaction policy is `compact`.
+  * For internal changelog topics for windowed key-value stores, the compaction policy is `delete,compact`. The retention time is set to 24 hours plus your setting for the windowed store.
+  * For internal changelog topics for versioned state stores, the cleanup policy is `compact`, and `min.compaction.lag.ms` is set to 24 hours plus the store's `historyRetentionMs` value.
+
+
+
+[Previous](/41/streams/developer-guide/running-app) [Next](/41/streams/developer-guide/security)
+
+  * [Documentation](/documentation)
+  * [Kafka Streams](/streams)
+  * [Developer Guide](/streams/developer-guide/)
+
+
diff --git a/content/en/41/streams/developer-guide/memory-mgmt.md b/content/en/41/streams/developer-guide/memory-mgmt.md
new file mode 100644
index 000000000..d4047d6c0
--- /dev/null
+++ b/content/en/41/streams/developer-guide/memory-mgmt.md
@@ -0,0 +1,196 @@
+---
+title: Memory Management
+description:
+weight: 9
+tags: ['kafka', 'docs']
+aliases:
+keywords:
+type: docs
+---
+
+# Memory Management
+
+You can specify the total memory (RAM) size used for internal caching and compacting of records. This caching happens before the records are written to state stores or forwarded downstream to other nodes.
+
+The record caches are implemented slightly differently in the DSL and the Processor API.
+
+**Table of Contents**
+
+  * Record caches in the DSL
+  * Record caches in the Processor API
+  * RocksDB
+  * Other memory usage
+
+
+
+# Record caches in the DSL
+
+You can specify the total memory (RAM) size of the record cache for an instance of the processing topology. It is leveraged by the following `KTable` instances:
+
+  * Source `KTable`: `KTable` instances that are created via `StreamsBuilder#table()` or `StreamsBuilder#globalTable()`.
+  * Aggregation `KTable`: instances of `KTable` that are created as a result of [aggregations](dsl-api.html#streams-developer-guide-dsl-aggregating).
+ + + +For such `KTable` instances, the record cache is used for: + + * Internal caching and compacting of output records before they are written by the underlying stateful [processor node](../core-concepts#streams_processor_node) to its internal state stores. + * Internal caching and compacting of output records before they are forwarded from the underlying stateful [processor node](../core-concepts#streams_processor_node) to any of its downstream processor nodes. + + + +Use the following example to understand the behaviors with and without record caching. In this example, the input is a `KStream` with the records `: , , , `. The focus in this example is on the records with key == `A`. + + * An [aggregation](dsl-api.html#streams-developer-guide-dsl-aggregating) computes the sum of record values, grouped by key, for the input and returns a `KTable`. + +> * **Without caching** : a sequence of output records is emitted for key `A` that represent changes in the resulting aggregation table. The parentheses (`()`) denote changes, the left number is the new aggregate value and the right number is the old aggregate value: `, , `. +> * **With caching** : a single output record is emitted for key `A` that would likely be compacted in the cache, leading to a single output record of ``. This record is written to the aggregation's internal state store and forwarded to any downstream operations. + + + + +The cache size is specified through the `cache.max.bytes.buffering` parameter, which is a global setting per processing topology: + + + // Enable record cache of size 10 MB. + Properties props = new Properties(); + props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 10 * 1024 * 1024L); + +This parameter controls the number of bytes allocated for caching. Specifically, for a processor topology instance with `T` threads and `C` bytes allocated for caching, each thread will have an even `C/T` bytes to construct its own cache and use as it sees fit among its tasks. This means that there are as many caches as there are threads, but no sharing of caches across threads happens. + +The basic API for the cache is made of `put()` and `get()` calls. Records are evicted using a simple LRU scheme after the cache size is reached. The first time a keyed record `R1 = ` finishes processing at a node, it is marked as dirty in the cache. Any other keyed record `R2 = ` with the same key `K1` that is processed on that node during that time will overwrite ``, this is referred to as "being compacted". This has the same effect as [Kafka's log compaction](https://kafka.apache.org/documentation.html#compaction), but happens earlier, while the records are still in memory, and within your client-side application, rather than on the server-side (i.e. the Kafka broker). After flushing, `R2` is forwarded to the next processing node and then written to the local state store. + +The semantics of caching is that data is flushed to the state store and forwarded to the next downstream processor node whenever the earliest of `commit.interval.ms` or `cache.max.bytes.buffering` (cache pressure) hits. Both `commit.interval.ms` and `cache.max.bytes.buffering` are global parameters. As such, it is not possible to specify different parameters for individual nodes. + +Here are example settings for both parameters based on desired scenarios. 
+ + * To turn off caching the cache size can be set to zero: + + // Disable record cache + Properties props = new Properties(); + props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0); + + * To enable caching but still have an upper bound on how long records will be cached, you can set the commit interval. In this example, it is set to 1000 milliseconds: + + Properties props = new Properties(); + // Enable record cache of size 10 MB. + props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 10 * 1024 * 1024L); + // Set commit interval to 1 second. + props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000); + + + + +The effect of these two configurations is described in the figure below. The records are shown using 4 keys: blue, red, yellow, and green. Assume the cache has space for only 3 keys. + + * When the cache is disabled (a), all of the input records will be output. + + * When the cache is enabled (b): + +> * Most records are output at the end of commit intervals (e.g., at `t1` a single blue record is output, which is the final over-write of the blue key up to that time). +> * Some records are output because of cache pressure (i.e. before the end of a commit interval). For example, see the red record before `t2`. With smaller cache sizes we expect cache pressure to be the primary factor that dictates when records are output. With large cache sizes, the commit interval will be the primary factor. +> * The total number of records output has been reduced from 15 to 8. + + + + +![](/41/images/streams-cache-and-commit-interval.png) + +# Record caches in the Processor API + +You can specify the total memory (RAM) size of the record cache for an instance of the processing topology. It is used for internal caching and compacting of output records before they are written from a stateful processor node to its state stores. + +The record cache in the Processor API does not cache or compact any output records that are being forwarded downstream. This means that all downstream processor nodes can see all records, whereas the state stores see a reduced number of records. This does not impact correctness of the system, but is a performance optimization for the state stores. For example, with the Processor API you can store a record in a state store while forwarding a different value downstream. + +Following from the example first shown in section [State Stores](processor-api.html#streams-developer-guide-state-store), to disable caching, you can add the `withCachingDisabled` call (note that caches are enabled by default, however there is an explicit `withCachingEnabled` call). + + + StoreBuilder countStoreBuilder = + Stores.keyValueStoreBuilder( + Stores.persistentKeyValueStore("Counts"), + Serdes.String(), + Serdes.Long()) + .withCachingEnabled(); + +Record caches are not supported for [versioned state stores](processor-api.html#streams-developer-guide-state-store-versioned). + +To avoid reading stale data, you can `flush()` the store before creating the iterator. Note, that flushing too often can lead to performance degration if RocksDB is used, so we advice to avoid flushing manually in general. + +# RocksDB + +Each instance of RocksDB allocates off-heap memory for a block cache, index and filter blocks, and memtable (write buffer). Critical configs (for RocksDB version 4.1.0) include `block_cache_size`, `write_buffer_size` and `max_write_buffer_number`. These can be specified through the `rocksdb.config.setter` configuration. 
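+
+For reference, here is a minimal sketch of such a config setter and of registering it through the Streams configuration (the class name and the sizes below are illustrative, not recommendations):
+
+
+    public static class BasicRocksDBConfig implements RocksDBConfigSetter {
+
+        @Override
+        public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
+            final BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) options.tableFormatConfig();
+            // block_cache_size: give each store a 16 MB block cache
+            tableConfig.setBlockCache(new org.rocksdb.LRUCache(16 * 1024 * 1024L));
+            options.setTableFormatConfig(tableConfig);
+            // write_buffer_size and max_write_buffer_number
+            options.setWriteBufferSize(8 * 1024 * 1024L);
+            options.setMaxWriteBufferNumber(2);
+        }
+
+        @Override
+        public void close(final String storeName, final Options options) {
+            // Per-store objects created above could be released here if they were kept as fields.
+        }
+    }
+
+    // Register the setter in the Streams configuration:
+    Properties props = new Properties();
+    props.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, BasicRocksDBConfig.class);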
+ +Also, we recommend changing RocksDB's default memory allocator, because the default allocator may lead to increased memory consumption. To change the memory allocator to `jemalloc`, you need to set the environment variable `LD_PRELOAD`before you start your Kafka Streams application: + + + # example: install jemalloc (on Debian) + $ apt install -y libjemalloc-dev + # set LD_PRELOAD before you start your Kafka Streams application + $ export LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libjemalloc.so" + +As of 2.3.0 the memory usage across all instances can be bounded, limiting the total off-heap memory of your Kafka Streams application. To do so you must configure RocksDB to cache the index and filter blocks in the block cache, limit the memtable memory through a shared [WriteBufferManager](https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager) and count its memory against the block cache, and then pass the same Cache object to each instance. See [RocksDB Memory Usage](https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB) for details. An example RocksDBConfigSetter implementing this is shown below: + + + public static class BoundedMemoryRocksDBConfig implements RocksDBConfigSetter { + + private static org.rocksdb.Cache cache = new org.rocksdb.LRUCache(TOTAL_OFF_HEAP_MEMORY, -1, false, INDEX_FILTER_BLOCK_RATIO);1 + private static org.rocksdb.WriteBufferManager writeBufferManager = new org.rocksdb.WriteBufferManager(TOTAL_MEMTABLE_MEMORY, cache); + + @Override + public void setConfig(final String storeName, final Options options, final Map configs) { + + BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) options.tableFormatConfig(); + + // These three options in combination will limit the memory used by RocksDB to the size passed to the block cache (TOTAL_OFF_HEAP_MEMORY) + tableConfig.setBlockCache(cache); + tableConfig.setCacheIndexAndFilterBlocks(true); + options.setWriteBufferManager(writeBufferManager); + + // These options are recommended to be set when bounding the total memory + tableConfig.setCacheIndexAndFilterBlocksWithHighPriority(true);2 + tableConfig.setPinTopLevelIndexAndFilter(true); + tableConfig.setBlockSize(BLOCK_SIZE);3 + options.setMaxWriteBufferNumber(N_MEMTABLES); + options.setWriteBufferSize(MEMTABLE_SIZE); + + options.setTableFormatConfig(tableConfig); + } + + @Override + public void close(final String storeName, final Options options) { + // Cache and WriteBufferManager should not be closed here, as the same objects are shared by every store instance. + } + } + +1\. INDEX_FILTER_BLOCK_RATIO can be used to set a fraction of the block cache to set aside for "high priority" (aka index and filter) blocks, preventing them from being evicted by data blocks. The boolean parameter in the cache constructor lets you control whether the cache should enforce a strict memory limit by failing the read or iteration in the rare cases where it might go larger than its capacity. See the full signature of the LRUCache constructor [here](https://github.com/facebook/rocksdb/blob/master/java/src/main/java/org/rocksdb/LRUCache.java#L72). +2\. This must be set in order for INDEX_FILTER_BLOCK_RATIO to take effect (see footnote 1) as described in the [RocksDB docs](https://github.com/facebook/rocksdb/wiki/Block-Cache#caching-index-and-filter-blocks) +3\. 
You may want to modify the default [block size](https://github.com/apache/kafka/blob/2.3/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBStore.java#L79) per these instructions from the [RocksDB docs](https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB#indexes-and-filter-blocks). A larger block size means index blocks will be smaller, but the cached data blocks may contain more cold data that would otherwise be evicted.
+
+Note:
+While we recommend setting at least the above configs, the specific options that yield the best performance are workload dependent and you should consider experimenting with these to determine the best choices for your specific use case. Keep in mind that the optimal configs for one app may not apply to one with a different topology or input topic. In addition to the recommended configs above, you may want to consider using partitioned index filters as described by the [RocksDB docs](https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters).
+
+# Other memory usage
+
+There are other modules inside Apache Kafka that allocate memory during runtime. They include the following:
+
+  * Producer buffering, managed by the producer config `buffer.memory`.
+  * Consumer buffering, currently not strictly managed, but can be indirectly controlled by fetch size, i.e., `fetch.max.bytes` and `fetch.max.wait.ms`.
+  * Both producer and consumer also have separate TCP send / receive buffers that are not counted as the buffering memory. These are controlled by the `send.buffer.bytes` / `receive.buffer.bytes` configs.
+  * Deserialized objects buffering: after `consumer.poll()` returns records, they will be deserialized to extract the timestamp and buffered in the streams space. Currently this is only indirectly controlled by `buffered.records.per.partition`.
+
+**Tip**
+
+**Iterators should be closed explicitly to release resources:** Store iterators (e.g., `KeyValueIterator` and `WindowStoreIterator`) must be closed explicitly upon completion to release resources such as open file handles and in-memory read buffers, or use a try-with-resources statement (available since JDK 7) for this `Closeable` class.
+
+Otherwise, the stream application's memory usage keeps growing while it runs, until it eventually hits an OOM error.
+
+[Previous](/41/streams/developer-guide/interactive-queries) [Next](/41/streams/developer-guide/running-app)
+
+  * [Documentation](/documentation)
+  * [Kafka Streams](/streams)
+  * [Developer Guide](/streams/developer-guide/)
+
+
diff --git a/content/en/41/streams/developer-guide/processor-api.md b/content/en/41/streams/developer-guide/processor-api.md
new file mode 100644
index 000000000..069600b1a
--- /dev/null
+++ b/content/en/41/streams/developer-guide/processor-api.md
@@ -0,0 +1,368 @@
+---
+title: Processor API
+description: 
+weight: 4
+tags: ['kafka', 'docs']
+aliases: 
+keywords: 
+type: docs
+---
+
+# Processor API
+
+The Processor API allows developers to define and connect custom processors and to interact with state stores. With the Processor API, you can define arbitrary stream processors that process one received record at a time, and connect these processors with their associated state stores to compose the processor topology that represents a customized processing logic.
+ +**Table of Contents** + + * Overview + * Defining a Stream Processor + * Unit Testing Processors + * State Stores + * Defining and creating a State Store + * Fault-tolerant State Stores + * Enable or Disable Fault Tolerance of State Stores (Store Changelogs) + * Timestamped State Stores + * Versioned Key-Value State Stores + * Readonly State Stores + * Implementing Custom State Stores + * Connecting Processors and State Stores + * Accessing Processor Context + + + +# Overview + +The Processor API can be used to implement both **stateless** as well as **stateful** operations, where the latter is achieved through the use of state stores. + +**Tip** + +**Combining the DSL and the Processor API:** You can combine the convenience of the DSL with the power and flexibility of the Processor API as described in the section [Applying processors (Processor API integration)](dsl-api.html#streams-developer-guide-dsl-process). + +For a complete list of available API functionality, see the [Streams](/41/javadoc/org/apache/kafka/streams/package-summary.html) API docs. + +# Defining a Stream Processor + +A [stream processor](../core-concepts.html#streams_processor_node) is a node in the processor topology that represents a single processing step. With the Processor API, you can define arbitrary stream processors that processes one received record at a time, and connect these processors with their associated state stores to compose the processor topology. + +You can define a customized stream processor by implementing the `Processor` interface, which provides the `process()` API method. The `process()` method is called on each of the received records. + +The `Processor` interface also has an `init()` method, which is called by the Kafka Streams library during task construction phase. Processor instances should perform any required initialization in this method. The `init()` method passes in a `ProcessorContext` instance, which provides access to the metadata of the currently processed record, including its source Kafka topic and partition, its corresponding message offset, and further such information. You can also use this context instance to schedule a punctuation function (via `ProcessorContext#schedule()`), to forward a new record to the downstream processors (via `ProcessorContext#forward()`), and to request a commit of the current processing progress (via `ProcessorContext#commit()`). Any resources you set up in `init()` can be cleaned up in the `close()` method. Note that Kafka Streams may re-use a single `Processor` object by calling `init()` on it again after `close()`. + +The `Processor` interface takes four generic parameters: `KIn, VIn, KOut, VOut`. These define the input and output types that the processor implementation can handle. `KIn` and `VIn` define the key and value types of the `Record` that will be passed to `process()`. Likewise, `KOut` and `VOut` define the forwarded key and value types for the result `Record` that `ProcessorContext#forward()` will accept. If your processor does not forward any records at all (or if it only forwards `null` keys or values), a best practice is to set the output generic type argument to `Void`. If it needs to forward multiple types that don't share a common superclass, you will have to set the output generic type argument to `Object`. + +Both the `Processor#process()` and the `ProcessorContext#forward()` methods handle records in the form of the `Record` data class. 
This class gives you access to the main components of a Kafka record: the key, value, timestamp and headers. When forwarding records, you can use the constructor to create a new `Record` from scratch, or you can use the convenience builder methods to replace one of the `Record`'s properties and copy over the rest. For example, `inputRecord.withValue(newValue)` would copy the key, timestamp, and headers from `inputRecord` while setting the output record's value to `newValue`. Note that this does not mutate `inputRecord`, but instead creates a shallow copy. Beware that this is only a shallow copy, so if you plan to mutate the key, value, or headers elsewhere in the program, you will want to create a deep copy of those fields yourself. + +In addition to handling incoming records via `Processor#process()`, you have the option to schedule periodic invocation (called "punctuation") in your processor's `init()` method by calling `ProcessorContext#schedule()` and passing it a `Punctuator`. The `PunctuationType` determines what notion of time is used for the punctuation scheduling: either [stream-time](../core-concepts.html#streams_time) or wall-clock-time (by default, stream-time is configured to represent event-time via `TimestampExtractor`). When stream-time is used, `punctuate()` is triggered purely by data because stream-time is determined (and advanced forward) by the timestamps derived from the input data. When there is no new input data arriving, stream-time is not advanced and thus `punctuate()` is not called. + +For example, if you schedule a `Punctuator` function every 10 seconds based on `PunctuationType.STREAM_TIME` and if you process a stream of 60 records with consecutive timestamps from 1 (first record) to 60 seconds (last record), then `punctuate()` would be called 6 times. This happens regardless of the time required to actually process those records. `punctuate()` would be called 6 times regardless of whether processing these 60 records takes a second, a minute, or an hour. + +When wall-clock-time (i.e. `PunctuationType.WALL_CLOCK_TIME`) is used, `punctuate()` is triggered purely by the wall-clock time. Reusing the example above, if the `Punctuator` function is scheduled based on `PunctuationType.WALL_CLOCK_TIME`, and if these 60 records were processed within 20 seconds, `punctuate()` is called 2 times (one time every 10 seconds). If these 60 records were processed within 5 seconds, then no `punctuate()` is called at all. Note that you can schedule multiple `Punctuator` callbacks with different `PunctuationType` types within the same processor by calling `ProcessorContext#schedule()` multiple times inside `init()` method. + +**Attention** + +Stream-time is only advanced when Streams processes records. If there are no records to process, or if Streams is waiting for new records due to the [Task Idling](/#streamsconfigs_max.task.idle.ms) configuration, then the stream time will not advance and `punctuate()` will not be triggered if `PunctuationType.STREAM_TIME` was specified. This behavior is independent of the configured timestamp extractor, i.e., using `WallclockTimestampExtractor` does not enable wall-clock triggering of `punctuate()`. 
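+
+For illustration, here is a minimal sketch (the 10-second intervals and the String key/value types are arbitrary assumptions) of scheduling one punctuator of each type inside `init()`:
+
+    @Override
+    public void init(final ProcessorContext<String, String> context) {
+        // Stream-time punctuation: advances only as record timestamps advance.
+        context.schedule(Duration.ofSeconds(10), PunctuationType.STREAM_TIME,
+            timestamp -> context.commit());
+        // Wall-clock-time punctuation: advances with system time, independent of traffic.
+        context.schedule(Duration.ofSeconds(10), PunctuationType.WALL_CLOCK_TIME,
+            timestamp -> context.commit());
+    }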
+
+**Example**
+
+The following example `Processor` implements a simple word-count algorithm that performs the following actions:
+
+  * In the `init()` method, schedule the punctuation every 1000 time units (the time unit is normally milliseconds, which in this example would translate to punctuation every 1 second) and retrieve the local state store by its name "Counts".
+  * In the `process()` method, upon each received record, split the value string into words, and update their counts in the state store (we will talk about this later in this section).
+  * In the `punctuate()` method, iterate the local state store and send the aggregated counts to the downstream processor (we will talk about downstream processors later in this section), and commit the current stream state.
+
+
+    public class WordCountProcessor implements Processor<String, String, String, String> {
+        private KeyValueStore<String, Integer> kvStore;
+
+        @Override
+        public void init(final ProcessorContext<String, String> context) {
+            context.schedule(Duration.ofSeconds(1), PunctuationType.STREAM_TIME, timestamp -> {
+                try (final KeyValueIterator<String, Integer> iter = kvStore.all()) {
+                    while (iter.hasNext()) {
+                        final KeyValue<String, Integer> entry = iter.next();
+                        context.forward(new Record<>(entry.key, entry.value.toString(), timestamp));
+                    }
+                }
+            });
+            kvStore = context.getStateStore("Counts");
+        }
+
+        @Override
+        public void process(final Record<String, String> record) {
+            final String[] words = record.value().toLowerCase(Locale.getDefault()).split("\\W+");
+
+            for (final String word : words) {
+                final Integer oldValue = kvStore.get(word);
+
+                if (oldValue == null) {
+                    kvStore.put(word, 1);
+                } else {
+                    kvStore.put(word, oldValue + 1);
+                }
+            }
+        }
+
+        @Override
+        public void close() {
+            // close any resources managed by this processor
+            // Note: Do not close any StateStores as these are managed by the library
+        }
+    }
+
+**Note**
+
+**Stateful processing with state stores:** The `WordCountProcessor` defined above can access the currently received record in its `process()` method, and it can leverage state stores to maintain processing state to, for example, remember recently arrived records for stateful processing needs like aggregations and joins. For more information, see the state stores documentation.
+
+# Unit Testing Processors
+
+Kafka Streams comes with a `test-utils` module to help you write unit tests for your processors; see [Unit Testing Processors](testing.html#unit-testing-processors).
+
+# State Stores
+
+To implement a **stateful** `Processor`, you must provide one or more state stores to the processor (_stateless_ processors do not need state stores). State stores can be used to remember recently received input records, to track rolling aggregates, to de-duplicate input records, and more. Another feature of state stores is that they can be [interactively queried](interactive-queries.html#streams-developer-guide-interactive-queries) from other applications, such as a NodeJS-based dashboard or a microservice implemented in Scala or Go.
+
+The available state store types in Kafka Streams have fault tolerance enabled by default.
+
+# Defining and creating a State Store
+
+You can either use one of the available store types or implement your own custom store type. It's common practice to leverage an existing store type via the `Stores` factory.
+
+Note that, when using Kafka Streams, you normally don't create or instantiate state stores directly in your code. Rather, you define state stores indirectly by creating a so-called `StoreBuilder`.
This builder is used by Kafka Streams as a factory to instantiate the actual state stores locally in application instances when and where needed. + +The following store types are available out of the box. + +Store Type | Storage Engine | Fault-tolerant? | Description +---|---|---|--- +Persistent `KeyValueStore` | RocksDB | Yes (enabled by default) | + + * **The recommended store type for most use cases.** + * Stores its data on local disk. + * Storage capacity: managed local state can be larger than the memory (heap space) of an application instance, but must fit into the available local disk space. + * RocksDB settings can be fine-tuned, see [RocksDB configuration](config-streams.html#streams-developer-guide-rocksdb-config). + * Available [store variants](/41/javadoc/org/apache/kafka/streams/state/Stores.html#persistentKeyValueStore\(java.lang.String\)): timestamped key-value store, versioned key-value store, time window key-value store, session window key-value store. + * Use [persistentTimestampedKeyValueStore](/41/javadoc/org/apache/kafka/streams/state/Stores.html#persistentTimestampedKeyValueStore\(java.lang.String\)) when you need a persistent key-(value/timestamp) store that supports put/get/delete and range queries. + * Use [persistentVersionedKeyValueStore](/41/javadoc/org/apache/kafka/streams/state/Stores.html#persistentVersionedKeyValueStore\(java.lang.String,java.time.Duration\)) when you need a persistent, versioned key-(value/timestamp) store that supports put/get/delete and timestamped get operations. + * Use [persistentWindowStore](/41/javadoc/org/apache/kafka/streams/state/Stores.html#persistentWindowStore\(java.lang.String,java.time.Duration,java.time.Duration,boolean\)) or [persistentTimestampedWindowStore](/41/javadoc/org/apache/kafka/streams/state/Stores.html#persistentTimestampedWindowStore\(java.lang.String,java.time.Duration,java.time.Duration,boolean\)) when you need a persistent timeWindowedKey-value or timeWindowedKey-(value/timestamp) store, respectively. + * Use [persistentSessionStore](/41/javadoc/org/apache/kafka/streams/state/Stores.html#persistentSessionStore\(java.lang.String,java.time.Duration\)) when you need a persistent sessionWindowedKey-value store. + + + + + // Creating a persistent key-value store: + // here, we create a `KeyValueStore` named "persistent-counts". + import org.apache.kafka.streams.state.StoreBuilder; + import org.apache.kafka.streams.state.Stores; + + // Using a `KeyValueStoreBuilder` to build a `KeyValueStore`. + StoreBuilder> countStoreSupplier = + Stores.keyValueStoreBuilder( + Stores.persistentKeyValueStore("persistent-counts"), + Serdes.String(), + Serdes.Long()); + KeyValueStore countStore = countStoreSupplier.build(); + +In-memory `KeyValueStore` | - | Yes (enabled by default) | + + * Stores its data in memory. + * Storage capacity: managed local state must fit into memory (heap space) of an application instance. + * Useful when application instances run in an environment where local disk space is either not available or local disk space is wiped in-between app instance restarts. + * Available [store variants](/41/javadoc/org/apache/kafka/streams/state/Stores.html#inMemoryKeyValueStore-java.lang.String-): time window key-value store, session window key-value store. + * Use [TimestampedKeyValueStore](/41/javadoc/org/apache/kafka/streams/state/TimestampedKeyValueStore.html) when you need a key-(value/timestamp) store that supports put/get/delete and range queries. 
+ * Use [TimestampedWindowStore](/41/javadoc/org/apache/kafka/streams/state/TimestampedWindowStore.html) when you need to store windowedKey-(value/timestamp) pairs. + * There is no built-in in-memory, versioned key-value store at this time. + + + + + // Creating an in-memory key-value store: + // here, we create a `KeyValueStore` named "inmemory-counts". + import org.apache.kafka.streams.state.StoreBuilder; + import org.apache.kafka.streams.state.Stores; + + // Using a `KeyValueStoreBuilder` to build a `KeyValueStore`. + StoreBuilder> countStoreSupplier = + Stores.keyValueStoreBuilder( + Stores.inMemoryKeyValueStore("inmemory-counts"), + Serdes.String(), + Serdes.Long()); + KeyValueStore countStore = countStoreSupplier.build(); + +# Fault-tolerant State Stores + +To make state stores fault-tolerant and to allow for state store migration without data loss, a state store can be continuously backed up to a Kafka topic behind the scenes. For example, to migrate a stateful stream task from one machine to another when [elastically adding or removing capacity from your application](running-app.html#streams-developer-guide-execution-scaling). This topic is sometimes referred to as the state store's associated _changelog topic_ , or its _changelog_. For example, if you experience machine failure, the state store and the application's state can be fully restored from its changelog. You can enable or disable this backup feature for a state store. + +Fault-tolerant state stores are backed by a [compacted](https://kafka.apache.org/documentation.html#compaction) changelog topic. The purpose of compacting this topic is to prevent the topic from growing indefinitely, to reduce the storage consumed in the associated Kafka cluster, and to minimize recovery time if a state store needs to be restored from its changelog topic. + +Fault-tolerant windowed state stores are backed by a topic that uses both compaction and deletion. Because of the structure of the message keys that are being sent to the changelog topics, this combination of deletion and compaction is required for the changelog topics of window stores. For window stores, the message keys are composite keys that include the "normal" key and window timestamps. For these types of composite keys it would not be sufficient to only enable compaction to prevent a changelog topic from growing out of bounds. With deletion enabled, old windows that have expired will be cleaned up by Kafka's log cleaner as the log segments expire. The default retention setting is `Windows#maintainMs()` \+ 1 day. You can override this setting by specifying `StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG` in the `StreamsConfig`. + +When you open an `Iterator` from a state store you must call `close()` on the iterator when you are done working with it to reclaim resources; or you can use the iterator from within a try-with-resources statement. If you do not close an iterator, you may encounter an OOM error. + +# Enable or Disable Fault Tolerance of State Stores (Store Changelogs) + +You can enable or disable fault tolerance for a state store by enabling or disabling the change logging of the store through `enableLogging()` and `disableLogging()`. You can also fine-tune the associated topic's configuration if needed. 
+
+Example for disabling fault-tolerance:
+
+    import org.apache.kafka.streams.state.StoreBuilder;
+    import org.apache.kafka.streams.state.Stores;
+
+    StoreBuilder<KeyValueStore<String, Long>> countStoreSupplier = Stores.keyValueStoreBuilder(
+      Stores.persistentKeyValueStore("Counts"),
+      Serdes.String(),
+      Serdes.Long())
+      .withLoggingDisabled(); // disable backing up the store to a changelog topic
+
+**Attention**
+
+If the changelog is disabled then the attached state store is no longer fault tolerant and it can't have any [standby replicas](config-streams.html#streams-developer-guide-standby-replicas).
+
+Here is an example for enabling fault tolerance, with additional changelog-topic configuration: You can add any log config from [kafka.log.LogConfig](https://github.com/apache/kafka/blob/trunk/core/src/main/scala/kafka/log/LogConfig.scala). Unrecognized configs will be ignored.
+
+    import org.apache.kafka.streams.state.StoreBuilder;
+    import org.apache.kafka.streams.state.Stores;
+
+    Map<String, String> changelogConfig = new HashMap<>();
+    // override min.insync.replicas
+    changelogConfig.put(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1");
+
+    StoreBuilder<KeyValueStore<String, Long>> countStoreSupplier = Stores.keyValueStoreBuilder(
+      Stores.persistentKeyValueStore("Counts"),
+      Serdes.String(),
+      Serdes.Long())
+      .withLoggingEnabled(changelogConfig); // enable change logging, with custom changelog settings
+
+# Timestamped State Stores
+
+KTables always store timestamps by default. A timestamped state store improves stream processing semantics and enables handling out-of-order data in source KTables, detecting out-of-order joins and aggregations, and getting the timestamp of the latest update in an Interactive Query.
+
+You can query timestamped state stores both with and without a timestamp.
+
+**Upgrade note:** All users upgrade with a single rolling bounce per instance.
+
+  * For Processor API users, nothing changes in existing applications, and you have the option of using the timestamped stores.
+  * For DSL operators, store data is upgraded lazily in the background.
+  * No upgrade happens if you provide a custom XxxBytesStoreSupplier, but you can opt-in by implementing the [TimestampedBytesStore](/41/javadoc/org/apache/kafka/streams/state/TimestampedBytesStore.html) interface. In this case, the old format is retained, and Streams uses a proxy store that removes/adds timestamps on read/write.
+
+# Versioned Key-Value State Stores
+
+Versioned key-value state stores are available since Kafka Streams 3.5. Rather than storing a single record version (value and timestamp) per key, versioned state stores may store multiple record versions per key. This allows versioned state stores to support timestamped retrieval operations to return the latest record (per key) as of a specified timestamp.
+
+You can create a persistent, versioned state store by passing a [VersionedBytesStoreSupplier](/41/javadoc/org/apache/kafka/streams/state/Stores.html#persistentVersionedKeyValueStore\(java.lang.String,java.time.Duration\)) to the [versionedKeyValueStoreBuilder](/41/javadoc/org/apache/kafka/streams/state/Stores.html#versionedKeyValueStoreBuilder\(java.lang.String,java.time.Duration\)), or by implementing your own [VersionedKeyValueStore](/41/javadoc/org/apache/kafka/streams/state/VersionedKeyValueStore.html).
+
+Each versioned store has an associated, fixed-duration _history retention_ parameter which specifies how long old record versions should be kept for.
In particular, a versioned store guarantees to return accurate results for timestamped retrieval operations where the timestamp being queried is within history retention of the current observed stream time. + +History retention also doubles as its _grace period_ , which determines how far back in time out-of-order writes to the store will be accepted. A versioned store will not accept writes (inserts, updates, or deletions) if the timestamp associated with the write is older than the current observed stream time by more than the grace period. Stream time in this context is tracked per-partition, rather than per-key, which means it's important that grace period (i.e., history retention) be set high enough to accommodate a record with one key arriving out-of-order relative to a record for another key. + +Because the memory footprint of versioned key-value stores is higher than that of non-versioned key-value stores, you may want to adjust your [RocksDB memory settings](memory-mgmt.html#streams-developer-guide-memory-management-rocksdb) accordingly. Benchmarking your application with versioned stores is also advised as performance is expected to be worse than when using non-versioned stores. + +Versioned stores do not support caching or interactive queries at this time. Also, window stores and global tables may not be versioned. + +**Upgrade note:** Versioned state stores are opt-in only; no automatic upgrades from non-versioned to versioned stores will take place. + +Upgrades are supported from persistent, non-versioned key-value stores to persistent, versioned key-value stores as long as the original store has the same changelog topic format as the versioned store being upgraded to. Both persistent [key-value stores](/41/javadoc/org/apache/kafka/streams/state/Stores.html#persistentKeyValueStore\(java.lang.String\)) and [timestamped key-value stores](/41/javadoc/org/apache/kafka/streams/state/Stores.html#persistentTimestampedKeyValueStore\(java.lang.String\)) share the same changelog topic format as [persistent versioned key-value stores](/41/javadoc/org/apache/kafka/streams/state/Stores.html#persistentVersionedKeyValueStore\(java.lang.String,java.time.Duration\)), and therefore both are eligible for upgrades. + +If you wish to upgrade an application using persistent, non-versioned key-value stores to use persistent, versioned key-value stores instead, you can perform the following procedure: + + * Stop all application instances, and [clear any local state directories](app-reset-tool.html#streams-developer-guide-reset-local-environment) for the store(s) being upgraded. + * Update your application code to use versioned stores where desired. + * Update your changelog topic configs, for the relevant state stores, to set the value of `min.compaction.lag.ms` to be at least your desired history retention. History retention plus one day is recommended as buffer for the use of broker wall clock time during compaction. + * Restart your application instances and allow time for the versioned stores to rebuild state from changelog. + + + +# ReadOnly State Stores + +A read-only state store materialized the data from its input topic. It also uses the input topic for fault-tolerance, and thus does not have an additional changelog topic (the input topic is re-used as changelog). Thus, the input topic should be configured with [log compaction](https://kafka.apache.org/documentation.html#compaction). 
Note that no other processor should modify the content of the state store, and the only writer should be the associated "state update processor"; other processors may read the content of the read-only store.
+
+**Note:** Beware of the partitioning requirements when using read-only state stores for lookups during processing. You might want to make sure that the original changelog topic is co-partitioned with the processors reading the read-only state store.
+
+# Implementing Custom State Stores
+
+You can use the built-in state store types or implement your own. The primary interface to implement for the store is `org.apache.kafka.streams.processor.StateStore`. Kafka Streams also has a few extended interfaces such as `KeyValueStore` and `VersionedKeyValueStore`.
+
+Note that your customized `org.apache.kafka.streams.processor.StateStore` implementation also needs to provide the logic on how to restore the state via the `org.apache.kafka.streams.processor.StateRestoreCallback` or `org.apache.kafka.streams.processor.BatchingStateRestoreCallback` interface. Details on how to instantiate these interfaces can be found in the [javadocs](/41/javadoc/org/apache/kafka/streams/processor/StateStore.html).
+
+You also need to provide a "builder" for the store by implementing the `org.apache.kafka.streams.state.StoreBuilder` interface, which Kafka Streams uses to create instances of your store.
+
+# Accessing Processor Context
+
+As mentioned in the Defining a Stream Processor section, a `ProcessorContext` controls the processing workflow, such as scheduling a punctuation function and committing the current processed state.
+
+This object can also be used to access metadata related to the application, like `applicationId`, `taskId`, and `stateDir`, as well as `RecordMetadata` such as the `topic`, `partition`, and `offset` of the current record.
+
+# Connecting Processors and State Stores
+
+Now that a processor (`WordCountProcessor`) and the state stores have been defined, you can construct the processor topology by connecting these processors and state stores together by using the `Topology` instance. In addition, you can add source processors with the specified Kafka topics to generate input data streams into the topology, and sink processors with the specified Kafka topics to generate output data streams out of the topology.
+
+Here is an example implementation:
+
+    Topology builder = new Topology();
+    // add the source processor node that takes Kafka topic "source-topic" as input
+    builder.addSource("Source", "source-topic")
+        // add the WordCountProcessor node which takes the source processor as its upstream processor
+        .addProcessor("Process", () -> new WordCountProcessor(), "Source")
+        // add the count store associated with the WordCountProcessor processor
+        .addStateStore(countStoreBuilder, "Process")
+        // add the sink processor node that takes Kafka topic "sink-topic" as output
+        // and the WordCountProcessor node as its upstream processor
+        .addSink("Sink", "sink-topic", "Process");
+
+Here is a quick explanation of this example:
+
+  * A source processor node named `"Source"` is added to the topology using the `addSource` method, with one Kafka topic `"source-topic"` fed to it.
+  * A processor node named `"Process"` with the pre-defined `WordCountProcessor` logic is then added as the downstream processor of the `"Source"` node using the `addProcessor` method.
+  * A predefined persistent key-value state store is created and associated with the `"Process"` node, using `countStoreBuilder`.
+  * A sink processor node is then added to complete the topology using the `addSink` method, taking the `"Process"` node as its upstream processor and writing to a separate `"sink-topic"` Kafka topic (note that users can also use another overloaded variant of `addSink` to dynamically determine the Kafka topic to write to for each received record from the upstream processor).
+
+In some cases, it may be more convenient to add and connect a state store at the same time as you add the processor to the topology. This can be done by implementing `ConnectedStoreProvider#stores()` on the `ProcessorSupplier` instead of calling `Topology#addStateStore()`, like this:
+
+    Topology builder = new Topology();
+    // add the source processor node that takes Kafka "source-topic" as input
+    builder.addSource("Source", "source-topic")
+        // add the WordCountProcessor node which takes the source processor as its upstream processor.
+        // the ProcessorSupplier provides the count store associated with the WordCountProcessor
+        .addProcessor("Process", new ProcessorSupplier<String, String, String, String>() {
+            public Processor<String, String, String, String> get() {
+                return new WordCountProcessor();
+            }
+
+            public Set<StoreBuilder<?>> stores() {
+                final StoreBuilder<KeyValueStore<String, Long>> countsStoreBuilder =
+                    Stores
+                        .keyValueStoreBuilder(
+                            Stores.persistentKeyValueStore("Counts"),
+                            Serdes.String(),
+                            Serdes.Long()
+                        );
+                return Collections.singleton(countsStoreBuilder);
+            }
+        }, "Source")
+        // add the sink processor node that takes Kafka topic "sink-topic" as output
+        // and the WordCountProcessor node as its upstream processor
+        .addSink("Sink", "sink-topic", "Process");
+
+This allows a processor to "own" state stores, effectively encapsulating their usage from the user wiring the topology. Multiple processors that share a state store may provide the same store with this technique, as long as the `StoreBuilder` is the same instance.
+
+In these topologies, the `"Process"` stream processor node is considered a downstream processor of the `"Source"` node, and an upstream processor of the `"Sink"` node. As a result, whenever the `"Source"` node forwards a newly fetched record from Kafka to its downstream `"Process"` node, the `WordCountProcessor#process()` method is triggered to process the record and update the associated state store. Whenever `context#forward()` is called in the `WordCountProcessor#punctuate()` method, the aggregate records will be sent via the `"Sink"` processor node to the Kafka topic `"sink-topic"`. Note that in the `WordCountProcessor` implementation, you must refer to the same store name `"Counts"` when accessing the key-value store, otherwise an exception will be thrown at runtime, indicating that the state store cannot be found. If the state store is not associated with the processor in the `Topology` code, accessing it in the processor's `init()` method will also throw an exception at runtime, indicating the state store is not accessible from this processor.
+
+Note that the `Topology#addProcessor` function takes a `ProcessorSupplier` as argument, and that the supplier pattern requires that a new `Processor` instance is returned each time `ProcessorSupplier#get()` is called. Creating a single `Processor` object and returning the same object reference in `ProcessorSupplier#get()` would violate the supplier pattern and lead to runtime exceptions. So remember not to provide a singleton `Processor` instance to `Topology`. The `ProcessorSupplier` should always generate a new instance each time `ProcessorSupplier#get()` gets called.
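+
+As a short illustration (reusing the `builder` and `WordCountProcessor` from the examples above, with the second call shown only as an anti-pattern), the difference looks roughly like this:
+
+    // Correct: the supplier creates a fresh Processor for every get() call
+    builder.addProcessor("Process", WordCountProcessor::new, "Source");
+
+    // Anti-pattern (do NOT do this): every get() call would return the same instance,
+    // so multiple stream tasks would end up sharing one Processor object
+    final WordCountProcessor shared = new WordCountProcessor();
+    // builder.addProcessor("Process", () -> shared, "Source");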
+
+Now that you have fully defined your processor topology in your application, you can proceed to [running the Kafka Streams application](running-app.html#streams-developer-guide-execution).
+
+[Previous](/41/streams/developer-guide/dsl-api) [Next](/41/streams/developer-guide/datatypes)
+
+  * [Documentation](/documentation)
+  * [Kafka Streams](/streams)
+  * [Developer Guide](/streams/developer-guide/)
+
+
diff --git a/content/en/41/streams/developer-guide/running-app.md b/content/en/41/streams/developer-guide/running-app.md
new file mode 100644
index 000000000..f2ac23fcb
--- /dev/null
+++ b/content/en/41/streams/developer-guide/running-app.md
@@ -0,0 +1,99 @@
+---
+title: Running Streams Applications
+description: 
+weight: 10
+tags: ['kafka', 'docs']
+aliases: 
+keywords: 
+type: docs
+---
+
+# Running Streams Applications
+
+You can run Java applications that use the Kafka Streams library without any additional configuration or requirements. Kafka Streams also provides the ability to receive notification of the various states of the application. The ability to monitor the runtime status is discussed in [the monitoring guide](/#kafka_streams_monitoring).
+
+**Table of Contents**
+
+  * Starting a Kafka Streams application
+  * Elastic scaling of your application
+  * Adding capacity to your application
+  * Removing capacity from your application
+  * State restoration during workload rebalance
+  * Determining how many application instances to run
+
+
+
+# Starting a Kafka Streams application
+
+You can package your Java application as a fat JAR file and then start the application like this:
+
+    # Start the application in class `com.example.MyStreamsApp`
+    # from the fat JAR named `path-to-app-fatjar.jar`.
+    $ java -cp path-to-app-fatjar.jar com.example.MyStreamsApp
+
+When you start your application, you are launching a Kafka Streams instance of your application. You can run multiple instances of your application. A common scenario is that there are multiple instances of your application running in parallel. For more information, see [Parallelism Model](../architecture.html#streams_architecture_tasks).
+
+When the application instance starts running, the defined processor topology will be initialized as one or more stream tasks. If the processor topology defines any state stores, these are also constructed during the initialization period. For more information, see the State restoration during workload rebalance section.
+
+# Elastic scaling of your application
+
+Kafka Streams makes your stream processing applications elastic and scalable. You can add and remove processing capacity dynamically during application runtime without any downtime or data loss. This makes your applications resilient in the face of failures and allows you to perform maintenance as needed (e.g. rolling upgrades).
+
+For more information about this elasticity, see the [Parallelism Model](../architecture.html#streams_architecture_tasks) section. Kafka Streams leverages the Kafka group management functionality, which is built right into the [Kafka wire protocol](https://cwiki.apache.org/confluence/x/uxvVAQ). It is the foundation that enables the elasticity of Kafka Streams applications: members of a group coordinate and collaborate jointly on the consumption and processing of data in Kafka. Additionally, Kafka Streams provides stateful processing and allows for fault-tolerant state in environments where application instances may come and go at any time.
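+
+As a point of reference for the scaling discussion below, a minimal sketch of the entry point that each such instance would run (assuming `topology` and `props` are built elsewhere) looks like this:
+
+    // Each instance joins the application's consumer group when started
+    // and leaves it again when closed, handing its tasks to the remaining members.
+    final KafkaStreams streams = new KafkaStreams(topology, props);
+    streams.start();
+    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));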
+
+# Adding capacity to your application
+
+If you need more processing capacity for your stream processing application, you can simply start another instance of your stream processing application, e.g. on another machine, in order to scale out. The instances of your application will become aware of each other and automatically begin to share the processing work. More specifically, what will be handed over from the existing instances to the new instances is (some of) the stream tasks that have been run by the existing instances. Moving stream tasks from one instance to another results in moving the processing work plus any internal state of these stream tasks (the state of a stream task will be re-created in the target instance by restoring the state from its corresponding changelog topic).
+
+The various instances of your application each run in their own JVM process, which means that each instance can leverage all the processing capacity that is available to their respective JVM process (minus the capacity that any non-Kafka-Streams part of your application may be using). This explains why running additional instances will grant your application additional processing capacity. The exact capacity you will be adding by running a new instance depends of course on the environment in which the new instance runs: available CPU cores, available main memory and Java heap space, local storage, network bandwidth, and so on. Similarly, if you stop any of the running instances of your application, then you are removing and freeing up the respective processing capacity.
+
+![](/41/images/streams-elastic-scaling-1.png)
+
+Before adding capacity: only a single instance of your Kafka Streams application is running. At this point the corresponding Kafka consumer group of your application contains only a single member (this instance). All data is being read and processed by this single instance.
+
+![](/41/images/streams-elastic-scaling-2.png)
+
+After adding capacity: now two additional instances of your Kafka Streams application are running, and they have automatically joined the application's Kafka consumer group for a total of three current members. These three instances are automatically splitting the processing work between each other. The splitting is based on the Kafka topic partitions from which data is being read.
+
+# Removing capacity from your application
+
+To remove processing capacity, you can stop running stream processing application instances (e.g., shut down two of the four instances). The stopped instances will automatically leave the application's consumer group, and the remaining instances of your application will automatically take over the processing work. The remaining instances take over the stream tasks that were run by the stopped instances. Moving stream tasks from one instance to another results in moving the processing work plus any internal state of these stream tasks. The state of a stream task is recreated in the target instance from its changelog topic.
+
+![](/41/images/streams-elastic-scaling-3.png)
+
+# State restoration during workload rebalance
+
+When a task is migrated, the task processing state is fully restored before the application instance resumes processing. This guarantees the correct processing results. In Kafka Streams, state restoration is usually done by replaying the corresponding changelog topic to reconstruct the state store. To minimize this changelog-based restoration latency, you can use replicated local state stores by specifying `num.standby.replicas`.
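+
+For example, a minimal sketch of enabling standby replicas (the value of one replica is an arbitrary choice) looks like this:
+
+    // Keep one extra, continuously-updated copy of each task's state on another instance
+    Properties props = new Properties();
+    props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
+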
When a stream task is initialized or re-initialized on the application instance, its state store is restored like this:
+
+  * If no local state store exists, the changelog is replayed from the earliest to the current offset. This reconstructs the local state store to the most recent snapshot.
+  * If a local state store exists, the changelog is replayed from the previously checkpointed offset. The changes are applied and the state is restored to the most recent snapshot. This method takes less time because it is applying a smaller portion of the changelog.
+
+
+
+For more information, see [Standby Replicas](config-streams.html#num-standby-replicas).
+
+As of version 2.6, Streams does most of a task's restoration in the background through warmup replicas. These will be assigned to instances that need to restore a lot of state for a task. A stateful active task will only be assigned to an instance once its state is within the configured [`acceptable.recovery.lag`](config-streams.html#acceptable-recovery-lag), if one exists. This means that most of the time, a task migration will **not** result in downtime for that task. It will remain active on the instance that's already caught up, while the instance that it's being migrated to works on restoring the state. Streams will [regularly probe](config-streams.html#probing-rebalance-interval-ms) for warmup tasks that have finished restoring and transition them to active tasks when ready.
+
+Note that the one exception to this task availability is if none of the instances have a caught up version of that task. In that case, we have no choice but to assign the active task to an instance that is not caught up and will have to block further processing on restoration of the task's state from the changelog. If high availability is important for your application, it is highly recommended to enable standby replicas.
+
+# Determining how many application instances to run
+
+The parallelism of a Kafka Streams application is primarily determined by how many partitions the input topics have. For example, if your application reads from a single topic that has ten partitions, then you can run up to ten instances of your application. You can run further instances, but these will be idle.
+
+The number of topic partitions is the upper limit for the parallelism of your Kafka Streams application and for the number of running instances of your application.
+
+To achieve balanced workload processing across application instances and to prevent processing hotspots, you should distribute data and processing workloads:
+
+  * Data should be equally distributed across topic partitions. For example, if two topic partitions each have 1 million messages, this is better than one partition holding 2 million messages and the other none.
+  * Processing workload should be equally distributed across topic partitions. For example, if the time to process messages varies widely, then it is better to spread the processing-intensive messages across partitions rather than storing these messages within the same partition.
+
+
+
+[Previous](/41/streams/developer-guide/memory-mgmt) [Next](/41/streams/developer-guide/manage-topics)
+
+  * [Documentation](/documentation)
+  * [Kafka Streams](/streams)
+  * [Developer Guide](/streams/developer-guide/)
+
+
diff --git a/content/en/41/streams/developer-guide/security.md b/content/en/41/streams/developer-guide/security.md
new file mode 100644
index 000000000..afb3962ce
--- /dev/null
+++ b/content/en/41/streams/developer-guide/security.md
@@ -0,0 +1,103 @@
+---
+title: Streams Security
+description: 
+weight: 12
+tags: ['kafka', 'docs']
+aliases: 
+keywords: 
+type: docs
+---
+
+# Streams Security
+
+**Table of Contents**
+
+  * Required ACL setting for secure Kafka clusters
+  * Security example
+
+
+
+Kafka Streams natively integrates with [Kafka's security features](../../../documentation.html#security) and supports all of the client-side security features in Kafka. Streams leverages the [Java Producer and Consumer API](../../../documentation.html#api).
+
+To secure your stream processing applications, configure the security settings in the corresponding Kafka producer and consumer clients, and then specify the corresponding configuration settings in your Kafka Streams application.
+
+Kafka supports cluster encryption and authentication, including a mix of authenticated and unauthenticated, and encrypted and non-encrypted clients. Using security is optional.
+
+Here are a few relevant client-side security features:
+
+Encrypt data-in-transit between your applications and Kafka brokers
+    You can enable the encryption of the client-server communication between your applications and the Kafka brokers. For example, you can configure your applications to always use encryption when reading and writing data to and from Kafka. This is critical when reading and writing data across security domains such as internal network, public internet, and partner networks.
+Client authentication
+    You can enable client authentication for connections from your application to Kafka brokers. For example, you can define that only specific applications are allowed to connect to your Kafka cluster.
+Client authorization
+    You can enable client authorization of read and write operations by your applications. For example, you can define that only specific applications are allowed to read from a Kafka topic. You can also restrict write access to Kafka topics to prevent data pollution or fraudulent activities.
+
+For more information about the security features in Apache Kafka, see [Kafka Security](../../../documentation.html#security).
+
+# Required ACL setting for secure Kafka clusters
+
+Kafka clusters can use ACLs to control access to resources (like the ability to create topics), and for such clusters each client, including Kafka Streams, is required to authenticate as a particular user in order to be authorized with appropriate access. In particular, when Streams applications are run against a secured Kafka cluster, the principal running the application must have the ACL set so that the application has the permissions to create, read and write [internal topics](manage-topics.html#streams-developer-guide-topics-internal).
+
+To avoid providing this permission to your application, you can create the required internal topics manually. If the internal topics exist, Kafka Streams will not try to recreate them. Note that the internal repartition and changelog topics must be created with the correct number of partitions--otherwise, Kafka Streams will fail on startup.
The topics must be created with the same number of partitions as your input topic, or if there are multiple topics, the maximum number of partitions across all input topics. Additionally, changelog topics must be created with log compaction enabled--otherwise, your application might lose data. For changelog topics for windowed KTables, apply "delete,compact" and set the retention time based on the corresponding store retention time. To avoid premature deletion, add a delta to the store retention time. By default, Kafka Streams adds 24 hours to the store retention time. You can find out more about the names of the required internal topics via `Topology#describe()`. All internal topics follow the naming pattern `<application.id>-<operatorName>-<suffix>` where the `suffix` is either `repartition` or `changelog`. Note that there is no guarantee about this naming pattern in future releases--it's not part of the public API.
+
+Since all internal topics as well as the embedded consumer group name are prefixed with the [application id](/41/streams/developer-guide/config-streams.html#required-configuration-parameters), it is recommended to use ACLs on a prefixed resource pattern to allow clients to manage all topics and consumer groups that start with this prefix, e.g., `--resource-pattern-type prefixed --topic your.application.id --operation All` (see [KIP-277](https://cwiki.apache.org/confluence/x/zlOHB) and [KIP-290](https://cwiki.apache.org/confluence/x/QpvLB) for details).
+
+# Security example
+
+The purpose of this example is to configure a Kafka Streams application to enable client authentication and encrypt data-in-transit when communicating with its Kafka cluster.
+
+This example assumes that the Kafka brokers in the cluster already have their security set up and that the necessary SSL certificates are available to the application in the local filesystem locations. For example, if you are using Docker then you must also include these SSL certificates in the correct locations within the Docker image.
+
+The snippet below shows the settings to enable client authentication and SSL encryption for data-in-transit between your Kafka Streams application and the Kafka cluster it is reading and writing from:
+
+    # Essential security settings to enable client authentication and SSL encryption
+    bootstrap.servers=kafka.example.com:9093
+    security.protocol=SSL
+    ssl.truststore.location=/etc/security/tls/kafka.client.truststore.jks
+    ssl.truststore.password=test1234
+    ssl.keystore.location=/etc/security/tls/kafka.client.keystore.jks
+    ssl.keystore.password=test1234
+    ssl.key.password=test1234
+
+Configure these settings in the application for your `Properties` instance. These settings will encrypt any data-in-transit that is being read from or written to Kafka, and your application will authenticate itself against the Kafka brokers that it is communicating with. Note that this example does not cover client authorization.
+
+    // Code of your Java application that uses the Kafka Streams library
+    Properties settings = new Properties();
+    settings.put(StreamsConfig.APPLICATION_ID_CONFIG, "secure-kafka-streams-app");
+    // Where to find secure Kafka brokers. Here, it's on port 9093.
+    settings.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka.example.com:9093");
+    //
+    // ...further non-security related settings may follow here...
+    //
+    // Security settings.
+    // 1. These settings must match the security settings of the secure Kafka cluster.
+    // 2. The SSL trust store and key store files must be locally accessible to the application.
+    settings.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
+    settings.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/etc/security/tls/kafka.client.truststore.jks");
+    settings.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "test1234");
+    settings.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "/etc/security/tls/kafka.client.keystore.jks");
+    settings.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "test1234");
+    settings.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, "test1234");
+
+If you incorrectly configure a security setting in your application, it will fail at runtime, typically right after you start it. For example, if you enter an incorrect password for the `ssl.keystore.password` setting, an error message similar to this would be logged and then the application would terminate:
+
+    # Misconfigured ssl.keystore.password
+    Exception in thread "main" org.apache.kafka.common.KafkaException: Failed to construct kafka producer
+    [...snip...]
+    Caused by: org.apache.kafka.common.KafkaException: org.apache.kafka.common.KafkaException:
+       java.io.IOException: Keystore was tampered with, or password was incorrect
+    [...snip...]
+    Caused by: java.security.UnrecoverableKeyException: Password verification failed
+
+Monitor your Kafka Streams application log files for such error messages to spot any misconfigured applications quickly.
+
+[Previous](/41/streams/developer-guide/manage-topics) [Next](/41/streams/developer-guide/app-reset-tool)
+
+  * [Documentation](/documentation)
+  * [Kafka Streams](/streams)
+  * [Developer Guide](/streams/developer-guide/)
+
+
diff --git a/content/en/41/streams/developer-guide/testing.md b/content/en/41/streams/developer-guide/testing.md
new file mode 100644
index 000000000..1ce4a5cdc
--- /dev/null
+++ b/content/en/41/streams/developer-guide/testing.md
@@ -0,0 +1,318 @@
+---
+title: Testing a Streams Application
+description: 
+weight: 7
+tags: ['kafka', 'docs']
+aliases: 
+keywords: 
+type: docs
+---
+
+# Testing Kafka Streams
+
+**Table of Contents**
+
+  * Importing the test utilities
+  * Testing Streams applications
+  * Unit testing Processors
+
+
+
+# Importing the test utilities
+
+To test a Kafka Streams application, Kafka provides a test-utils artifact that can be added as a regular dependency to your test code base. Example `pom.xml` snippet when using Maven:
+
+    <dependency>
+        <groupId>org.apache.kafka</groupId>
+        <artifactId>kafka-streams-test-utils</artifactId>
+        <version>4.1.0</version>
+        <scope>test</scope>
+    </dependency>
+
+# Testing a Streams application
+
+The test-utils package provides a `TopologyTestDriver` that can be used to pipe data through a `Topology` that is either assembled manually using the Processor API or via the DSL using `StreamsBuilder`. The test driver simulates the library runtime that continuously fetches records from input topics and processes them by traversing the topology. You can use the test driver to verify that your specified processor topology computes the correct result with the manually piped-in data records. The test driver captures the resulting records and allows you to query its embedded state stores.
+
+
+    // Processor API
+    Topology topology = new Topology();
+    topology.addSource("sourceProcessor", "input-topic");
+    topology.addProcessor("processor", ..., "sourceProcessor");
+    topology.addSink("sinkProcessor", "output-topic", "processor");
+    // or
+    // using DSL
+    StreamsBuilder builder = new StreamsBuilder();
+    builder.stream("input-topic").filter(...).to("output-topic");
+    Topology topology = builder.build();
+
+    // create test driver
+    TopologyTestDriver testDriver = new TopologyTestDriver(topology);
+
+With the test driver, you can create a `TestInputTopic`, giving the topic name and the corresponding serializers. `TestInputTopic` provides various methods to pipe new message values, keys and values, or lists of `KeyValue` objects.
+
+    TestInputTopic<String, Long> inputTopic = testDriver.createInputTopic("input-topic", stringSerde.serializer(), longSerde.serializer());
+    inputTopic.pipeInput("key", 42L);
+
+To verify the output, you can use `TestOutputTopic` where you configure the topic and the corresponding deserializers during initialization. It offers helper methods to read only certain parts of the result records or the collection of records. For example, you can validate a returned `KeyValue` with standard assertions if you only care about the key and value, but not the timestamp of the result record.
+
+    TestOutputTopic<String, Long> outputTopic = testDriver.createOutputTopic("output-topic", stringSerde.deserializer(), longSerde.deserializer());
+    assertThat(outputTopic.readKeyValue(), equalTo(new KeyValue<>("key", 42L)));
+
+`TopologyTestDriver` supports punctuations, too. Event-time punctuations are triggered automatically based on the processed records' timestamps. Wall-clock-time punctuations can also be triggered by advancing the test driver's wall-clock-time (the driver mocks wall-clock-time internally to give users control over it).
+
+    testDriver.advanceWallClockTime(Duration.ofSeconds(20));
+
+Additionally, you can access state stores via the test driver before or after a test. Accessing stores before a test is useful to pre-populate a store with some initial values. After data was processed, expected updates to the store can be verified.
+
+    KeyValueStore<String, Long> store = testDriver.getKeyValueStore("store-name");
+
+Note that you should always close the test driver at the end to make sure all resources are released properly.
+
+    testDriver.close();
+
+# Example
+
+The following example demonstrates how to use the test driver and helper classes. The example creates a topology that computes the maximum value per key using a key-value store. While processing, no output is generated, but only the store is updated. Output is only sent downstream based on event-time and wall-clock punctuations.
+ + + private TopologyTestDriver testDriver; + private TestInputTopic inputTopic; + private TestOutputTopic outputTopic; + private KeyValueStore store; + + private Serde stringSerde = new Serdes.StringSerde(); + private Serde longSerde = new Serdes.LongSerde(); + + @Before + public void setup() { + Topology topology = new Topology(); + topology.addSource("sourceProcessor", "input-topic"); + topology.addProcessor("aggregator", new CustomMaxAggregatorSupplier(), "sourceProcessor"); + topology.addStateStore( + Stores.keyValueStoreBuilder( + Stores.inMemoryKeyValueStore("aggStore"), + Serdes.String(), + Serdes.Long()).withLoggingDisabled(), // need to disable logging to allow store pre-populating + "aggregator"); + topology.addSink("sinkProcessor", "result-topic", "aggregator"); + + // setup test driver + Properties props = new Properties(); + props.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); + props.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Long().getClass().getName()); + testDriver = new TopologyTestDriver(topology, props); + + // setup test topics + inputTopic = testDriver.createInputTopic("input-topic", stringSerde.serializer(), longSerde.serializer()); + outputTopic = testDriver.createOutputTopic("result-topic", stringSerde.deserializer(), longSerde.deserializer()); + + // pre-populate store + store = testDriver.getKeyValueStore("aggStore"); + store.put("a", 21L); + } + + @After + public void tearDown() { + testDriver.close(); + } + + @Test + public void shouldFlushStoreForFirstInput() { + inputTopic.pipeInput("a", 1L); + assertThat(outputTopic.readKeyValue(), equalTo(new KeyValue<>("a", 21L))); + assertThat(outputTopic.isEmpty(), is(true)); + } + + @Test + public void shouldNotUpdateStoreForSmallerValue() { + inputTopic.pipeInput("a", 1L); + assertThat(store.get("a"), equalTo(21L)); + assertThat(outputTopic.readKeyValue(), equalTo(new KeyValue<>("a", 21L))); + assertThat(outputTopic.isEmpty(), is(true)); + } + + @Test + public void shouldNotUpdateStoreForLargerValue() { + inputTopic.pipeInput("a", 42L); + assertThat(store.get("a"), equalTo(42L)); + assertThat(outputTopic.readKeyValue(), equalTo(new KeyValue<>("a", 42L))); + assertThat(outputTopic.isEmpty(), is(true)); + } + + @Test + public void shouldUpdateStoreForNewKey() { + inputTopic.pipeInput("b", 21L); + assertThat(store.get("b"), equalTo(21L)); + assertThat(outputTopic.readKeyValue(), equalTo(new KeyValue<>("a", 21L))); + assertThat(outputTopic.readKeyValue(), equalTo(new KeyValue<>("b", 21L))); + assertThat(outputTopic.isEmpty(), is(true)); + } + + @Test + public void shouldPunctuateIfEvenTimeAdvances() { + final Instant recordTime = Instant.now(); + inputTopic.pipeInput("a", 1L, recordTime); + assertThat(outputTopic.readKeyValue(), equalTo(new KeyValue<>("a", 21L))); + + inputTopic.pipeInput("a", 1L, recordTime); + assertThat(outputTopic.isEmpty(), is(true)); + + inputTopic.pipeInput("a", 1L, recordTime.plusSeconds(10L)); + assertThat(outputTopic.readKeyValue(), equalTo(new KeyValue<>("a", 21L))); + assertThat(outputTopic.isEmpty(), is(true)); + } + + @Test + public void shouldPunctuateIfWallClockTimeAdvances() { + testDriver.advanceWallClockTime(Duration.ofSeconds(60)); + assertThat(outputTopic.readKeyValue(), equalTo(new KeyValue<>("a", 21L))); + assertThat(outputTopic.isEmpty(), is(true)); + } + + public class CustomMaxAggregatorSupplier implements ProcessorSupplier { + @Override + public Processor get() { + return new CustomMaxAggregator(); + } + 
} + + public class CustomMaxAggregator implements Processor { + ProcessorContext context; + private KeyValueStore store; + + @SuppressWarnings("unchecked") + @Override + public void init(ProcessorContext context) { + this.context = context; + context.schedule(Duration.ofSeconds(60), PunctuationType.WALL_CLOCK_TIME, time -> flushStore()); + context.schedule(Duration.ofSeconds(10), PunctuationType.STREAM_TIME, time -> flushStore()); + store = (KeyValueStore) context.getStateStore("aggStore"); + } + + @Override + public void process(String key, Long value) { + Long oldValue = store.get(key); + if (oldValue == null || value > oldValue) { + store.put(key, value); + } + } + + private void flushStore() { + KeyValueIterator it = store.all(); + while (it.hasNext()) { + KeyValue next = it.next(); + context.forward(next.key, next.value); + } + } + + @Override + public void close() {} + } + +# Unit Testing Processors + +If you [write a Processor](processor-api.html), you will want to test it. + +Because the `Processor` forwards its results to the context rather than returning them, Unit testing requires a mocked context capable of capturing forwarded data for inspection. For this reason, we provide a `MockProcessorContext` in `test-utils`. + +**Construction** + +To begin with, instantiate your processor and initialize it with the mock context: + + + final Processor processorUnderTest = ...; + final MockProcessorContext context = new MockProcessorContext<>(); + processorUnderTest.init(context); + +If you need to pass configuration to your processor or set the default serdes, you can create the mock with config: + + + final Properties props = new Properties(); + props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); + props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Long().getClass()); + props.put("some.other.config", "some config value"); + final MockProcessorContext context = new MockProcessorContext<>(props); + +**Captured data** + +The mock will capture any values that your processor forwards. You can make assertions on them: + + + processorUnderTest.process("key", "value"); + + final Iterator> forwarded = context.forwarded().iterator(); + assertEquals(forwarded.next().record(), new Record<>(..., ...)); + assertFalse(forwarded.hasNext()); + + // you can reset forwards to clear the captured data. This may be helpful in constructing longer scenarios. + context.resetForwards(); + + assertEquals(context.forwarded().size(), 0); + +If your processor forwards to specific child processors, you can query the context for captured data by child name: + + + final List> captures = context.forwarded("childProcessorName"); + +The mock also captures whether your processor has called `commit()` on the context: + + + assertTrue(context.committed()); + + // commit captures can also be reset. + context.resetCommit(); + + assertFalse(context.committed()); + +**Setting record metadata** + +In case your processor logic depends on the record metadata (topic, partition, offset), you can set them on the context: + + + context.setRecordMetadata("topicName", /*partition*/ 0, /*offset*/ 0L); + +Once these are set, the context will continue returning the same values, until you set new ones. + +**State stores** + +In case your punctuator is stateful, the mock context allows you to register state stores. You're encouraged to use a simple in-memory store of the appropriate type (KeyValue, Windowed, or Session), since the mock context does _not_ manage changelogs, state directories, etc. 
+ + + final KeyValueStore store = + Stores.keyValueStoreBuilder( + Stores.inMemoryKeyValueStore("myStore"), + Serdes.String(), + Serdes.Integer() + ) + .withLoggingDisabled() // Changelog is not supported by MockProcessorContext. + .build(); + store.init(context, store); + context.register(store, /*deprecated parameter*/ false, /*parameter unused in mock*/ null); + +**Verifying punctuators** + +Processors can schedule punctuators to handle periodic tasks. The mock context does _not_ automatically execute punctuators, but it does capture them to allow you to unit test them as well: + + + final MockProcessorContext.CapturedPunctuator capturedPunctuator = context.scheduledPunctuators().get(0); + final long interval = capturedPunctuator.getIntervalMs(); + final PunctuationType type = capturedPunctuator.getType(); + final boolean cancelled = capturedPunctuator.cancelled(); + final Punctuator punctuator = capturedPunctuator.getPunctuator(); + punctuator.punctuate(/*timestamp*/ 0L); + +If you need to write tests involving automatic firing of scheduled punctuators, we recommend creating a simple topology with your processor and using the [`TopologyTestDriver`](testing.html#testing-topologytestdriver). + +[Previous](/41/streams/developer-guide/datatypes) [Next](/41/streams/developer-guide/interactive-queries) + + * [Documentation](/documentation) + * [Kafka Streams](/streams) + * [Developer Guide](/streams/developer-guide/) + + diff --git a/content/en/41/streams/developer-guide/write-streams-app.md b/content/en/41/streams/developer-guide/write-streams-app.md new file mode 100644 index 000000000..9432bbf47 --- /dev/null +++ b/content/en/41/streams/developer-guide/write-streams-app.md @@ -0,0 +1,145 @@ +--- +title: Writing a Streams Application +description: +weight: 1 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Writing a Streams Application + +**Table of Contents** + + * Libraries and Maven artifacts + * Using Kafka Streams within your application code + * Testing a Streams application + + + +Any Java or Scala application that makes use of the Kafka Streams library is considered a Kafka Streams application. The computational logic of a Kafka Streams application is defined as a [processor topology](../core-concepts#streams_topology), which is a graph of stream processors (nodes) and streams (edges). + +You can define the processor topology with the Kafka Streams APIs: + +[Kafka Streams DSL](dsl-api.html#streams-developer-guide-dsl) + A high-level API that provides the most common data transformation operations such as `map`, `filter`, `join`, and `aggregations` out of the box. The DSL is the recommended starting point for developers new to Kafka Streams, and should cover many use cases and stream processing needs. If you're writing a Scala application then you can use the [Kafka Streams DSL for Scala](dsl-api.html#scala-dsl) library which removes much of the Java/Scala interoperability boilerplate as opposed to working directly with the Java DSL. +[Processor API](processor-api.html#streams-developer-guide-processor-api) + A low-level API that lets you add and connect processors as well as interact directly with state stores. The Processor API provides you with even more flexibility than the DSL but at the expense of requiring more manual work on the side of the application developer (e.g., more lines of code). + +# Libraries and Maven artifacts + +This section lists the Kafka Streams related libraries that are available for writing your Kafka Streams applications. 
+ +You can define dependencies on the following libraries for your Kafka Streams applications. + +Group ID | Artifact ID | Version | Description +---|---|---|--- +`org.apache.kafka` | `kafka-streams` | `4.1.0` | (Required) Base library for Kafka Streams. +`org.apache.kafka` | `kafka-clients` | `4.1.0` | (Required) Kafka client library. Contains built-in serializers/deserializers. +`org.apache.kafka` | `kafka-streams-scala` | `4.1.0` | (Optional) Kafka Streams DSL for Scala library to write Scala Kafka Streams applications. When not using SBT you will need to suffix the artifact ID with the correct version of Scala your application is using (`_2.12`, `_2.13`) + +**Tip** + +See the section [Data Types and Serialization](datatypes.html#streams-developer-guide-serdes) for more information about Serializers/Deserializers. + +Example `pom.xml` snippet when using Maven: + + + + org.apache.kafka + kafka-streams + 4.1.0 + + + org.apache.kafka + kafka-clients + 4.1.0 + + + org.apache.kafka + kafka-streams-scala_2.13 + 4.1.0 + + +# Using Kafka Streams within your application code + +You can call Kafka Streams from anywhere in your application code, but usually these calls are made within the `main()` method of your application, or some variant thereof. The basic elements of defining a processing topology within your application are described below. + +First, you must create an instance of `KafkaStreams`. + + * The first argument of the `KafkaStreams` constructor takes a topology (either `StreamsBuilder#build()` for the [DSL](dsl-api.html#streams-developer-guide-dsl) or `Topology` for the [Processor API](processor-api.html#streams-developer-guide-processor-api)) that is used to define a topology. + * The second argument is an instance of `java.util.Properties`, which defines the configuration for this specific topology. + + + +Code example: + + + import org.apache.kafka.streams.KafkaStreams; + import org.apache.kafka.streams.kstream.StreamsBuilder; + import org.apache.kafka.streams.processor.Topology; + + // Use the builders to define the actual processing topology, e.g. to specify + // from which input topics to read, which stream operations (filter, map, etc.) + // should be called, and so on. We will cover this in detail in the subsequent + // sections of this Developer Guide. + + StreamsBuilder builder = ...; // when using the DSL + Topology topology = builder.build(); + // + // OR + // + Topology topology = ...; // when using the Processor API + + // Use the configuration to tell your application where the Kafka cluster is, + // which Serializers/Deserializers to use by default, to specify security settings, + // and so on. + Properties props = ...; + + KafkaStreams streams = new KafkaStreams(topology, props); + +At this point, internal structures are initialized, but the processing is not started yet. You have to explicitly start the Kafka Streams thread by calling the `KafkaStreams#start()` method: + + + // Start the Kafka Streams threads + streams.start(); + +If there are other instances of this stream processing application running elsewhere (e.g., on another machine), Kafka Streams transparently re-assigns tasks from the existing instances to the new instance that you just started. For more information, see [Stream Partitions and Tasks](../architecture.html#streams_architecture_tasks) and [Threading Model](../architecture.html#streams_architecture_threads). + +To catch any unexpected exceptions, you can set an `java.lang.Thread.UncaughtExceptionHandler` before you start the application. 
This handler is called whenever a stream thread is terminated by an unexpected exception: + + + streams.setUncaughtExceptionHandler((Thread thread, Throwable throwable) -> { + // here you should examine the throwable/exception and perform an appropriate action! + }); + + +To stop the application instance, call the `KafkaStreams#close()` method: + + + // Stop the Kafka Streams threads + streams.close(); + +To allow your application to gracefully shutdown in response to SIGTERM, it is recommended that you add a shutdown hook and call `KafkaStreams#close`. + +Here is a shutdown hook example in Java: + + + // Add shutdown hook to stop the Kafka Streams threads. + // You can optionally provide a timeout to `close`. + Runtime.getRuntime().addShutdownHook(new Thread(streams::close)); + +After an application is stopped, Kafka Streams will migrate any tasks that had been running in this instance to available remaining instances. + +# Testing a Streams application + +Kafka Streams comes with a `test-utils` module to help you test your application [here](testing.html). + +[Previous](/41/streams/developer-guide/) [Next](/41/streams/developer-guide/config-streams) + + * [Documentation](/documentation) + * [Kafka Streams](/streams) + * [Developer Guide](/streams/developer-guide/) + + diff --git a/content/en/41/streams/introduction.md b/content/en/41/streams/introduction.md new file mode 100644 index 000000000..eb3b7e928 --- /dev/null +++ b/content/en/41/streams/introduction.md @@ -0,0 +1,161 @@ +--- +title: Introduction +description: +weight: 1 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Kafka Streams + +[Introduction](/41/streams/) [Run Demo App](/41/streams/quickstart) [Tutorial: Write App](/41/streams/tutorial) [Concepts](/41/streams/core-concepts) [Architecture](/41/streams/architecture) [Developer Guide](/41/streams/developer-guide/) [Javadoc](/41/javadoc/index.html?org/apache/kafka/streams/KafkaStreams.html) [Upgrade](/41/streams/upgrade-guide) + +# The easiest way to write mission-critical real-time applications and microservices + +Kafka Streams is a client library for building applications and microservices, where the input and output data are stored in Kafka clusters. It combines the simplicity of writing and deploying standard Java and Scala applications on the client side with the benefits of Kafka's server-side cluster technology. + +![](/41/images/intro_to_streams-iframe-placeholder.png) (Clicking the image will load a video from YouTube) ![](/41/images/creating-streams-iframe-placeholder.png) (Clicking the image will load a video from YouTube) ![](/41/images/transforming_part_1-iframe-placeholder.png) (Clicking the image will load a video from YouTube) ![](/41/images/transforming_part_2-iframe-placeholder.png) (Clicking the image will load a video from YouTube) + +# TOUR OF THE STREAMS API + +1Intro to Streams + +2Creating a Streams Application + +3Transforming Data Pt. 1 + +4Transforming Data Pt. 2 + +* * * + +# Why you'll love using Kafka Streams! 
+ + * Elastic, highly scalable, fault-tolerant + * Deploy to containers, VMs, bare metal, cloud + * Equally viable for small, medium, & large use cases + * Fully integrated with Kafka security + * Write standard Java and Scala applications + * Exactly-once processing semantics + * No separate processing cluster required + * Develop on Mac, Linux, Windows + + + +[Write your first app](/41/streams/tutorial) + +* * * + +# Kafka Streams use cases + +[ ](https://open.nytimes.com/publishing-with-apache-kafka-at-the-new-york-times-7f0e3b7d2077) + +[The New York Times uses Apache Kafka ](https://open.nytimes.com/publishing-with-apache-kafka-at-the-new-york-times-7f0e3b7d2077)and the Kafka Streams to store and distribute, in real-time, published content to the various applications and systems that make it available to the readers. + +[ ](https://www.confluent.io/blog/ranking-websites-real-time-apache-kafkas-streams-api/) + +As the leading online fashion retailer in Europe, Zalando uses Kafka as an ESB (Enterprise Service Bus), which helps us in transitioning from a monolithic to a micro services architecture. Using Kafka for processing [ event streams](https://www.confluent.io/blog/ranking-websites-real-time-apache-kafkas-streams-api/) enables our technical team to do near-real time business intelligence. + +[ ](https://engineering.linecorp.com/en/blog/detail/80) + +[LINE uses Apache Kafka](https://engineering.linecorp.com/en/blog/detail/80) as a central datahub for our services to communicate to one another. Hundreds of billions of messages are produced daily and are used to execute various business logic, threat detection, search indexing and data analysis. LINE leverages Kafka Streams to reliably transform and filter topics enabling sub topics consumers can efficiently consume, meanwhile retaining easy maintainability thanks to its sophisticated yet minimal code base. + +[ ](https://medium.com/@Pinterest_Engineering/using-kafka-streams-api-for-predictive-budgeting-9f58d206c996) + +[Pinterest uses Apache Kafka and the Kafka Streams](https://medium.com/@Pinterest_Engineering/using-kafka-streams-api-for-predictive-budgeting-9f58d206c996) at large scale to power the real-time, predictive budgeting system of their advertising infrastructure. With Kafka Streams, spend predictions are more accurate than ever. + +[ ](https://www.confluent.io/blog/real-time-financial-alerts-rabobank-apache-kafkas-streams-api/) + +Rabobank is one of the 3 largest banks in the Netherlands. Its digital nervous system, the Business Event Bus, is powered by Apache Kafka. It is used by an increasing amount of financial processes and services, one of which is Rabo Alerts. This service alerts customers in real-time upon financial events and is [built using Kafka Streams.](https://www.confluent.io/blog/real-time-financial-alerts-rabobank-apache-kafkas-streams-api/) + +[ ](https://speakerdeck.com/xenji/kafka-and-debezium-at-trivago-code-dot-talks-2017-edition) + +Trivago is a global hotel search platform. We are focused on reshaping the way travelers search for and compare hotels, while enabling hotel advertisers to grow their businesses by providing access to a broad audience of travelers via our websites and apps. As of 2017, we offer access to approximately 1.8 million hotels and other accommodations in over 190 countries. We use Kafka, Kafka Connect, and Kafka Streams to [enable our developers](https://speakerdeck.com/xenji/kafka-and-debezium-at-trivago-code-dot-talks-2017-edition) to access data freely in the company. 
Kafka Streams powers parts of our analytics pipeline and delivers endless options to explore and operate on the data sources we have at hand. + +# Hello Kafka Streams + +The code example below implements a WordCount application that is elastic, highly scalable, fault-tolerant, stateful, and ready to run in production at large scale + +Java Scala + + + import org.apache.kafka.common.serialization.Serdes; + import org.apache.kafka.common.utils.Bytes; + import org.apache.kafka.streams.KafkaStreams; + import org.apache.kafka.streams.StreamsBuilder; + import org.apache.kafka.streams.StreamsConfig; + import org.apache.kafka.streams.kstream.KStream; + import org.apache.kafka.streams.kstream.KTable; + import org.apache.kafka.streams.kstream.Materialized; + import org.apache.kafka.streams.kstream.Produced; + import org.apache.kafka.streams.state.KeyValueStore; + + import java.util.Arrays; + import java.util.Properties; + + public class WordCountApplication { + + public static void main(final String[] args) throws Exception { + Properties props = new Properties(); + props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-application"); + props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka-broker1:9092"); + props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); + props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); + + StreamsBuilder builder = new StreamsBuilder(); + KStream textLines = builder.stream("TextLinesTopic"); + KTable wordCounts = textLines + .flatMapValues(textLine -> Arrays.asList(textLine.toLowerCase().split("\W+"))) + .groupBy((key, word) -> word) + .count(Materialized.>as("counts-store")); + wordCounts.toStream().to("WordsWithCountsTopic", Produced.with(Serdes.String(), Serdes.Long())); + + KafkaStreams streams = new KafkaStreams(builder.build(), props); + streams.start(); + } + + } + + + import java.util.Properties + import java.util.concurrent.TimeUnit + + import org.apache.kafka.streams.kstream.Materialized + import org.apache.kafka.streams.scala.ImplicitConversions._ + import org.apache.kafka.streams.scala._ + import org.apache.kafka.streams.scala.kstream._ + import org.apache.kafka.streams.{KafkaStreams, StreamsConfig} + + object WordCountApplication extends App { + import Serdes._ + + val props: Properties = { + val p = new Properties() + p.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-application") + p.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka-broker1:9092") + p + } + + val builder: StreamsBuilder = new StreamsBuilder + val textLines: KStream[String, String] = builder.stream[String, String]("TextLinesTopic") + val wordCounts: KTable[String, Long] = textLines + .flatMapValues(textLine => textLine.toLowerCase.split("\W+")) + .groupBy((_, word) => word) + .count()(Materialized.as("counts-store")) + wordCounts.toStream.to("WordsWithCountsTopic") + + val streams: KafkaStreams = new KafkaStreams(builder.build(), props) + streams.start() + + sys.ShutdownHookThread { + streams.close(10, TimeUnit.SECONDS) + } + } + +[Previous](/41/documentation) [Next](/41/streams/quickstart) + + * [Documentation](/documentation) + * [Kafka Streams](/streams) + + diff --git a/content/en/41/streams/quickstart.md b/content/en/41/streams/quickstart.md new file mode 100644 index 000000000..5bcb534af --- /dev/null +++ b/content/en/41/streams/quickstart.md @@ -0,0 +1,241 @@ +--- +title: Quick Start +description: +weight: 2 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Run Kafka Streams Demo Application 
+ +[Introduction](/41/streams/) [Run Demo App](/41/streams/quickstart) [Tutorial: Write App](/41/streams/tutorial) [Concepts](/41/streams/core-concepts) [Architecture](/41/streams/architecture) [Developer Guide](/41/streams/developer-guide/) [Upgrade](/41/streams/upgrade-guide) + +This tutorial assumes you are starting fresh and have no existing Kafka data. However, if you have already started Kafka, feel free to skip the first two steps. + +Kafka Streams is a client library for building mission-critical real-time applications and microservices, where the input and/or output data is stored in Kafka clusters. Kafka Streams combines the simplicity of writing and deploying standard Java and Scala applications on the client side with the benefits of Kafka's server-side cluster technology to make these applications highly scalable, elastic, fault-tolerant, distributed, and much more. + +This quickstart example will demonstrate how to run a streaming application coded in this library. Here is the gist of the `[WordCountDemo](https://github.com/apache/kafka/blob/4.1/streams/examples/src/main/java/org/apache/kafka/streams/examples/wordcount/WordCountDemo.java)` example code. + + + // Serializers/deserializers (serde) for String and Long types + final Serde stringSerde = Serdes.String(); + final Serde longSerde = Serdes.Long(); + + // Construct a `KStream` from the input topic "streams-plaintext-input", where message values + // represent lines of text (for the sake of this example, we ignore whatever may be stored + // in the message keys). + KStream textLines = builder.stream( + "streams-plaintext-input", + Consumed.with(stringSerde, stringSerde) + ); + + KTable wordCounts = textLines + // Split each text line, by whitespace, into words. + .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\W+"))) + + // Group the text words as message keys + .groupBy((key, value) -> value) + + // Count the occurrences of each word (message key). + .count(); + + // Store the running counts as a changelog stream to the output topic. + wordCounts.toStream().to("streams-wordcount-output", Produced.with(Serdes.String(), Serdes.Long())); + +It implements the WordCount algorithm, which computes a word occurrence histogram from the input text. However, unlike other WordCount examples you might have seen before that operate on bounded data, the WordCount demo application behaves slightly differently because it is designed to operate on an **infinite, unbounded stream** of data. Similar to the bounded variant, it is a stateful algorithm that tracks and updates the counts of words. However, since it must assume potentially unbounded input data, it will periodically output its current state and results while continuing to process more data because it cannot know when it has processed "all" the input data. + +As the first step, we will start Kafka (unless you already have it started) and then we will prepare input data to a Kafka topic, which will subsequently be processed by a Kafka Streams application. + +## Step 1: Download the code + +[Download](https://www.apache.org/dyn/closer.cgi?path=/kafka/4.1.0/kafka_2.13-4.1.0.tgz "Kafka downloads") the 4.1.0 release and un-tar it. 
Note that there are multiple downloadable Scala versions and we choose to use the recommended version (2.13) here: + + + $ tar -xzf kafka_2.13-4.1.0.tgz + $ cd kafka_2.13-4.1.0 + +## Step 2: Start the Kafka server + +Generate a Cluster UUID + + + $ KAFKA_CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)" + +Format Log Directories + + + $ bin/kafka-storage.sh format --standalone -t $KAFKA_CLUSTER_ID -c config/server.properties + +Start the Kafka Server + + + $ bin/kafka-server-start.sh config/server.properties + +## Step 3: Prepare input topic and start Kafka producer + +Next, we create the input topic named **streams-plaintext-input** and the output topic named **streams-wordcount-output** : + + + $ bin/kafka-topics.sh --create \ + --bootstrap-server localhost:9092 \ + --replication-factor 1 \ + --partitions 1 \ + --topic streams-plaintext-input + Created topic "streams-plaintext-input". + +Note: we create the output topic with compaction enabled because the output stream is a changelog stream (cf. explanation of application output below). + + + $ bin/kafka-topics.sh --create \ + --bootstrap-server localhost:9092 \ + --replication-factor 1 \ + --partitions 1 \ + --topic streams-wordcount-output \ + --config cleanup.policy=compact + Created topic "streams-wordcount-output". + +The created topic can be described with the same **kafka-topics** tool: + + + $ bin/kafka-topics.sh --bootstrap-server localhost:9092 --describe + Topic:streams-wordcount-output PartitionCount:1 ReplicationFactor:1 Configs:cleanup.policy=compact,segment.bytes=1073741824 + Topic: streams-wordcount-output Partition: 0 Leader: 0 Replicas: 0 Isr: 0 + Topic:streams-plaintext-input PartitionCount:1 ReplicationFactor:1 Configs:segment.bytes=1073741824 + Topic: streams-plaintext-input Partition: 0 Leader: 0 Replicas: 0 Isr: 0 + +## Step 4: Start the Wordcount Application + +The following command starts the WordCount demo application: + + + $ bin/kafka-run-class.sh org.apache.kafka.streams.examples.wordcount.WordCountDemo + +The demo application will read from the input topic **streams-plaintext-input** , perform the computations of the WordCount algorithm on each of the read messages, and continuously write its current results to the output topic **streams-wordcount-output**. Hence there won't be any STDOUT output except log entries as the results are written back into in Kafka. + +Now we can start the console producer in a separate terminal to write some input data to this topic: + + + $ bin/kafka-console-producer.sh --bootstrap-server localhost:9092 --topic streams-plaintext-input + +and inspect the output of the WordCount demo application by reading from its output topic with the console consumer in a separate terminal: + + + $ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \ + --topic streams-wordcount-output \ + --from-beginning \ + --property print.key=true \ + --property print.value=true \ + --property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \ + --property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer + +## Step 5: Process some data + +Now let's write some message with the console producer into the input topic **streams-plaintext-input** by entering a single line of text and then hit . 
This will send a new message to the input topic, where the message key is null and the message value is the string encoded text line that you just entered (in practice, input data for applications will typically be streaming continuously into Kafka, rather than being manually entered as we do in this quickstart): + + + $ bin/kafka-console-producer.sh --bootstrap-server localhost:9092 --topic streams-plaintext-input + >all streams lead to kafka + +This message will be processed by the Wordcount application and the following output data will be written to the **streams-wordcount-output** topic and printed by the console consumer: + + + $ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \ + --topic streams-wordcount-output \ + --from-beginning \ + --property print.key=true \ + --property print.value=true \ + --property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \ + --property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer + + all 1 + streams 1 + lead 1 + to 1 + kafka 1 + +Here, the first column is the Kafka message key in `java.lang.String` format and represents a word that is being counted, and the second column is the message value in `java.lang.Long`format, representing the word's latest count. + +Now let's continue writing one more message with the console producer into the input topic **streams-plaintext-input**. Enter the text line "hello kafka streams" and hit . Your terminal should look as follows: + + + $ bin/kafka-console-producer.sh --bootstrap-server localhost:9092 --topic streams-plaintext-input + >all streams lead to kafka + >hello kafka streams + +In your other terminal in which the console consumer is running, you will observe that the WordCount application wrote new output data: + + + $ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \ + --topic streams-wordcount-output \ + --from-beginning \ + --property print.key=true \ + --property print.value=true \ + --property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \ + --property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer + + all 1 + streams 1 + lead 1 + to 1 + kafka 1 + hello 1 + kafka 2 + streams 2 + +Here the last printed lines **kafka 2** and **streams 2** indicate updates to the keys **kafka** and **streams** whose counts have been incremented from **1** to **2**. Whenever you write further input messages to the input topic, you will observe new messages being added to the **streams-wordcount-output** topic, representing the most recent word counts as computed by the WordCount application. 
Let's enter one final input text line "join kafka summit" and hit in the console producer to the input topic **streams-plaintext-input** before we wrap up this quickstart: + + + $ bin/kafka-console-producer.sh --bootstrap-server localhost:9092 --topic streams-plaintext-input + >all streams lead to kafka + >hello kafka streams + >join kafka summit + +The **streams-wordcount-output** topic will subsequently show the corresponding updated word counts (see last three lines): + + + $ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \ + --topic streams-wordcount-output \ + --from-beginning \ + --property print.key=true \ + --property print.value=true \ + --property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \ + --property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer + + all 1 + streams 1 + lead 1 + to 1 + kafka 1 + hello 1 + kafka 2 + streams 2 + join 1 + kafka 3 + summit 1 + +As one can see, outputs of the Wordcount application is actually a continuous stream of updates, where each output record (i.e. each line in the original output above) is an updated count of a single word, aka record key such as "kafka". For multiple records with the same key, each later record is an update of the previous one. + +The two diagrams below illustrate what is essentially happening behind the scenes. The first column shows the evolution of the current state of the `KTable` that is counting word occurrences for `count`. The second column shows the change records that result from state updates to the KTable and that are being sent to the output Kafka topic **streams-wordcount-output**. + +![](/41/images/streams-table-updates-02.png) ![](/41/images/streams-table-updates-01.png) + +First the text line "all streams lead to kafka" is being processed. The `KTable` is being built up as each new word results in a new table entry (highlighted with a green background), and a corresponding change record is sent to the downstream `KStream`. + +When the second text line "hello kafka streams" is processed, we observe, for the first time, that existing entries in the `KTable` are being updated (here: for the words "kafka" and for "streams"). And again, change records are being sent to the output topic. + +And so on (we skip the illustration of how the third line is being processed). This explains why the output topic has the contents we showed above, because it contains the full record of changes. + +Looking beyond the scope of this concrete example, what Kafka Streams is doing here is to leverage the duality between a table and a changelog stream (here: table = the KTable, changelog stream = the downstream KStream): you can publish every change of the table to a stream, and if you consume the entire changelog stream from beginning to end, you can reconstruct the contents of the table. + +## Step 6: Teardown the application + +You can now stop the console consumer, the console producer, the Wordcount application, the Kafka broker in order via **Ctrl-C**. 
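+
+If you also want the demo to start from a clean slate on its next run, you can additionally delete its local state directory. A minimal sketch, assuming the default `state.dir` location (typically `/tmp/kafka-streams` on Linux) and that no other Streams application you care about keeps state there:
+
+    $ rm -rf /tmp/kafka-streams
+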
+ +[Previous](/41/streams) [Next](/41/streams/tutorial) + + * [Documentation](/documentation) + * [Kafka Streams](/streams) + + diff --git a/content/en/41/streams/tutorial.md b/content/en/41/streams/tutorial.md new file mode 100644 index 000000000..d3edb5917 --- /dev/null +++ b/content/en/41/streams/tutorial.md @@ -0,0 +1,450 @@ +--- +title: Write a streams app +description: +weight: 3 +tags: ['kafka', 'docs'] +aliases: +keywords: +type: docs +--- + +# Tutorial: Write a Kafka Streams Application + +[Introduction](/41/streams/) [Run Demo App](/41/streams/quickstart) [Tutorial: Write App](/41/streams/tutorial) [Concepts](/41/streams/core-concepts) [Architecture](/41/streams/architecture) [Developer Guide](/41/streams/developer-guide/) [Upgrade](/41/streams/upgrade-guide) + +In this guide we will start from scratch on setting up your own project to write a stream processing application using Kafka Streams. It is highly recommended to read the [quickstart](/41/streams/quickstart) first on how to run a Streams application written in Kafka Streams if you have not done so. + +## Setting up a Maven Project + +We are going to use a Kafka Streams Maven Archetype for creating a Streams project structure with the following commands: + + + $ mvn archetype:generate \ + -DarchetypeGroupId=org.apache.kafka \ + -DarchetypeArtifactId=streams-quickstart-java \ + -DarchetypeVersion=4.1.0 \ + -DgroupId=streams.examples \ + -DartifactId=streams-quickstart \ + -Dversion=0.1 \ + -Dpackage=myapps + +You can use a different value for `groupId`, `artifactId` and `package` parameters if you like. Assuming the above parameter values are used, this command will create a project structure that looks like this: + + + $ tree streams-quickstart + streams-quickstart + |-- pom.xml + |-- src + |-- main + |-- java + | |-- myapps + | |-- LineSplit.java + | |-- Pipe.java + | |-- WordCount.java + |-- resources + |-- log4j.properties + +The `pom.xml` file included in the project already has the Streams dependency defined. Note, that the generated `pom.xml` targets Java 11. + +There are already several example programs written with Streams library under `src/main/java`. Since we are going to start writing such programs from scratch, we can now delete these examples: + + + $ cd streams-quickstart + $ rm src/main/java/myapps/*.java + +## Writing a first Streams application: Pipe + +It's coding time now! Feel free to open your favorite IDE and import this Maven project, or simply open a text editor and create a java file under `src/main/java/myapps`. Let's name it `Pipe.java`: + + + package myapps; + + public class Pipe { + + public static void main(String[] args) throws Exception { + + } + } + +We are going to fill in the `main` function to write this pipe program. Note that we will not list the import statements as we go since IDEs can usually add them automatically. However if you are using a text editor you need to manually add the imports, and at the end of this section we'll show the complete code snippet with import statement for you. + +The first step to write a Streams application is to create a `java.util.Properties` map to specify different Streams execution configuration values as defined in `StreamsConfig`. 
A couple of important configuration values you need to set are: `StreamsConfig.BOOTSTRAP_SERVERS_CONFIG`, which specifies a list of host/port pairs to use for establishing the initial connection to the Kafka cluster, and `StreamsConfig.APPLICATION_ID_CONFIG`, which gives the unique identifier of your Streams application to distinguish itself with other applications talking to the same Kafka cluster: + + + Properties props = new Properties(); + props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-pipe"); + props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assuming that the Kafka broker this application is talking to runs on local machine with port 9092 + +In addition, you can customize other configurations in the same map, for example, default serialization and deserialization libraries for the record key-value pairs: + + + props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); + props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); + +For a full list of configurations of Kafka Streams please refer to this [table](/41/#streamsconfigs). + +Next we will define the computational logic of our Streams application. In Kafka Streams this computational logic is defined as a `topology` of connected processor nodes. We can use a topology builder to construct such a topology, + + + final StreamsBuilder builder = new StreamsBuilder(); + +And then create a source stream from a Kafka topic named `streams-plaintext-input` using this topology builder: + + + KStream source = builder.stream("streams-plaintext-input"); + +Now we get a `KStream` that is continuously generating records from its source Kafka topic `streams-plaintext-input`. The records are organized as `String` typed key-value pairs. The simplest thing we can do with this stream is to write it into another Kafka topic, say it's named `streams-pipe-output`: + + + source.to("streams-pipe-output"); + +Note that we can also concatenate the above two lines into a single line as: + + + builder.stream("streams-plaintext-input").to("streams-pipe-output"); + +We can inspect what kind of `topology` is created from this builder by doing the following: + + + final Topology topology = builder.build(); + +And print its description to standard output as: + + + System.out.println(topology.describe()); + +If we just stop here, compile and run the program, it will output the following information: + + + $ mvn clean package + $ mvn exec:java -Dexec.mainClass=myapps.Pipe + Sub-topologies: + Sub-topology: 0 + Source: KSTREAM-SOURCE-0000000000(topics: streams-plaintext-input) --> KSTREAM-SINK-0000000001 + Sink: KSTREAM-SINK-0000000001(topic: streams-pipe-output) <-- KSTREAM-SOURCE-0000000000 + Global Stores: + none + +As shown above, it illustrates that the constructed topology has two processor nodes, a source node `KSTREAM-SOURCE-0000000000` and a sink node `KSTREAM-SINK-0000000001`. `KSTREAM-SOURCE-0000000000` continuously read records from Kafka topic `streams-plaintext-input` and pipe them to its downstream node `KSTREAM-SINK-0000000001`; `KSTREAM-SINK-0000000001` will write each of its received record in order to another Kafka topic `streams-pipe-output` (the `-->` and `<--` arrows dictates the downstream and upstream processor nodes of this node, i.e. "children" and "parents" within the topology graph). It also illustrates that this simple topology has no global state stores associated with it (we will talk about state stores more in the following sections). 
+ +Note that we can always describe the topology as we did above at any given point while we are building it in the code, so as a user you can interactively "try and taste" your computational logic defined in the topology until you are happy with it. Suppose we are already done with this simple topology that just pipes data from one Kafka topic to another in an endless streaming manner, we can now construct the Streams client with the two components we have just constructed above: the configuration map specified in a `java.util.Properties` instance and the `Topology` object. + + + final KafkaStreams streams = new KafkaStreams(topology, props); + +By calling its `start()` function we can trigger the execution of this client. The execution won't stop until `close()` is called on this client. We can, for example, add a shutdown hook with a countdown latch to capture a user interrupt and close the client upon terminating this program: + + + final CountDownLatch latch = new CountDownLatch(1); + + // attach shutdown handler to catch control-c + Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") { + @Override + public void run() { + streams.close(); + latch.countDown(); + } + }); + + try { + streams.start(); + latch.await(); + } catch (Throwable e) { + System.exit(1); + } + System.exit(0); + +The complete code so far looks like this: + + + package myapps; + + import org.apache.kafka.common.serialization.Serdes; + import org.apache.kafka.streams.KafkaStreams; + import org.apache.kafka.streams.StreamsBuilder; + import org.apache.kafka.streams.StreamsConfig; + import org.apache.kafka.streams.Topology; + + import java.util.Properties; + import java.util.concurrent.CountDownLatch; + + public class Pipe { + + public static void main(String[] args) throws Exception { + Properties props = new Properties(); + props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-pipe"); + props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); + props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); + + final StreamsBuilder builder = new StreamsBuilder(); + + builder.stream("streams-plaintext-input").to("streams-pipe-output"); + + final Topology topology = builder.build(); + + final KafkaStreams streams = new KafkaStreams(topology, props); + final CountDownLatch latch = new CountDownLatch(1); + + // attach shutdown handler to catch control-c + Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") { + @Override + public void run() { + streams.close(); + latch.countDown(); + } + }); + + try { + streams.start(); + latch.await(); + } catch (Throwable e) { + System.exit(1); + } + System.exit(0); + } + } + +If you already have the Kafka broker up and running at `localhost:9092`, and the topics `streams-plaintext-input` and `streams-pipe-output` created on that broker, you can run this code in your IDE or on the command line, using Maven: + + + $ mvn clean package + $ mvn exec:java -Dexec.mainClass=myapps.Pipe + +For detailed instructions on how to run a Streams application and observe its computing results, please read the [Play with a Streams Application](/41/streams/quickstart) section. We will not talk about this in the rest of this section. + +## Writing a second Streams application: Line Split + +We have learned how to construct a Streams client with its two key components: the `StreamsConfig` and `Topology`. 
Now let's move on to add some real processing logic by augmenting the current topology. We can first create another program by first copy the existing `Pipe.java` class: + + + $ cp src/main/java/myapps/Pipe.java src/main/java/myapps/LineSplit.java + +And change its class name as well as the application id config to distinguish with the original program: + + + public class LineSplit { + + public static void main(String[] args) throws Exception { + Properties props = new Properties(); + props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-linesplit"); + // ... + } + } + +Since each of the source stream's record is a `String` typed key-value pair, let's treat the value string as a text line and split it into words with a `FlatMapValues` operator: + + + KStream source = builder.stream("streams-plaintext-input"); + KStream words = source.flatMapValues(new ValueMapper>() { + @Override + public Iterable apply(String value) { + return Arrays.asList(value.split("\W+")); + } + }); + +The operator will take the `source` stream as its input, and generate a new stream named `words` by processing each record from its source stream in order and breaking its value string into a list of words, and producing each word as a new record to the output `words` stream. This is a stateless operator that does not need to keep track of any previously received records or processed results. Note if you are using JDK 8 you can use lambda expression and simplify the above code as: + + + KStream source = builder.stream("streams-plaintext-input"); + KStream words = source.flatMapValues(value -> Arrays.asList(value.split("\W+"))); + +And finally we can write the word stream back into another Kafka topic, say `streams-linesplit-output`. Again, these two steps can be concatenated as the following (assuming lambda expression is used): + + + KStream source = builder.stream("streams-plaintext-input"); + source.flatMapValues(value -> Arrays.asList(value.split("\W+"))) + .to("streams-linesplit-output"); + +If we now describe this augmented topology as `System.out.println(topology.describe())`, we will get the following: + + + $ mvn clean package + $ mvn exec:java -Dexec.mainClass=myapps.LineSplit + Sub-topologies: + Sub-topology: 0 + Source: KSTREAM-SOURCE-0000000000(topics: streams-plaintext-input) --> KSTREAM-FLATMAPVALUES-0000000001 + Processor: KSTREAM-FLATMAPVALUES-0000000001(stores: []) --> KSTREAM-SINK-0000000002 <-- KSTREAM-SOURCE-0000000000 + Sink: KSTREAM-SINK-0000000002(topic: streams-linesplit-output) <-- KSTREAM-FLATMAPVALUES-0000000001 + Global Stores: + none + +As we can see above, a new processor node `KSTREAM-FLATMAPVALUES-0000000001` is injected into the topology between the original source and sink nodes. It takes the source node as its parent and the sink node as its child. In other words, each record fetched by the source node will first traverse to the newly added `KSTREAM-FLATMAPVALUES-0000000001` node to be processed, and one or more new records will be generated as a result. They will continue traverse down to the sink node to be written back to Kafka. Note this processor node is "stateless" as it is not associated with any stores (i.e. `(stores: [])`). 
+ +The complete code looks like this (assuming lambda expression is used): + + + package myapps; + + import org.apache.kafka.common.serialization.Serdes; + import org.apache.kafka.streams.KafkaStreams; + import org.apache.kafka.streams.StreamsBuilder; + import org.apache.kafka.streams.StreamsConfig; + import org.apache.kafka.streams.Topology; + import org.apache.kafka.streams.kstream.KStream; + + import java.util.Arrays; + import java.util.Properties; + import java.util.concurrent.CountDownLatch; + + public class LineSplit { + + public static void main(String[] args) throws Exception { + Properties props = new Properties(); + props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-linesplit"); + props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); + props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); + + final StreamsBuilder builder = new StreamsBuilder(); + + KStream source = builder.stream("streams-plaintext-input"); + source.flatMapValues(value -> Arrays.asList(value.split("\W+"))) + .to("streams-linesplit-output"); + + final Topology topology = builder.build(); + final KafkaStreams streams = new KafkaStreams(topology, props); + final CountDownLatch latch = new CountDownLatch(1); + + // ... same as Pipe.java above + } + } + +## Writing a third Streams application: Wordcount + +Let's now take a step further to add some "stateful" computations to the topology by counting the occurrence of the words split from the source text stream. Following similar steps let's create another program based on the `LineSplit.java` class: + + + public class WordCount { + + public static void main(String[] args) throws Exception { + Properties props = new Properties(); + props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-wordcount"); + // ... + } + } + +In order to count the words we can first modify the `flatMapValues` operator to treat all of them as lower case (assuming lambda expression is used): + + + source.flatMapValues(new ValueMapper>() { + @Override + public Iterable apply(String value) { + return Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\W+")); + } + }); + +In order to do the counting aggregation we have to first specify that we want to key the stream on the value string, i.e. the lower cased word, with a `groupBy` operator. This operator generate a new grouped stream, which can then be aggregated by a `count` operator, which generates a running count on each of the grouped keys: + + + KTable counts = + source.flatMapValues(new ValueMapper>() { + @Override + public Iterable apply(String value) { + return Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\W+")); + } + }) + .groupBy(new KeyValueMapper() { + @Override + public String apply(String key, String value) { + return value; + } + }) + // Materialize the result into a KeyValueStore named "counts-store". + // The Materialized store is always of type as this is the format of the inner most store. + .count(Materialized.> as("counts-store")); + +Note that the `count` operator has a `Materialized` parameter that specifies that the running count should be stored in a state store named `counts-store`. This `counts-store` store can be queried in real-time, with details described in the [Developer Manual](/41/streams/developer-guide#streams_interactive_queries). 
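+
+As a minimal sketch of such a query (assuming the `streams` instance and the `counts-store` name from this example, and that the application is already running), you could look up the current count for a word through the interactive-queries API:
+
+    // Sketch only: read the latest count for a word from the materialized store
+    // (imports: org.apache.kafka.streams.StoreQueryParameters,
+    //  org.apache.kafka.streams.state.QueryableStoreTypes,
+    //  org.apache.kafka.streams.state.ReadOnlyKeyValueStore).
+    ReadOnlyKeyValueStore<String, Long> counts = streams.store(
+        StoreQueryParameters.fromNameAndType("counts-store", QueryableStoreTypes.keyValueStore()));
+    Long count = counts.get("kafka"); // null if the word has not been counted yet
+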
+ +We can also write the `counts` KTable's changelog stream back into another Kafka topic, say `streams-wordcount-output`. Because the result is a changelog stream, the output topic `streams-wordcount-output` should be configured with log compaction enabled. Note that this time the value type is no longer `String` but `Long`, so the default serialization classes are not viable for writing it to Kafka anymore. We need to provide overridden serialization methods for `Long` types, otherwise a runtime exception will be thrown: + + + counts.toStream().to("streams-wordcount-output", Produced.with(Serdes.String(), Serdes.Long())); + +Note that in order to read the changelog stream from topic `streams-wordcount-output`, one needs to set the value deserialization as `org.apache.kafka.common.serialization.LongDeserializer`. Details of this can be found in the [Play with a Streams Application](/41/streams/quickstart) section. Assuming lambda expression from JDK 8 can be used, the above code can be simplified as: + + + KStream source = builder.stream("streams-plaintext-input"); + source.flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\W+"))) + .groupBy((key, value) -> value) + .count(Materialized.>as("counts-store")) + .toStream() + .to("streams-wordcount-output", Produced.with(Serdes.String(), Serdes.Long())); + +If we again describe this augmented topology as `System.out.println(topology.describe())`, we will get the following: + + + $ mvn clean package + $ mvn exec:java -Dexec.mainClass=myapps.WordCount + Sub-topologies: + Sub-topology: 0 + Source: KSTREAM-SOURCE-0000000000(topics: streams-plaintext-input) --> KSTREAM-FLATMAPVALUES-0000000001 + Processor: KSTREAM-FLATMAPVALUES-0000000001(stores: []) --> KSTREAM-KEY-SELECT-0000000002 <-- KSTREAM-SOURCE-0000000000 + Processor: KSTREAM-KEY-SELECT-0000000002(stores: []) --> KSTREAM-FILTER-0000000005 <-- KSTREAM-FLATMAPVALUES-0000000001 + Processor: KSTREAM-FILTER-0000000005(stores: []) --> KSTREAM-SINK-0000000004 <-- KSTREAM-KEY-SELECT-0000000002 + Sink: KSTREAM-SINK-0000000004(topic: counts-store-repartition) <-- KSTREAM-FILTER-0000000005 + Sub-topology: 1 + Source: KSTREAM-SOURCE-0000000006(topics: counts-store-repartition) --> KSTREAM-AGGREGATE-0000000003 + Processor: KSTREAM-AGGREGATE-0000000003(stores: [counts-store]) --> KTABLE-TOSTREAM-0000000007 <-- KSTREAM-SOURCE-0000000006 + Processor: KTABLE-TOSTREAM-0000000007(stores: []) --> KSTREAM-SINK-0000000008 <-- KSTREAM-AGGREGATE-0000000003 + Sink: KSTREAM-SINK-0000000008(topic: streams-wordcount-output) <-- KTABLE-TOSTREAM-0000000007 + Global Stores: + none + +As we can see above, the topology now contains two disconnected sub-topologies. The first sub-topology's sink node `KSTREAM-SINK-0000000004` will write to a repartition topic `counts-store-repartition`, which will be read by the second sub-topology's source node `KSTREAM-SOURCE-0000000006`. The repartition topic is used to "shuffle" the source stream by its aggregation key, which is in this case the value string. In addition, inside the first sub-topology a stateless `KSTREAM-FILTER-0000000005` node is injected between the grouping `KSTREAM-KEY-SELECT-0000000002` node and the sink node to filter out any intermediate record whose aggregate key is empty. + +In the second sub-topology, the aggregation node `KSTREAM-AGGREGATE-0000000003` is associated with a state store named `counts-store` (the name is specified by the user in the `count` operator). 
Upon receiving each record from its upstream source node, the aggregation processor will first query its associated `counts-store` store to get the current count for that key, augment it by one, and then write the new count back to the store. Each updated count for the key will also be piped downstream to the `KTABLE-TOSTREAM-0000000007` node, which interprets this update stream as a record stream before further piping it to the sink node `KSTREAM-SINK-0000000008` for writing back to Kafka.
+
+The complete code looks like this (assuming lambda expression is used):
+
+    
+    package myapps;
+    
+    import org.apache.kafka.common.serialization.Serdes;
+    import org.apache.kafka.common.utils.Bytes;
+    import org.apache.kafka.streams.KafkaStreams;
+    import org.apache.kafka.streams.StreamsBuilder;
+    import org.apache.kafka.streams.StreamsConfig;
+    import org.apache.kafka.streams.Topology;
+    import org.apache.kafka.streams.kstream.KStream;
+    import org.apache.kafka.streams.kstream.Materialized;
+    import org.apache.kafka.streams.kstream.Produced;
+    import org.apache.kafka.streams.state.KeyValueStore;
+    
+    import java.util.Arrays;
+    import java.util.Locale;
+    import java.util.Properties;
+    import java.util.concurrent.CountDownLatch;
+    
+    public class WordCount {
+    
+        public static void main(String[] args) throws Exception {
+            Properties props = new Properties();
+            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-wordcount");
+            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+            props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
+            props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
+    
+            final StreamsBuilder builder = new StreamsBuilder();
+    
+            KStream<String, String> source = builder.stream("streams-plaintext-input");
+            source.flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+")))
+                  .groupBy((key, value) -> value)
+                  .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store"))
+                  .toStream()
+                  .to("streams-wordcount-output", Produced.with(Serdes.String(), Serdes.Long()));
+    
+            final Topology topology = builder.build();
+            final KafkaStreams streams = new KafkaStreams(topology, props);
+            final CountDownLatch latch = new CountDownLatch(1);
+    
+            // ... same as Pipe.java above
+        }
+    }
+
+[Previous](/41/streams/quickstart) [Next](/41/streams/core-concepts)
+
+  * [Documentation](/documentation)
+  * [Kafka Streams](/streams)
+
+
diff --git a/content/en/41/streams/upgrade-guide.md b/content/en/41/streams/upgrade-guide.md
new file mode 100644
index 000000000..9a597cde7
--- /dev/null
+++ b/content/en/41/streams/upgrade-guide.md
@@ -0,0 +1,885 @@
+---
+title: Upgrade Guide
+description:
+weight: 6
+tags: ['kafka', 'docs']
+aliases:
+keywords:
+type: docs
+---
+
+# Upgrade Guide and API Changes
+
+[Introduction](/41/streams/) [Run Demo App](/41/streams/quickstart) [Tutorial: Write App](/41/streams/tutorial) [Concepts](/41/streams/core-concepts) [Architecture](/41/streams/architecture) [Developer Guide](/41/streams/developer-guide/) [Upgrade](/41/streams/upgrade-guide)
+
+Upgrading from any older version to 4.1.0 is possible: if upgrading from 3.4 or below, you will need to do two rolling bounces, where during the first rolling bounce phase you set the config `upgrade.from="older version"` (possible values are `"0.10.0" - "3.4"`) and during the second you remove it. This is required to safely handle 3 changes. The first is the introduction of the new cooperative rebalancing protocol of the embedded consumer.
The second is a change in foreign-key join serialization format. Note that you will remain using the old eager rebalancing protocol if you skip or delay the second rolling bounce, but you can safely switch over to cooperative at any time once the entire group is on 2.4+ by removing the config value and bouncing. For more details please refer to [KIP-429](https://cwiki.apache.org/confluence/x/vAclBg). The third is a change in the serialization format for an internal repartition topic. For more details, please refer to [KIP-904](https://cwiki.apache.org/confluence/x/P5VbDg): + + * prepare your application instances for a rolling bounce and make sure that config `upgrade.from` is set to the version from which it is being upgrade. + * bounce each instance of your application once + * prepare your newly deployed 4.1.0 application instances for a second round of rolling bounces; make sure to remove the value for config `upgrade.from` + * bounce each instance of your application once more to complete the upgrade + + + +As an alternative, an offline upgrade is also possible. Upgrading from any versions as old as 0.10.0.x to 4.1.0 in offline mode require the following steps: + + * stop all old (e.g., 0.10.0.x) application instances + * update your code and swap old code and jar file with new code and new jar file + * restart all new (4.1.0) application instances + + + +Note: The cooperative rebalancing protocol has been the default since 2.4, but we have continued to support the eager rebalancing protocol to provide users an upgrade path. This support will be dropped in a future release, so any users still on the eager protocol should prepare to finish upgrading their applications to the cooperative protocol in version 3.1. This only affects users who are still on a version older than 2.4, and users who have upgraded already but have not yet removed the `upgrade.from` config that they set when upgrading from a version below 2.4. Users fitting into the latter case will simply need to unset this config when upgrading beyond 3.1, while users in the former case will need to follow a slightly different upgrade path if they attempt to upgrade from 2.3 or below to a version above 3.1. Those applications will need to go through a bridge release, by first upgrading to a version between 2.4 - 3.1 and setting the `upgrade.from` config, then removing that config and upgrading to the final version above 3.1. See [KAFKA-8575](https://issues.apache.org/jira/browse/KAFKA-8575) for more details. + +For a table that shows Streams API compatibility with Kafka broker versions, see Broker Compatibility. + +# Notable compatibility changes in past releases + +Starting in version 4.0.0, Kafka Streams will only be compatible when running against brokers on version 2.1 or higher. Additionally, exactly-once semantics (EOS) will require brokers to be at least version 2.5. + +Downgrading from 3.5.x or newer version to 3.4.x or older version needs special attention: Since 3.5.0 release, Kafka Streams uses a new serialization format for repartition topics. This means that older versions of Kafka Streams would not be able to recognize the bytes written by newer versions, and hence it is harder to downgrade Kafka Streams with version 3.5.0 or newer to older versions in-flight. For more details, please refer to [KIP-904](https://cwiki.apache.org/confluence/x/P5VbDg). For a downgrade, first switch the config from `"upgrade.from"` to the version you are downgrading to. 
This disables writing of the new serialization format in your application. It's important to wait in this state long enough to make sure that the application has finished processing any "in-flight" messages written into the repartition topics in the new serialization format. Afterwards, you can downgrade your application to a pre-3.5.x version. + +Downgrading from 3.0.x or newer version to 2.8.x or older version needs special attention: Since 3.0.0 release, Kafka Streams uses a newer RocksDB version whose on-disk format changed. This means that old versioned RocksDB would not be able to recognize the bytes written by that newer versioned RocksDB, and hence it is harder to downgrade Kafka Streams with version 3.0.0 or newer to older versions in-flight. Users need to wipe out the local RocksDB state stores written by the new versioned Kafka Streams before swapping in the older versioned Kafka Streams bytecode, which would then restore the state stores with the old on-disk format from the changelogs. + +Kafka Streams does not support running multiple instances of the same application as different processes on the same physical state directory. Starting in 2.8.0 (as well as 2.7.1 and 2.6.2), this restriction will be enforced. If you wish to run more than one instance of Kafka Streams, you must configure them with different values for `state.dir`. + +Starting in Kafka Streams 2.6.x, a new processing mode is available, named EOS version 2. This can be configured by setting `"processing.guarantee"` to `"exactly_once_v2"` for application versions 3.0+, or setting it to `"exactly_once_beta"` for versions between 2.6 and 2.8. To use this new feature, your brokers must be on version 2.5.x or newer. If you want to upgrade your EOS application from an older version and enable this feature in version 3.0+, you first need to upgrade your application to version 3.0.x, staying on `"exactly_once"`, and then do second round of rolling bounces to switch to `"exactly_once_v2"`. If you are upgrading an EOS application from an older (pre-2.6) version to a version between 2.6 and 2.8, follow these same steps but with the config `"exactly_once_beta"` instead. No special steps are required to upgrade an application using `"exactly_once_beta"` from version 2.6+ to 3.0 or higher: you can just change the config from `"exactly_once_beta"` to `"exactly_once_v2"` during the rolling upgrade. For a downgrade, do the reverse: first switch the config from `"exactly_once_v2"` to `"exactly_once"` to disable the feature in your 2.6.x application. Afterward, you can downgrade your application to a pre-2.6.x version. + +Since 2.6.0 release, Kafka Streams depends on a RocksDB version that requires MacOS 10.14 or higher. + +To run a Kafka Streams application version 2.2.1, 2.3.0, or higher a broker version 0.11.0 or higher is required and the on-disk message format must be 0.11 or higher. Brokers must be on version 0.10.1 or higher to run a Kafka Streams application version 0.10.1 to 2.2.0. Additionally, on-disk message format must be 0.10 or higher to run a Kafka Streams application version 1.0 to 2.2.0. For Kafka Streams 0.10.0, broker version 0.10.0 or higher is required. 
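+As a rough sketch of the `processing.guarantee` switch described above for the EOS upgrade (property values follow `StreamsConfig`; the application id and bootstrap servers are illustrative):
+
+    
+    // Assumed imports: java.util.Properties, org.apache.kafka.streams.StreamsConfig
+    Properties props = new Properties();
+    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-eos-app");          // illustrative id
+    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+    
+    // First round of rolling bounces (pre-upgrade application version): keep "exactly_once".
+    // props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, "exactly_once");
+    
+    // Second round of rolling bounces: switch to EOS version 2 (brokers must be on 2.5 or newer).
+    props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);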
+ +In deprecated `KStreamBuilder` class, when a `KTable` is created from a source topic via `KStreamBuilder.table()`, its materialized state store will reuse the source topic as its changelog topic for restoring, and will disable logging to avoid appending new updates to the source topic; in the `StreamsBuilder` class introduced in 1.0, this behavior was changed accidentally: we still reuse the source topic as the changelog topic for restoring, but will also create a separate changelog topic to append the update records from source topic to. In the 2.0 release, we have fixed this issue and now users can choose whether or not to reuse the source topic based on the `StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG`: if you are upgrading from the old `KStreamBuilder` class and hence you need to change your code to use the new `StreamsBuilder`, you should set this config value to `StreamsConfig#OPTIMIZE` to continue reusing the source topic; if you are upgrading from 1.0 or 1.1 where you are already using `StreamsBuilder` and hence have already created a separate changelog topic, you should set this config value to `StreamsConfig#NO_OPTIMIZATION` when upgrading to 4.1.0 in order to use that changelog topic for restoring the state store. More details about the new config `StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG` can be found in [KIP-295](https://cwiki.apache.org/confluence/x/V53LB). + +# Streams API changes in 4.1.0 + +## Early Access of the Streams Rebalance Protocol + +The Streams Rebalance Protocol is a broker-driven rebalancing system designed specifically for Kafka Streams applications. Following the pattern of KIP-848, which moved rebalance coordination of plain consumers from clients to brokers, KIP-1071 extends this model to Kafka Streams workloads. Instead of clients computing new assignments on the client during rebalance events involving all members of the group, assignments are computed continuously on the broker. Instead of using a consumer group, the streams application registers as a streams group with the broker, which manages and exposes all metadata required for coordination of the streams application instances. + +This Early Access release covers a subset of the functionality detailed in [KIP-1071](https://cwiki.apache.org/confluence/display/KAFKA/KIP-1071%3A+Streams+Rebalance+Protocol). Do not use the new protocol in production. The API is subject to change in future releases. + +**What's Included in Early Access** + + * **Core Streams Group Rebalance Protocol:** The `group.protocol=streams` configuration enables the dedicated streams rebalance protocol. This separates streams groups from consumer groups and provides a streams-specific group membership lifecycle and metadata management on the broker. + * **Sticky Task Assignor:** A basic task assignment strategy that minimizes task movement during rebalances is included. + * **Interactive Query Support:** IQ operations are compatible with the new streams protocol. + * **New Admin RPC:** The `StreamsGroupDescribe` RPC provides streams-specific metadata separate from consumer group information, with corresponding access via the `Admin` client. + * **CLI Integration:** You can list, describe, and delete streams groups via the `kafka-streams-groups.sh` script. + + + +**What's Not Included in Early Access** + + * **Static Membership:** Setting a client `instance.id` will be rejected. 
+ * **Topology Updates:** If a topology is changed significantly (e.g., by adding new source topics or changing the number of sub-topologies), a new streams group must be created. + * **High Availability Assignor:** Only the sticky assignor is supported. + * **Regular Expressions:** Pattern-based topic subscription is not supported. + * **Reset Operations:** CLI offset reset operations are not supported. + * **Protocol Migration:** Group migration is not available between the classic and new streams protocols. + + + +**Why Use the Streams Rebalance Protocol?** + + * **Broker-Driven Coordination:** Centralizes task assignment logic on brokers instead of the client. This provides consistent, authoritative task assignment decisions from a single coordination point and reduces the potential for split-brain scenarios. + * **Faster, More Stable Rebalances:** Reduces rebalance duration and impact by removing the global synchronization point. This minimizes application downtime during membership changes or failures. + * **Better Observability:** Provides dedicated metrics and admin interfaces that separate streams from consumer groups, leading to clearer troubleshooting with broker-side observability. + + + +Enabling the protocol requires the brokers and clients are running Apache Kafka 4.1. It should be enabled only on new clusters for testing purposes. Set `unstable.feature.versions.enable=true` for controllers and brokers, and set `unstable.api.versions.enable=true` on the brokers as well. In your Kafka Streams application configuration, set `group.protocol=streams`. After the new feature is configured, check `kafka-features.sh --bootstrap-server localhost:9092 describe` and `streams.version` should now have FinalizedVersionLevel 1. + +Migration between the classic consumer group protocol and the Streams Rebalance Protocol is not supported in either direction. An application using this protocol must use a new `application.id` that has not been used by any application on the classic protocol. Furthermore, this ID must not be in use as a `group.id` by any consumer ("classic" or "consumer") nor share-group application. It is also possible to delete a previous consumer group using `kafka-consumer-groups.sh` before starting the application with the new protocol, which will however also delete all offsets for that group. + +To operate the new streams groups, explore the options of `kafka-streams-groups.sh` to list, describe, and delete streams groups. In the new protocol, `session.timeout.ms`, `heartbeat.interval.ms` and `num.standby.replicas` are group-level configurations, which are ignored when they are set on the client side. Use the `kafka-configs.sh` tool to set these configurations, for example: `kafka-configs.sh --bootstrap-server localhost:9092 --alter --entity-type groups --entity-name wordcount --add-config streams.num.standby.replicas=1`. + +Please provide feedback on this feature via the [Kafka mailing lists](https://kafka.apache.org/contact) or by filing [JIRA issues](https://kafka.apache.org/contributing). + +## Other changes + +The introduction of [KIP-1111](https://cwiki.apache.org/confluence/x/4Y_MEw) enables you to enforce explicit naming for all internal resources of the topology, including internal topics (e.g., changelog and repartition topics) and their associated state stores. This ensures that every internal resource is named before the Kafka Streams application is deployed, which is essential for upgrading your topology. 
You can enable this feature via `StreamsConfig` using the `StreamsConfig#ENSURE_EXPLICIT_INTERNAL_RESOURCE_NAMING_CONFIG` parameter. When set to `true`, the application will refuse to start if any internal resource has an auto-generated name. + +# Streams API changes in 4.0.0 + +In this release, eos-v1 (Exactly Once Semantics version 1) is no longer supported. To use eos-v2, brokers must be running version 2.5 or later. Additionally, all deprecated methods, classes, APIs, and config parameters up to and including AK 3.5 release have been removed. A few important ones are listed below. The full list can be found in [KAFKA-12822](https://issues.apache.org/jira/browse/KAFKA-12822). + + * [Old processor APIs](https://issues.apache.org/jira/browse/KAFKA-12829) + * ["transformer" methods and classes in both Java and Scala](https://issues.apache.org/jira/browse/KAFKA-16339) + * migrating from `KStreams#transformValues()` to `KStreams.processValues()` might not be safe due to [KAFKA-19668](https://issues.apache.org/jira/browse/KAFKA-19668). Please refer to the [migration guide](/41/streams/developer-guide/dsl-api.html#transformers-removal-and-migration-to-processors) for more details. + * ["transformer" methods and classes in both Java and Scala](https://issues.apache.org/jira/browse/KAFKA-16339) + * [kstream.KStream#branch in both Java and Scala](https://issues.apache.org/jira/browse/KAFKA-12824) + * [builder methods for Time/Session/Join/SlidingWindows](https://issues.apache.org/jira/browse/KAFKA-16332) + * [KafkaStreams#setUncaughtExceptionHandler()](https://issues.apache.org/jira/browse/KAFKA-12827) + + + +In this release the `ClientInstanceIds` instance stores the global consumer`Uuid` for the [KIP-714](https://cwiki.apache.org/confluence/x/2xRRCg#KIP714:Clientmetricsandobservability-Clientidentificationandtheclientinstanceid) id with a key of global stream-thread name appended with `"-global-consumer"` where before it was only the global stream-thread name. + +In this release two configs `default.deserialization.exception.handler` and `default.production.exception.handler` are deprecated, as they don't have any overwrites, which is described in [KIP-1056](https://cwiki.apache.org/confluence/x/Y41yEg) You can refer to new configs via `deserialization.exception.handler` and `production.exception.handler`. + +In previous release, a new version of the Processor API was introduced and the old Processor API was incrementally replaced and deprecated. [KIP-1070](https://cwiki.apache.org/confluence/x/sxCTEg) follow this path by deprecating `MockProcessorContext`, `Transformer`, `TransformerSupplier`, `ValueTransformer`, and `ValueTransformerSupplier`. + +Previously, the `ProductionExceptionHandler` was not invoked on a (retriable) `TimeoutException`. With Kafka Streams 4.0, the handler is called, and the default handler would return `RETRY` to not change existing behavior. However, a custom handler can now decide to break the infinite retry loop by returning either `CONTINUE` or `FAIL` ([KIP-1065](https://cwiki.apache.org/confluence/x/LQ6TEg)). + +In this release, Kafka Streams metrics can be collected broker side via the KIP-714 broker-plugin. For more detailed information, refer to [KIP-1076](https://cwiki.apache.org/confluence/x/XA-OEg) document please. + +[KIP-1077](https://cwiki.apache.org/confluence/x/eA-OEg) deprecates the `ForeachProcessor` class. 
This change is aimed at improving the organization and clarity of the Kafka Streams API by ensuring that internal classes are not exposed in public packages. + +[KIP-1078](https://cwiki.apache.org/confluence/x/hg-OEg) deprecates the leaking getter methods in the `Joined` helper class. These methods are deprecated without a replacement for future removal, as they don't add any value to Kafka Streams users. + +To ensures better encapsulation and organization of configuration documentation within Kafka Streams, [KIP-1085](https://cwiki.apache.org/confluence/x/hYz9Eg) deprecate certain public doc description variables that are only used within the `StreamsConfig` or `TopologyConfig` classes. Additionally, the unused variable `DUMMY_THREAD_INDEX` will also be deprecated. + +Due to the removal of the already deprecated `#through` method in Kafka Streams, the `intermediateTopicsOption` of `StreamsResetter` tool in Apache Kafka is not needed any more and therefore is deprecated ([KIP-1087](https://cwiki.apache.org/confluence/x/Vo39Eg)). + +Since string metrics cannot be collected on the broker side (KIP-714), [KIP-1091](https://cwiki.apache.org/confluence/x/IgstEw) introduces numeric counterparts to allow proper broker-side metric collection for Kafka Streams applications. These metrics will be available at the `INFO` recording level, and a thread-level metric with a String value will be available for users leveraging Java Management Extensions (`JMX`). + +In order to reduce storage overhead and improve API usability, a new method in the Java and Scala APIs that accepts a BiFunction for foreign key extraction is introduced by [KIP-1104](https://cwiki.apache.org/confluence/x/gIuMEw). KIP-1104 allows foreign key extraction from both the key and value in KTable joins in Apache Kafka. Previously, foreign key joins in KTables only allowed extraction from the value, which led to data duplication and potential inconsistencies. This enhancement introduces a new method in the Java and Scala APIs that accepts a BiFunction for foreign key extraction, enabling more intuitive and efficient joins. The existing methods will be deprecated but not removed, ensuring backward compatibility. This change aims to reduce storage overhead and improve API usability. + +With introduction of [KIP-1106](https://cwiki.apache.org/confluence/x/NIyMEw), the existing `Topology.AutoOffsetReset` is deprecated and replaced with a new class `org.apache.kafka.streams.AutoOffsetReset` to capture the reset strategies. New methods will be added to the `org.apache.kafka.streams.Topology` and `org.apache.kafka.streams.kstream.Consumed` classes to support the new reset strategy. These changes aim to provide more flexibility and efficiency in managing offsets, especially in scenarios involving long-term storage and infinite retention. + +You can now configure your topology with a `ProcessorWrapper`, which allows you to access and optionally wrap/replace any processor in the topology by injecting an alternative `ProcessorSupplier` in its place. This can be used to peek records and access the processor context even for DSL operators, for example to implement a logging or tracing framework, or to aid in testing or debugging scenarios. You must implement the `ProcessorWrapper` interface and then pass the class or class name into the configs via the new `StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG` config. 
NOTE: this config is applied during the topology building phase, and therefore will not take effect unless the config is passed in when creating the StreamsBuilder (DSL) or Topology(PAPI) objects. You MUST use the StreamsBuilder/Topology constructor overload that accepts a TopologyConfig parameter for the `StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG` to be picked up. See [KIP-1112](https://cwiki.apache.org/confluence/x/TZCMEw) for more details. + +Upgraded RocksDB dependency to version 9.7.3 (from 7.9.2). This upgrade incorporates various improvements and optimizations within RocksDB. However, it also introduces some API changes. The `org.rocksdb.AccessHint` class, along with its associated methods, has been removed. Several methods related to compressed block cache configuration in the `BlockBasedTableConfig` class have been removed, including `blockCacheCompressedNumShardBits`, `blockCacheCompressedSize`, and their corresponding setters. These functionalities are now consolidated under the `cache` option, and developers should configure their compressed block cache using the `setCache` method instead. The `NO_FILE_CLOSES` field has been removed from the `org.rocksdb.TickerTypeenum` as a result the `number-open-files` metrics does not work as expected. Metric `number-open-files` returns constant -1 from now on until it will officially be removed. The `org.rocksdb.Options.setLogger()` method now accepts a `LoggerInterface` as a parameter instead of the previous `Logger`. Some data types used in RocksDB's Java API have been modified. These changes, along with the removed class, field, and new methods, are primarily relevant to users implementing custom RocksDB configurations. These changes are expected to be largely transparent to most Kafka Streams users. However, those employing advanced RocksDB customizations within their Streams applications, particularly through the `rocksdb.config.setter`, are advised to consult the detailed RocksDB 9.7.3 changelog to ensure a smooth transition and adapt their configurations as needed. Specifically, users leveraging the removed `AccessHint` class, the removed methods from the `BlockBasedTableConfig` class, the `NO_FILE_CLOSES` field from `TickerType`, or relying on the previous signature of `setLogger()` will need to update their implementations. + +# Streams API changes in 3.9.0 + +The introduction of [KIP-1033](https://cwiki.apache.org/confluence/x/xQniEQ) enables you to provide a processing exception handler to manage exceptions during the processing of a record rather than throwing the exception all the way out of your streams application. You can provide the configs via the `StreamsConfig` as `StreamsConfig#PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG`. The specified handler must implement the `org.apache.kafka.streams.errors.ProcessingExceptionHandler` interface. + +Kafka Streams now allows to customize the logging interval of stream-thread runtime summary, via the newly added config `log.summary.interval.ms`. By default, the summary is logged every 2 minutes. More details can be found in [KIP-1049](https://cwiki.apache.org/confluence/x/fwpeEg). + +# Streams API changes in 3.8.0 + +Kafka Streams now supports customizable task assignment strategies via the `task.assignor.class` configuration. The configuration can be set to the fully qualified class name of a custom task assignor implementation that has to extend the new `org.apache.kafka.streams.processor.assignment.TaskAssignor` interface. 
The new configuration also allows users to bring back the behavior of the old task assignor `StickyTaskAssignor` that was used before the introduction of the `HighAvailabilityTaskAssignor`. If no custom task assignor is configured, the default task assignor `HighAvailabilityTaskAssignor` is used. If you were using the `internal.task.assignor.class` config, you should switch to using the new `task.assignor.class` config instead, as the internal config will be removed in a future release. If you were previously plugging in the `StickyTaskAssignor` via the legacy `internal.task.assignor.class` config, you will need to make sure that you are importing the new `org.apache.kafka.streams.processor.assignment.StickTaskAssignor` when you switch over to the new `task.assignor.class` config, which is a version of the `StickyTaskAssignor` that implements the new public `TaskAssignor` interface. For more details, see the public interface section of [KIP-924](https://cwiki.apache.org/confluence/x/PxU0Dw). + +The Processor API now support so-called read-only state stores, added via [KIP-813](https://cwiki.apache.org/confluence/x/q53kCw). These stores don't have a dedicated changelog topic, but use their source topic for fault-tolerance, similar to `KTables` with source-topic optimization enabled. + +To improve detection of leaked state store iterators, we added new store-level metrics to track the number and age of open iterators. The new metrics are `num-open-iterators`, `iterator-duration-avg`, `iterator-duration-max` and `oldest-iterator-open-since-ms`. These metrics are available for all state stores, including RocksDB, in-memory, and custom stores. More details can be found in [KIP-989](https://cwiki.apache.org/confluence/x/9KCzDw). + +# Streams API changes in 3.7.0 + +We added a new method to `KafkaStreams`, namely `KafkaStreams#setStandbyUpdateListener()` in [KIP-988](https://cwiki.apache.org/confluence/x/yqCzDw), in which users can provide their customized implementation of the newly added `StandbyUpdateListener` interface to continuously monitor changes to standby tasks. + +IQv2 supports `RangeQuery` that allows to specify unbounded, bounded, or half-open key-ranges, which return data in unordered (byte[]-lexicographical) order (per partition). [KIP-985](https://cwiki.apache.org/confluence/x/eKCzDw) extends this functionality by adding `.withDescendingKeys()` and `.withAscendingKeys()`to allow user to receive data in descending or ascending order. + +[KIP-992](https://cwiki.apache.org/confluence/x/TYxEE) adds two new query types, namely `TimestampedKeyQuery` and `TimestampedRangeQuery`. Both should be used to query a timestamped key-value store, to retrieve a `ValueAndTimestamp` result. The existing `KeyQuery` and `RangeQuery` are changed to always return the value only for timestamped key-value stores. + +IQv2 adds support for `MultiVersionedKeyQuery` (introduced in [KIP-968](https://cwiki.apache.org/confluence/x/WpSzDw)) that allows retrieving a set of records from a versioned state store for a given key and a specified time range. Users have to use `fromTime(Instant)` and/or `toTime(Instant)` to specify a half or a complete time range. + +IQv2 adds support for `VersionedKeyQuery` (introduced in [KIP-960](https://cwiki.apache.org/confluence/x/qo_zDw)) that allows retrieving a single record from a versioned state store based on its key and timestamp. Users have to use the `asOf(Instant)` method to define a query that returns the record's version for the specified timestamp. 
To be more precise, the key query returns the record with the greatest timestamp `<= Instant`.
+
+The non-null key requirements for Kafka Streams join operators were relaxed as part of [KIP-962](https://cwiki.apache.org/confluence/x/f5CzDw). The behavior of the following operators changed.
+
+  * left join KStream-KStream: no longer drops left records with a null key; instead the ValueJoiner is called with 'null' for the right value.
+  * outer join KStream-KStream: no longer drops left/right records with a null key; instead the ValueJoiner is called with 'null' for the right/left value.
+  * left-foreign-key join KTable-KTable: no longer drops left records with a null foreign key returned by the ForeignKeyExtractor; instead the ValueJoiner is called with 'null' for the right value.
+  * left join KStream-KTable: no longer drops left records with a null key; instead the ValueJoiner is called with 'null' for the right value.
+  * left join KStream-GlobalTable: no longer drops records when the KeyValueMapper returns 'null'; instead the ValueJoiner is called with 'null' for the right value.
+
+Stream-DSL users who want to keep the current behavior can prepend a `.filter()` operator to the aforementioned operators and filter accordingly. The following snippets illustrate how to keep the old behavior.
+
+    
+    
+    //left join KStream-KStream
+    leftStream
+        .filter((key, value) -> key != null)
+        .leftJoin(rightStream, (leftValue, rightValue) -> join(leftValue, rightValue), windows);
+    
+    //outer join KStream-KStream
+    leftStream
+        .filter((key, value) -> key != null)
+        .outerJoin(rightStream.filter((key, value) -> key != null),
+                   (leftValue, rightValue) -> join(leftValue, rightValue),
+                   windows);
+    
+    //left-foreign-key join KTable-KTable
+    Function<String, String> foreignKeyExtractor = leftValue -> ...
+    leftTable
+        .filter((key, value) -> foreignKeyExtractor.apply(value) != null)
+        .leftJoin(rightTable, foreignKeyExtractor, (leftValue, rightValue) -> join(leftValue, rightValue), Named.as("left-foreign-key-table-join"));
+    
+    //left join KStream-KTable
+    leftStream
+        .filter((key, value) -> key != null)
+        .leftJoin(kTable, (k, leftValue, rightValue) -> join(leftValue, rightValue));
+    
+    //left join KStream-GlobalTable
+    KeyValueMapper<String, String, String> keyValueMapper = (key, value) -> ...;
+    leftStream
+        .filter((key, value) -> keyValueMapper.apply(key, value) != null)
+        .leftJoin(globalTable, keyValueMapper, (leftValue, rightValue) -> join(leftValue, rightValue));
+    
+
+The `default.dsl.store` config was deprecated in favor of the new `dsl.store.suppliers.class` config to allow custom state store implementations to be configured as the default. If you currently specify `default.dsl.store=ROCKS_DB` or `default.dsl.store=IN_MEMORY`, replace those configurations with `dsl.store.suppliers.class=BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers.class` and `dsl.store.suppliers.class=BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers.class`, respectively.
+
+A new configuration option `balance_subtopology` for `rack.aware.assignment.strategy` was introduced in the 3.7 release. For more information, including how it can be enabled and further configured, see the [**Kafka Streams Developer Guide**](/41/streams/developer-guide/config-streams.html#rack-aware-assignment-strategy).
+
+# Streams API changes in 3.6.0
+
+Rack aware task assignment was introduced in [KIP-925](https://cwiki.apache.org/confluence/x/CQ40Dw). Rack aware task assignment can be enabled for `StickyTaskAssignor` or `HighAvailabilityTaskAssignor` to compute task assignments which can minimize cross rack traffic under certain conditions.
For more information, including how it can be enabled and further configured, see the [**Kafka Streams Developer Guide**](/41/streams/developer-guide/config-streams.html#rack-aware-assignment-strategy).
+
+IQv2 supports a `RangeQuery` that allows specifying unbounded, bounded, or half-open key-ranges. Users have to use `withUpperBound(K)`, `withLowerBound(K)`, or `withNoBounds()` to specify half-open or unbounded ranges, but cannot use `withRange(K lower, K upper)` for the same. [KIP-941](https://cwiki.apache.org/confluence/x/_Rk0Dw) closes this gap by allowing `null` to be passed in as upper and lower bound (with the semantics "no bound") to simplify the usage of the `RangeQuery` class.
+
+KStream-to-KTable joins now have an option for adding a grace period. The grace period is enabled on the `Joined` object using the `withGracePeriod()` method. This change was introduced in [KIP-923](https://cwiki.apache.org/confluence/x/lAs0Dw). To use the grace period option in the Stream-Table join, the table must be [versioned](/41/streams/developer-guide/dsl-api.html#versioned-state-stores). For more information, including how it can be enabled and further configured, see the [**Kafka Streams Developer Guide**](/41/streams/developer-guide/config-streams.html#rack-aware-assignment-strategy).
+
+# Streams API changes in 3.5.0
+
+A new state store type, versioned key-value stores, was introduced in [KIP-889](https://cwiki.apache.org/confluence/x/AIwODg) and [KIP-914](https://cwiki.apache.org/confluence/x/QorFDg). Rather than storing a single record version (value and timestamp) per key, versioned state stores may store multiple record versions per key. This allows versioned state stores to support timestamped retrieval operations to return the latest record (per key) as of a specified timestamp. For more information, including how to upgrade from a non-versioned key-value store to a versioned store in an existing application, see the [Developer Guide](/41/streams/developer-guide/dsl-api.html#versioned-state-stores). Versioned key-value stores are opt-in only; existing applications will not be affected upon upgrading to 3.5 without explicit code changes.
+
+In addition to KIP-889, [KIP-914](https://cwiki.apache.org/confluence/x/QorFDg) updates DSL processing semantics if a user opts in to using the new versioned key-value stores. With the new versioned key-value stores, DSL processing is able to handle out-of-order data better: for example, late records may be dropped and stream-table joins do a timestamp-based lookup into the table. Table aggregations and primary/foreign-key table-table joins are also improved. Note: versioned key-value stores are not supported for global-KTables and don't work with `suppress()`.
+
+[KIP-904](https://cwiki.apache.org/confluence/x/P5VbDg) improves the implementation of KTable aggregations. In general, an input KTable update triggers a result refinement for two rows; however, prior to KIP-904, if both refinements happen to the same result row, two independent updates to the same row are applied, resulting in spurious intermediate results. KIP-904 allows us to detect this case, and to only apply a single update, avoiding spurious intermediate results.
+
+Error handling is improved via [KIP-399](https://cwiki.apache.org/confluence/x/R4nQBQ). The existing `ProductionExceptionHandler` now also covers serialization errors.
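+As a hedged sketch of what a custom handler could look like with this extension (the class name is illustrative, and the `handleSerializationException(ProducerRecord, Exception)` hook is assumed to match the signature introduced by KIP-399):
+
+    
+    // Assumed imports: org.apache.kafka.clients.producer.ProducerRecord,
+    // org.apache.kafka.streams.errors.ProductionExceptionHandler, java.util.Map
+    public class SkipBadRecordsHandler implements ProductionExceptionHandler {
+    
+        @Override
+        public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
+                                                         final Exception exception) {
+            // Any other production error still fails the application.
+            return ProductionExceptionHandlerResponse.FAIL;
+        }
+    
+        @Override
+        public ProductionExceptionHandlerResponse handleSerializationException(final ProducerRecord record,
+                                                                               final Exception exception) {
+            // Added by KIP-399: invoked when a key or value cannot be serialized; drop the poison record.
+            return ProductionExceptionHandlerResponse.CONTINUE;
+        }
+    
+        @Override
+        public void configure(final Map<String, ?> configs) { }
+    }
+
+Such a handler would then be registered via the production exception handler config of the Streams application.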
+
+We added a new Serde type `Boolean` in [KIP-907](https://cwiki.apache.org/confluence/x/pZpbDg).
+
+[KIP-884](https://cwiki.apache.org/confluence/x/AZfGDQ) adds a new config `default.client.supplier` that allows using a custom `KafkaClientSupplier` without any code changes.
+
+# Streams API changes in 3.4.0
+
+[KIP-770](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=186878390) deprecates config `cache.max.bytes.buffering` in favor of the newly introduced config `statestore.cache.max.bytes`. To improve monitoring, two new metrics `input-buffer-bytes-total` and `cache-size-bytes-total` were added at the DEBUG level. Note that the KIP is only partially implemented in the 3.4.0 release, and config `input.buffer.max.bytes` is not available yet.
+
+[KIP-873](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=211883356) enables you to multicast result records to multiple partitions of downstream sink topics and adds functionality for choosing to drop result records without sending. The `Integer StreamPartitioner.partition()` method is deprecated and replaced by the newly added `Optional<Set<Integer>> StreamPartitioner.partitions()` method, which enables returning a set of partitions to send the record to.
+
+[KIP-862](https://cwiki.apache.org/confluence/x/WSf1D) adds a DSL optimization for stream-stream self-joins. The optimization is enabled via a new option `single.store.self.join`, which can be set via the existing config `topology.optimization`. If enabled, the DSL will use a different join processor implementation that uses a single RocksDB store instead of two, to avoid unnecessary data duplication for the self-join case.
+
+[KIP-865](https://cwiki.apache.org/confluence/x/UY9rDQ) updates the Kafka Streams application reset tool's server parameter name to conform to the other Kafka tooling by deprecating the `--bootstrap-servers` parameter and introducing a new `--bootstrap-server` parameter in its place.
+
+# Streams API changes in 3.3.0
+
+Kafka Streams does not send a "leave group" request when an instance is closed. This behavior implies that a rebalance is delayed until `max.poll.interval.ms` has passed. [KIP-812](https://cwiki.apache.org/confluence/x/KZvkCw) introduces the `KafkaStreams.close(CloseOptions)` overload, which allows forcing an instance to leave the group immediately. Note: Due to internal limitations, `CloseOptions` only works for static consumer groups at this point (cf. [KAFKA-16514](https://issues.apache.org/jira/browse/KAFKA-16514) for more details and a fix in some future release).
+
+[KIP-820](https://cwiki.apache.org/confluence/x/yKbkCw) adapts the PAPI type-safety improvement of KIP-478 into the DSL. The existing methods `KStream.transform`, `KStream.flatTransform`, `KStream.transformValues`, and `KStream.flatTransformValues` as well as all overloads of `void KStream.process` are deprecated in favor of the newly added methods:
+
+  * `KStream<KOut, VOut> KStream.process(ProcessorSupplier, ...)`
+  * `KStream<K, VOut> KStream.processValues(FixedKeyProcessorSupplier, ...)`
+
+Both new methods have multiple overloads and return a `KStream` instead of `void` as the deprecated `process()` methods did. In addition, `FixedKeyProcessor`, `FixedKeyRecord`, `FixedKeyProcessorContext`, and `ContextualFixedKeyProcessor` are introduced to guard against disallowed key modification inside `processValues()`. Furthermore, `ProcessingContext` is added for a better interface hierarchy.
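+To make the migration concrete, here is a hedged sketch of an upper-casing value transformation expressed with `processValues()` and a `FixedKeyProcessor` (the `source` stream and the upper-casing logic are illustrative assumptions):
+
+    
+    // Assumed imports: org.apache.kafka.streams.kstream.KStream,
+    // org.apache.kafka.streams.processor.api.FixedKeyProcessor,
+    // org.apache.kafka.streams.processor.api.FixedKeyProcessorContext,
+    // org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier,
+    // org.apache.kafka.streams.processor.api.FixedKeyRecord, java.util.Locale
+    FixedKeyProcessorSupplier<String, String, String> toUpperCase = () ->
+        new FixedKeyProcessor<String, String, String>() {
+            private FixedKeyProcessorContext<String, String> context;
+    
+            @Override
+            public void init(final FixedKeyProcessorContext<String, String> context) {
+                this.context = context;
+            }
+    
+            @Override
+            public void process(final FixedKeyRecord<String, String> record) {
+                // The key cannot be modified here, which is exactly what processValues() enforces.
+                context.forward(record.withValue(record.value().toUpperCase(Locale.getDefault())));
+            }
+        };
+    
+    KStream<String, String> upperCased = source.processValues(toUpperCase);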
**CAUTION:** The newly added `KStream.processValues()` method introduced a regression bug ([KAFKA-19668](https://issues.apache.org/jira/browse/KAFKA-19668)). If you have "merge repartition topics" optimization enabled, it is not safe to migrate from `transformValues()` to `processValues()` in 3.3.0 release. The bug is only fixed with Kafka Streams 4.0.1, 4.1.1, and 4.2.0. For more details, please refer to the [migration guide](/41/streams/developer-guide/dsl-api.html#transformers-removal-and-migration-to-processors). + +Emitting a windowed aggregation result only after a window is closed is currently supported via the `suppress()` operator. However, `suppress()` uses an in-memory implementation and does not support RocksDB. To close this gap, [KIP-825](https://cwiki.apache.org/confluence/x/n7fkCw) introduces "emit strategies", which are built into the aggregation operator directly to use the already existing RocksDB store. `TimeWindowedKStream.emitStrategy(EmitStrategy)` and `SessionWindowedKStream.emitStrategy(EmitStrategy)` allow picking between "emit on window update" (default) and "emit on window close" strategies. Additionally, a few new emit metrics are added, as well as a necessary new method, `SessionStore.findSessions(long, long)`. + +[KIP-834](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=211882832) allows pausing and resuming a Kafka Streams instance. Pausing implies that processing input records and executing punctuations will be skipped; Kafka Streams will continue to poll to maintain its group membership and may commit offsets. In addition to the new methods `KafkaStreams.pause()` and `KafkaStreams.resume()`, it is also supported to check if an instance is paused via the `KafkaStreams.isPaused()` method. + +To improve monitoring of Kafka Streams applications, [KIP-846](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=211886093) adds four new metrics `bytes-consumed-total`, `records-consumed-total`, `bytes-produced-total`, and `records-produced-total` within a new **topic level** scope. The metrics are collected at INFO level for source and sink nodes, respectively. + +# Streams API changes in 3.2.0 + +RocksDB offers many metrics which are critical to monitor and tune its performance. Kafka Streams started to make RocksDB metrics accessible like any other Kafka metric via [KIP-471](https://cwiki.apache.org/confluence/x/A5LiBg) in 2.4.0 release. However, the KIP was only partially implemented, and is now completed with the 3.2.0 release. For a full list of available RocksDB metrics, please consult the [monitoring documentation](/41/#kafka_streams_client_monitoring). + +Kafka Streams ships with RocksDB and in-memory store implementations and users can pick which one to use. However, for the DSL, the choice is a per-operator one, making it cumbersome to switch from the default RocksDB store to in-memory store for all operators, especially for larger topologies. [KIP-591](https://cwiki.apache.org/confluence/x/eCvcC) adds a new config `default.dsl.store` that enables setting the default store for all DSL operators globally. Note that it is required to pass `TopologyConfig` to the `StreamsBuilder` constructor to make use of this new config. + +For multi-AZ deployments, it is desired to assign StandbyTasks to a KafkaStreams instance running in a different AZ than the corresponding active StreamTask. 
[KIP-708](https://cwiki.apache.org/confluence/x/UQ5RCg) enables configuring Kafka Streams instances with a rack-aware StandbyTask assignment strategy, by using the newly added configs `rack.aware.assignment.tags` and the corresponding `client.tag.` prefix.
+
+[KIP-791](https://cwiki.apache.org/confluence/x/I5BnCw) adds a new method `Optional<RecordMetadata> StateStoreContext.recordMetadata()` to expose record metadata. This helps, for example, to provide read-your-writes consistency guarantees in interactive queries.
+
+[Interactive Queries](/streams/developer-guide/interactive-queries.html) allow users to tap into the operational state of Kafka Streams processor nodes. The existing API is tightly coupled with the actual state store interfaces and thus with the internal implementation of the state stores. To break up this tight coupling and allow for building more advanced IQ features, [KIP-796](https://cwiki.apache.org/confluence/x/34xnCw) introduces a completely new IQv2 API, via `StateQueryRequest` and `StateQueryResult` classes, as well as `Query` and `QueryResult` interfaces (plus additional helper classes). In addition, multiple built-in query types were added: `KeyQuery` for key lookups and `RangeQuery` (via [KIP-805](https://cwiki.apache.org/confluence/x/85OqCw)) for key-range queries on key-value stores, as well as `WindowKeyQuery` and `WindowRangeQuery` (via [KIP-806](https://cwiki.apache.org/confluence/x/LJaqCw)) for key and range lookups into windowed stores.
+
+The Kafka Streams DSL may insert so-called repartition topics for certain DSL operators to ensure correct partitioning of data. These topics are configured with infinite retention time, and Kafka Streams purges old data explicitly via "delete record" requests when committing input topic offsets. [KIP-811](https://cwiki.apache.org/confluence/x/JY-kCw) adds a new config `repartition.purge.interval.ms` allowing you to configure the purge interval independently of the commit interval.
+
+# Streams API changes in 3.1.0
+
+The semantics of left/outer stream-stream joins were improved via [KIP-633](https://cwiki.apache.org/confluence/x/Ho2NCg). Previously, a left/outer stream-stream join might have emitted so-called spurious left/outer results, due to an eager-emit strategy. The implementation was changed to emit left/outer join result records only after the join window is closed. The old API to specify the join window, i.e., `JoinWindows.of()` that enables the eager-emit strategy, was deprecated in favor of `JoinWindows.ofTimeDifferenceAndGrace()` and `JoinWindows.ofTimeDifferenceWithNoGrace()`. The new semantics are only enabled if you use the new join window builders.
+Additionally, KIP-633 makes setting a grace period also mandatory for windowed aggregations, i.e., for `TimeWindows` (hopping/tumbling), `SessionWindows`, and `SlidingWindows`. The corresponding builder methods `.of(...)` were deprecated in favor of the new `.ofTimeDifferenceAndGrace()` and `.ofTimeDifferenceWithNoGrace()` methods.
+
+[KIP-761](https://cwiki.apache.org/confluence/x/vAUBCw) adds new metrics that allow tracking blocking times on the underlying consumer and producer clients. Check out the section on [Kafka Streams metrics](/#kafka_streams_monitoring) for more details.
+
+[Interactive Queries](/streams/developer-guide/interactive-queries.html) were improved via [KIP-763](https://cwiki.apache.org/confluence/x/jAoBCw) and [KIP-766](https://cwiki.apache.org/confluence/x/tIIjCw). Range queries now accept `null` as lower/upper key-range bound to indicate an open-ended lower/upper bound.
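+For example, an open-ended range scan over the tutorial's `counts-store` could be sketched as follows (the `streams` instance, store name, and key bound are assumptions for this snippet):
+
+    
+    // Assumed imports: org.apache.kafka.streams.StoreQueryParameters,
+    // org.apache.kafka.streams.state.KeyValueIterator,
+    // org.apache.kafka.streams.state.QueryableStoreTypes,
+    // org.apache.kafka.streams.state.ReadOnlyKeyValueStore
+    ReadOnlyKeyValueStore<String, Long> store =
+        streams.store(StoreQueryParameters.fromNameAndType("counts-store", QueryableStoreTypes.keyValueStore()));
+    
+    // null lower bound = open-ended: iterate over all keys up to (and including) "m".
+    try (KeyValueIterator<String, Long> range = store.range(null, "m")) {
+        range.forEachRemaining(kv -> System.out.println(kv.key + ": " + kv.value));
+    }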
+ +Foreign-key table-table joins now support custom partitioners via [KIP-775](https://cwiki.apache.org/confluence/x/-QhACw). Previously, if an input table was partitioned by a non-default partitioner, joining records might fail. With KIP-775 you now can pass a custom `StreamPartitioner` into the join using the newly added `TableJoined` object. + +# Streams API changes in 3.0.0 + +We improved the semantics of [task idling (`max.task.idle.ms`)](/streams/developer-guide/config-streams.html#max-task-idle-ms). Now Streams provides stronger in-order join and merge processing semantics. Streams's new default pauses processing on tasks with multiple input partitions when one of the partitions has no data buffered locally but has a non-zero lag. In other words, Streams will wait to fetch records that are already available on the broker. This results in improved join semantics, since it allows Streams to interleave the two input partitions in timestamp order instead of just processing whichever partition happens to be buffered. There is an option to disable this new behavior, and there is also an option to make Streams wait even longer for new records to be _produced_ to the input partitions, which you can use to get stronger time semantics when you know some of your producers may be slow. See the [config reference](/streams/developer-guide/config-streams.html#max-task-idle-ms) for more information, and [KIP-695](https://cwiki.apache.org/confluence/x/JSXZCQ) for the larger context of this change. + +Interactive Queries may throw new exceptions for different errors: + + * `UnknownStateStoreException`: If the specified store name does not exist in the topology, an `UnknownStateStoreException` will be thrown instead of the former `InvalidStateStoreException`. + * `StreamsNotStartedException`: If Streams state is `CREATED`, a `StreamsNotStartedException` will be thrown. + * `InvalidStateStorePartitionException`: If the specified partition does not exist, a `InvalidStateStorePartitionException` will be thrown. + + + +See [KIP-216](https://cwiki.apache.org/confluence/x/0JpzB) for more information. + +We deprecated the StreamsConfig `processing.guarantee` configuration value `"exactly_once"` (for EOS version 1) in favor of the improved EOS version 2, formerly configured via `"exactly_once_beta`. To avoid confusion about the term "beta" in the config name and highlight the production-readiness of EOS version 2, we have also renamed "eos-beta" to "eos-v2" and deprecated the configuration value `"exactly_once_beta"`, replacing it with a new configuration value `"exactly_once_v2"` Users of exactly-once semantics should plan to migrate to the eos-v2 config and prepare for the removal of the deprecated configs in 4.0 or after at least a year from the release of 3.0, whichever comes last. Note that eos-v2 requires broker version 2.5 or higher, like eos-beta, so users should begin to upgrade their kafka cluster if necessary. See [KIP-732](https://cwiki.apache.org/confluence/x/zJONCg) for more details. + +We removed the default implementation of `RocksDBConfigSetter#close()`. + +We dropped the default 24 hours grace period for windowed operations such as Window or Session aggregates, or stream-stream joins. This period determines how long after a window ends any out-of-order records will still be processed. Records coming in after the grace period has elapsed are considered late and will be dropped. 
But in operators such as suppression, a large grace period has the drawback of incurring an equally large output latency. The current API made it all too easy to miss the grace period config completely, leading you to wonder why your application seems to produce no output -- it actually is, but not for 24 hours. + +To prevent accidentally or unknowingly falling back to the default 24hr grace period, we deprecated all of the existing static constructors for the `Windows` classes (such as `TimeWindows#of`). These are replaced by new static constructors of two flavors: `#ofSizeAndGrace` and `#ofSizeWithNoGrace` (these are for the `TimeWindows` class; analogous APIs exist for the `JoinWindows`, `SessionWindows`, and SlidingWindows classes). With these new APIs you are forced to set the grace period explicitly, or else consciously choose to opt out by selecting the `WithNoGrace` flavor which sets it to 0 for situations where you really don't care about the grace period, for example during testing or when playing around with Kafka Streams for the first time. Note that using the new APIs for the `JoinWindows` class will also enable a fix for spurious left/outer join results, as described in the following paragraph. For more details on the grace period and new static constructors, see [KIP-633](https://cwiki.apache.org/confluence/x/Ho2NCg) + +Additionally, in older versions Kafka Streams emitted stream-stream left/outer join results eagerly. This behavior may lead to spurious left/outer join result records. In this release, we changed the behavior to avoid spurious results and left/outer join result are only emitted after the join window is closed, i.e., after the grace period elapsed. To maintain backward compatibility, the old API `JoinWindows#of(timeDifference)` preserves the old eager-emit behavior and only the new APIs `JoinWindows#ofTimeDifferenceAndGrace()` and `JoinsWindows#ofTimeDifferenceNoGrace` enable the new behavior. Check out [KAFKA-10847](https://issues.apache.org/jira/browse/KAFKA-10847) for more information. + +The public `topicGroupId` and `partition` fields on TaskId have been deprecated and replaced with getters. Please migrate to using the new `TaskId.subtopology()` (which replaces `topicGroupId`) and `TaskId.partition()` APIs instead. Also, the `TaskId#readFrom` and `TaskId#writeTo` methods have been deprecated and will be removed, as they were never intended for public use. We have also deprecated the `org.apache.kafka.streams.processor.TaskMetadata` class and introduced a new interface `org.apache.kafka.streams.TaskMetadata` to be used instead. This change was introduced to better reflect the fact that `TaskMetadata` was not meant to be instantiated outside of Kafka codebase. Please note that the new `TaskMetadata` offers APIs that better represent the task id as an actual `TaskId` object instead of a String. Please migrate to the new `org.apache.kafka.streams.TaskMetadata` which offers these better methods, for example, by using the new `ThreadMetadata#activeTasks` and `ThreadMetadata#standbyTasks`. `org.apache.kafka.streams.processor.ThreadMetadata` class is also now deprecated and the newly introduced interface `org.apache.kafka.streams.ThreadMetadata` is to be used instead. In this new `ThreadMetadata` interface, any reference to the deprecated `TaskMetadata` is replaced by the new interface. Finally, also `org.apache.kafka.streams.state.StreamsMetadata` has been deprecated. Please migrate to the new `org.apache.kafka.streams.StreamsMetadata`. 
We have deprecated several methods under `org.apache.kafka.streams.KafkaStreams` that returned the aforementioned deprecated classes: + + * Users of `KafkaStreams#allMetadata` are meant to migrate to the new `KafkaStreams#metadataForAllStreamsClients`. + * Users of `KafkaStreams#allMetadataForStore(String)` are meant to migrate to the new `KafkaStreams#streamsMetadataForStore(String)`. + * Users of `KafkaStreams#localThreadsMetadata` are meant to migrate to the new `KafkaStreams#metadataForLocalThreads`. + + + +See [KIP-740](https://cwiki.apache.org/confluence/x/vYTOCg) and [KIP-744](https://cwiki.apache.org/confluence/x/XIrOCg) for more details. + +We removed the following deprecated APIs: + + * `--zookeeper` flag of the application reset tool: deprecated in Kafka 1.0.0 ([KIP-198](https://cwiki.apache.org/confluence/x/6J1jB)). + * `--execute` flag of the application reset tool: deprecated in Kafka 1.1.0 ([KIP-171](https://cwiki.apache.org/confluence/x/ApI7B)). + * `StreamsBuilder#addGlobalStore` (one overload): deprecated in Kafka 1.1.0 ([KIP-233](https://cwiki.apache.org/confluence/x/vKpzB)). + * `ProcessorContext#forward` (some overloads): deprecated in Kafka 2.0.0 ([KIP-251](https://cwiki.apache.org/confluence/x/Ih6HB)). + * `WindowBytesStoreSupplier#segments`: deprecated in Kafka 2.1.0 ([KIP-319](https://cwiki.apache.org/confluence/x/mQU0BQ)). + * `segments, until, maintainMs` on `TimeWindows`, `JoinWindows`, and `SessionWindows`: deprecated in Kafka 2.1.0 ([KIP-328](https://cwiki.apache.org/confluence/x/sQU0BQ)). + * Overloaded `JoinWindows#of, before, after`, `SessionWindows#with`, `TimeWindows#of, advanceBy`, `UnlimitedWindows#startOn` and `KafkaStreams#close` with `long` typed parameters: deprecated in Kafka 2.1.0 ([KIP-358](https://cwiki.apache.org/confluence/x/IBNPBQ)). + * Overloaded `KStream#groupBy, groupByKey` and `KTable#groupBy` with `Serialized` parameter: deprecated in Kafka 2.1.0 ([KIP-372](https://cwiki.apache.org/confluence/x/mgJ1BQ)). + * `Joined#named, name`: deprecated in Kafka 2.3.0 ([KIP-307](https://cwiki.apache.org/confluence/x/xikYBQ)). + * `TopologyTestDriver#pipeInput, readOutput`, `OutputVerifier` and `ConsumerRecordFactory` classes ([KIP-470](https://cwiki.apache.org/confluence/x/tI-iBg)). + * `KafkaClientSupplier#getAdminClient`: deprecated in Kafka 2.4.0 ([KIP-476](https://cwiki.apache.org/confluence/x/V9XiBg)). + * Overloaded `KStream#join, leftJoin, outerJoin` with `KStream` and `Joined` parameters: deprecated in Kafka 2.4.0 ([KIP-479](https://cwiki.apache.org/confluence/x/EBEgBw)). + * `WindowStore#put(K key, V value)`: deprecated in Kafka 2.4.0 ([KIP-474](https://cwiki.apache.org/confluence/x/kcviBg)). + * `UsePreviousTimeOnInvalidTimestamp`: deprecated in Kafka 2.5.0 as renamed to `UsePartitionTimeOnInvalidTimestamp` ([KIP-530](https://cwiki.apache.org/confluence/x/BxXABw)). + * Overloaded `KafkaStreams#metadataForKey`: deprecated in Kafka 2.5.0 ([KIP-535](https://cwiki.apache.org/confluence/x/Xg-jBw)). + * Overloaded `KafkaStreams#store`: deprecated in Kafka 2.5.0 ([KIP-562](https://cwiki.apache.org/confluence/x/QYyvC)). + + + +The following dependencies were removed from Kafka Streams: + + * Connect-json: As of Kafka Streams no longer has a compile time dependency on "connect:json" module ([KAFKA-5146](https://issues.apache.org/jira/browse/KAFKA-5146)). Projects that were relying on this transitive dependency will have to explicitly declare it. 
+ + + +The default value for configuration parameter `replication.factor` was changed to `-1` (meaning: use broker default replication factor). The `replication.factor` value of `-1` requires broker version 2.4 or newer. + +The new serde type was introduced `ListSerde`: + + * Added class `ListSerde` to (de)serialize `List`-based objects + * Introduced `ListSerializer` and `ListDeserializer` to power the new functionality + + + +# Streams API changes in 2.8.0 + +We extended `StreamJoined` to include the options `withLoggingEnabled()` and `withLoggingDisabled()` in [KIP-689](https://cwiki.apache.org/confluence/x/DyrZCQ). + +We added two new methods to `KafkaStreams`, namely `KafkaStreams#addStreamThread()` and `KafkaStreams#removeStreamThread()` in [KIP-663](https://cwiki.apache.org/confluence/x/FDd4CQ). These methods have enabled adding and removing StreamThreads to a running KafkaStreams client. + +We deprecated `KafkaStreams#setUncaughtExceptionHandler(final Thread.UncaughtExceptionHandler uncaughtExceptionHandler)` in favor of `KafkaStreams#setUncaughtExceptionHandler(final StreamsUncaughtExceptionHandler streamsUncaughtExceptionHandler)` in [KIP-671](https://cwiki.apache.org/confluence/x/lkN4CQ). The default handler will close the Kafka Streams client and the client will transit to state ERROR. If you implement a custom handler, the new interface allows you to return a `StreamThreadExceptionResponse`, which will determine how the application will respond to a stream thread failure. + +Changes in [KIP-663](https://cwiki.apache.org/confluence/x/FDd4CQ) necessitated the KafkaStreams client state machine to update, which was done in [KIP-696](https://cwiki.apache.org/confluence/x/lCvZCQ). The ERROR state is now terminal with PENDING_ERROR being a transitional state where the resources are closing. The ERROR state indicates that there is something wrong and the Kafka Streams client should not be blindly restarted without classifying the error that caused the thread to fail. If the error is of a type that you would like to retry, you should have the `StreamsUncaughtExceptionHandler` return `REPLACE_THREAD`. When all stream threads are dead there is no automatic transition to ERROR as a new stream thread can be added. + +The `TimeWindowedDeserializer` constructor `TimeWindowedDeserializer(final Deserializer inner)` was deprecated to encourage users to properly set their window size through `TimeWindowedDeserializer(final Deserializer inner, Long windowSize)`. An additional streams config, `window.size.ms`, was added for users that cannot set the window size through the constructor, such as when using the console consumer. [KIP-659](https://cwiki.apache.org/confluence/x/aDR4CQ) has more details. + +To simplify testing, two new constructors that don't require a `Properties` parameter have been added to the `TopologyTestDriver` class. If `Properties` are passed into the constructor, it is no longer required to set mandatory configuration parameters (cf. [KIP-680](https://cwiki.apache.org/confluence/x/MB3ZCQ)). + +We added the `prefixScan()` method to interface `ReadOnlyKeyValueStore`. The new `prefixScan()` allows fetching all values whose keys start with a given prefix. See [KIP-614](https://cwiki.apache.org/confluence/x/qhkRCQ) for more details. + +Kafka Streams is now handling `TimeoutException` thrown by the consumer, producer, and admin client. If a timeout occurs on a task, Kafka Streams moves to the next task and retries to make progress on the failed task in the next iteration. 
To bound how long Kafka Streams retries a task, you can set `task.timeout.ms` (default is 5 minutes). If a task does not make progress within the specified task timeout, which is tracked on a per-task basis, Kafka Streams throws a `TimeoutException` (cf. [KIP-572](https://cwiki.apache.org/confluence/x/5ArcC)). + +We changed the default value of `default.key.serde` and `default.value.serde` to be `null` instead of `ByteArraySerde`. Users will now see a `ConfigException` if their serdes are not correctly configured through those configs or passed in explicitly. See [KIP-741](https://cwiki.apache.org/confluence/x/bIbOCg) for more details. + +# Streams API changes in 2.7.0 + +In `KeyQueryMetadata` we deprecated `getActiveHost()`, `getStandbyHosts()` as well as `getPartition()` and replaced them with `activeHost()`, `standbyHosts()` and `partition()` respectively. `KeyQueryMetadata` was introduced in the Kafka Streams 2.5 release with getter methods having the prefix `get`. The intent of this change is to align the method names with the Kafka convention of not using the `get` prefix for getter methods. The old methods are deprecated, but their behavior is unchanged. (Cf. [KIP-648](https://cwiki.apache.org/confluence/x/vyd4CQ).) + +The `StreamsConfig` variable for configuration parameter `"topology.optimization"` is renamed from `TOPOLOGY_OPTIMIZATION` to `TOPOLOGY_OPTIMIZATION_CONFIG`. The old variable is deprecated. Note that the parameter name itself is not affected. (Cf. [KIP-626](https://cwiki.apache.org/confluence/x/gBB4CQ).) + +The configuration parameter `retries` is deprecated in favor of the new parameter `task.timeout.ms`. The Kafka Streams runtime ignores `retries` if set; however, it still forwards the parameter to its internal clients. + +We added `SlidingWindows` as an option for `windowedBy()` windowed aggregations as described in [KIP-450](https://cwiki.apache.org/confluence/x/nAqZBg). Sliding windows are fixed-time and data-aligned windows that allow for flexible and efficient windowed aggregations. + +The end-to-end latency metrics introduced in 2.6 have been expanded to include store-level metrics. The new store-level metrics are recorded at the TRACE level, a new metrics recording level. Enabling TRACE level metrics will automatically turn on all higher levels, i.e., INFO and DEBUG. See [KIP-613](https://cwiki.apache.org/confluence/x/gBkRCQ) for more information. + +# Streams API changes in 2.6.0 + +We added a new processing mode, EOS version 2, that improves application scalability when using exactly-once guarantees (via [KIP-447](https://cwiki.apache.org/confluence/x/vhYlBg)). You can enable this new feature by setting the configuration parameter `processing.guarantee` to the new value `"exactly_once_beta"`. Note that you need brokers with version 2.5 or newer to use this feature. + +For more highly available stateful applications, we've modified the task assignment algorithm to delay the movement of stateful active tasks to instances that aren't yet caught up with that task's state. Instead, to migrate a task from one instance to another (e.g., when scaling out), Streams will assign a warmup replica to the target instance so it can begin restoring the state while the active task stays available on an instance that already had the task. The instances warming up tasks will communicate their progress to the group so that, once ready, Streams can move active tasks to their new owners in the background.
Check out [KIP-441](https://cwiki.apache.org/confluence/x/0i4lBg) for full details, including several new configs for control over this new feature. + +New end-to-end latency metrics have been added. These task-level metrics will be logged at the INFO level and report the min and max end-to-end latency of a record at the beginning/source node(s) and end/terminal node(s) of a task. See [KIP-613](https://cwiki.apache.org/confluence/x/gBkRCQ) for more information. + +As of 2.6.0 Kafka Streams deprecates `KStream.through()` in favor of the new `KStream.repartition()` operator (as per [KIP-221](https://cwiki.apache.org/confluence/x/i55zB)). `KStream.repartition()` is similar to `KStream.through()`; however, Kafka Streams will manage the topic for you. If you need to write into and read back from a topic that you manage, you can fall back to using `KStream.to()` in combination with `StreamsBuilder#stream()`. Please refer to the [developer guide](/41/streams/developer-guide/dsl-api.html) for more details about `KStream.repartition()`. + +The usability of `StateStore`s within the Processor API is improved: `ProcessorSupplier` and `TransformerSupplier` now extend `ConnectedStoreProvider` as per [KIP-401](https://cwiki.apache.org/confluence/x/XI3QBQ), enabling a user to provide `StateStore`s alongside Processor/Transformer logic so that they are automatically added and connected to the processor. + +We added a `--force` option to StreamsResetter to force-remove left-over members on the broker side when a long session timeout is configured, as per [KIP-571](https://cwiki.apache.org/confluence/x/8I7JC). + +We added `Suppressed.withLoggingDisabled()` and `Suppressed.withLoggingEnabled(config)` methods to allow disabling or configuring the changelog topic of a suppression buffer, as per [KIP-446](https://cwiki.apache.org/confluence/x/RBiGBg). + +# Streams API changes in 2.5.0 + +We added a new `cogroup()` operator (via [KIP-150](https://cwiki.apache.org/confluence/x/YxcjB)) that allows aggregating multiple streams in a single operation. Cogrouped streams can also be windowed before they are aggregated. Please refer to the [developer guide](/41/streams/developer-guide/dsl-api.html) for more details. + +We added a new `KStream.toTable()` API to translate an input event stream into a changelog stream as per [KIP-523](https://cwiki.apache.org/confluence/x/IBKrBw). + +We added a new Serde type `Void` in [KIP-527](https://cwiki.apache.org/confluence/x/3QvABw) to represent null keys or null values from input topics. + +Deprecated `UsePreviousTimeOnInvalidTimestamp` and replaced it with `UsePartitionTimeOnInvalidTimestamp` as per [KIP-530](https://cwiki.apache.org/confluence/x/BxXABw). + +Deprecated `KafkaStreams.store(String, QueryableStoreType)` and replaced it with `KafkaStreams.store(StoreQueryParameters)` to allow querying for a store with a variety of parameters, including querying a specific task and stale stores, as per [KIP-562](https://cwiki.apache.org/confluence/x/QYyvC) and [KIP-535](https://cwiki.apache.org/confluence/x/Xg-jBw) respectively. + +# Streams API changes in 2.4.0 + +As of 2.4.0 Kafka Streams offers a KTable-KTable foreign-key join (as per [KIP-213](https://cwiki.apache.org/confluence/x/pJlzB)). This joiner allows for records to be joined between two KTables with different keys. Both [INNER and LEFT foreign-key joins](/41/streams/developer-guide/dsl-api.html#ktable-ktable-fk-join) are supported.
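For illustration only, a foreign-key join might look like the sketch below; the topics, record types, and key choices are assumptions (Java records are used for brevity, and default serdes are assumed to be configured).

```java
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KTable;

public class FkJoinSketch {

    // Hypothetical value types for illustration only.
    record Order(String customerId, double amount) {}
    record Customer(String name) {}
    record EnrichedOrder(String customerName, double amount) {}

    static KTable<String, EnrichedOrder> build(final StreamsBuilder builder) {
        final KTable<String, Order> orders = builder.table("orders");          // keyed by order id (assumption)
        final KTable<String, Customer> customers = builder.table("customers"); // keyed by customer id (assumption)

        // Foreign-key join: extract the customer id from each order record and join it
        // against the customers table, which uses a different key than the orders table.
        return orders.join(
            customers,
            Order::customerId,                                                       // foreign-key extractor
            (order, customer) -> new EnrichedOrder(customer.name(), order.amount())); // value joiner
    }
}
```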
+ +In the 2.4 release, you now can name all operators in a Kafka Streams DSL topology via [KIP-307](https://cwiki.apache.org/confluence/x/xikYBQ). Giving your operators meaningful names makes it easier to understand the topology description (`Topology#describe()#toString()`) and understand the full context of what your Kafka Streams application is doing. +There are new overloads on most `KStream` and `KTable` methods that accept a `Named` object. Typically you'll provide a name for the DSL operation by using `Named.as("my operator name")`. Naming of repartition topics for aggregation operations will still use `Grouped` and join operations will use either `Joined` or the new `StreamJoined` object. + +Before the 2.4.0 version of Kafka Streams, users of the DSL could not name the state stores involved in a stream-stream join. If users changed their topology and added a operator before the join, the internal names of the state stores would shift, requiring an application reset when redeploying. In the 2.4.0 release, Kafka Streams adds the `StreamJoined` class, which gives users the ability to name the join processor, repartition topic(s) (if a repartition is required), and the state stores involved in the join. Also, by naming the state stores, the changelog topics backing the state stores are named as well. It's important to note that naming the stores **will not** make them queryable via Interactive Queries. +Another feature delivered by `StreamJoined` is that you can now configure the type of state store used in the join. You can elect to use in-memory stores or custom state stores for a stream-stream join. Note that the provided stores will not be available for querying via Interactive Queries. With the addition of `StreamJoined`, stream-stream join operations using `Joined` have been deprecated. Please switch over to stream-stream join methods using the new overloaded methods. You can get more details from [KIP-479](https://cwiki.apache.org/confluence/x/EBEgBw). + +With the introduction of incremental cooperative rebalancing, Streams no longer requires all tasks be revoked at the beginning of a rebalance. Instead, at the completion of the rebalance only those tasks which are to be migrated to another consumer for overall load balance will need to be closed and revoked. This changes the semantics of the `StateListener` a bit, as it will not necessarily transition to `REBALANCING` at the beginning of a rebalance anymore. Note that this means IQ will now be available at all times except during state restoration, including while a rebalance is in progress. If restoration is occurring when a rebalance begins, we will continue to actively restore the state stores and/or process standby tasks during a cooperative rebalance. Note that with this new rebalancing protocol, you may sometimes see a rebalance be followed by a second short rebalance that ensures all tasks are safely distributed. For details on please see [KIP-429](https://cwiki.apache.org/confluence/x/vAclBg). + +The 2.4.0 release contains newly added and reworked metrics. [KIP-444](https://cwiki.apache.org/confluence/x/CiiGBg) adds new _client level_ (i.e., `KafkaStreams` instance level) metrics to the existing thread-level, task-level, and processor-/state-store-level metrics. For a full list of available client level metrics, see the [KafkaStreams monitoring](/41/#kafka_streams_client_monitoring) section in the operations guide. +Furthermore, RocksDB metrics are exposed via [KIP-471](https://cwiki.apache.org/confluence/x/A5LiBg). 
For a full list of available RocksDB metrics, see the [RocksDB monitoring](/41/#kafka_streams_rocksdb_monitoring) section in the operations guide. + +The Kafka Streams `test-utils` artifact was improved via [KIP-470](https://cwiki.apache.org/confluence/x/tI-iBg) to simplify the process of using `TopologyTestDriver` to test your application code. We deprecated `ConsumerRecordFactory`, `TopologyTestDriver#pipeInput()`, `OutputVerifier`, as well as `TopologyTestDriver#readOutput()` and replaced them with `TestInputTopic` and `TestOutputTopic`, respectively. We also introduced a new class `TestRecord` that simplifies assertion code. For full details see the [Testing section](/41/streams/developer-guide/testing.html) in the developer guide. + +In 2.4.0, we deprecated `WindowStore#put(K key, V value)`, which should never be used. Instead, the existing `WindowStore#put(K key, V value, long windowStartTimestamp)` should be used ([KIP-474](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=115526545)). + +Furthermore, the `PartitionGrouper` interface and its corresponding configuration parameter `partition.grouper` were deprecated ([KIP-528](https://cwiki.apache.org/confluence/x/BwzABw)) and will be removed in the next major release ([KAFKA-7785](https://issues.apache.org/jira/browse/KAFKA-7785)). Hence, this feature won't be supported in the future any longer and you need to update your code accordingly. If you use a custom `PartitionGrouper` and stop using it, the created tasks might change. Hence, you will need to reset your application to upgrade it. + +# Streams API changes in 2.3.0 + +Version 2.3.0 adds the Suppress operator to the `kafka-streams-scala` `KTable` API. + +As of 2.3.0 Streams now offers an in-memory version of the window ([KIP-428](https://cwiki.apache.org/confluence/x/6AQlBg)) and the session ([KIP-445](https://cwiki.apache.org/confluence/x/DiqGBg)) store, in addition to the persistent ones based on RocksDB. The new public interfaces `inMemoryWindowStore()` and `inMemorySessionStore()` are added to `Stores` and provide the built-in in-memory window or session store. + +As of 2.3.0 we've updated how to turn on optimizations. Now to enable optimizations, you need to do two things. First, add this line to your properties, as you have done before: `properties.setProperty(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);`. Second, when constructing your `KafkaStreams` instance, you'll need to pass your configuration properties when building your topology by using the overloaded `StreamsBuilder.build(Properties)` method. For example: `KafkaStreams myStream = new KafkaStreams(streamsBuilder.build(properties), properties)`. + +In 2.3.0 we have added default implementations for `close()` and `configure()` to `Serializer`, `Deserializer` and `Serde` so that they can be implemented by a lambda expression. For more details please read [KIP-331](https://cwiki.apache.org/confluence/x/fgw0BQ). + +To improve operator semantics, new store types are added that allow storing an additional timestamp per key-value pair or window. Some DSL operators (for example KTables) are using those new stores. Hence, you can now retrieve the last update timestamp via Interactive Queries if you specify `TimestampedKeyValueStoreType` or `TimestampedWindowStoreType` as your `QueryableStoreType`.
While this change is mainly transparent, there are some corner cases that may require code changes: **Caution: If you receive an untyped store and use a cast, you might need to update your code to cast to the correct type. Otherwise, you might get an exception similar to`java.lang.ClassCastException: class org.apache.kafka.streams.state.ValueAndTimestamp cannot be cast to class YOUR-VALUE-TYPE` upon getting a value from the store.** Additionally, `TopologyTestDriver#getStateStore()` only returns non-built-in stores and throws an exception if a built-in store is accessed. For more details please read [KIP-258](https://cwiki.apache.org/confluence/x/0j6HB). + +To improve type safety, a new operator `KStream#flatTransformValues` is added. For more details please read [KIP-313](https://cwiki.apache.org/confluence/x/bUgYBQ). + +Kafka Streams used to set the configuration parameter `max.poll.interval.ms` to `Integer.MAX_VALUE`. This default value is removed and Kafka Streams uses the consumer default value now. For more details please read [KIP-442](https://cwiki.apache.org/confluence/x/1COGBg). + +Default configuration for repartition topic was changed: The segment size for index files (`segment.index.bytes`) is no longer 50MB, but uses the cluster default. Similarly, the configuration `segment.ms` in no longer 10 minutes, but uses the cluster default configuration. Lastly, the retention period (`retention.ms`) is changed from `Long.MAX_VALUE` to `-1` (infinite). For more details please read [KIP-443](https://cwiki.apache.org/confluence/x/4iOGBg). + +To avoid memory leaks, `RocksDBConfigSetter` has a new `close()` method that is called on shutdown. Users should implement this method to release any memory used by RocksDB config objects, by closing those objects. For more details please read [KIP-453](https://cwiki.apache.org/confluence/x/QhaZBg). + +RocksDB dependency was updated to version `5.18.3`. The new version allows to specify more RocksDB configurations, including `WriteBufferManager` which helps to limit RocksDB off-heap memory usage. For more details please read [KAFKA-8215](https://issues.apache.org/jira/browse/KAFKA-8215). + +# Streams API changes in 2.2.0 + +We've simplified the `KafkaStreams#state` transition diagram during the starting up phase a bit in 2.2.0: in older versions the state will transit from `CREATED` to `RUNNING`, and then to `REBALANCING` to get the first stream task assignment, and then back to `RUNNING`; starting in 2.2.0 it will transit from `CREATED` directly to `REBALANCING` and then to `RUNNING`. If you have registered a `StateListener` that captures state transition events, you may need to adjust your listener implementation accordingly for this simplification (in practice, your listener logic should be very unlikely to be affected at all). + +In `WindowedSerdes`, we've added a new static constructor to return a `TimeWindowSerde` with configurable window size. This is to help users to construct time window serdes to read directly from a time-windowed store's changelog. More details can be found in [KIP-393](https://cwiki.apache.org/confluence/x/WYTQBQ). + +In 2.2.0 we have extended a few public interfaces including `KafkaStreams` to extend `AutoCloseable` so that they can be used in a try-with-resource statement. For a full list of public interfaces that get impacted please read [KIP-376](https://cwiki.apache.org/confluence/x/-AeQBQ). + +# Streams API changes in 2.1.0 + +We updated `TopologyDescription` API to allow for better runtime checking. 
Users are encouraged to use `#topicSet()` and `#topicPattern()` accordingly on `TopologyDescription.Source` nodes, instead of using `#topics()`, which has since been deprecated. Similarly, use `#topic()` and `#topicNameExtractor()` to get descriptions of `TopologyDescription.Sink` nodes. For more details, see [KIP-321](https://cwiki.apache.org/confluence/x/NQU0BQ). + +We've added a new class `Grouped` and deprecated `Serialized`. The intent of adding `Grouped` is the ability to name repartition topics created when performing aggregation operations. Users can name the potential repartition topic using the `Grouped#as()` method, which takes a `String` that is used as part of the repartition topic name. The resulting repartition topic name will still follow the pattern of `${application-id}-<name>-repartition`. The `Grouped` class is now favored over `Serialized` in `KStream#groupByKey()`, `KStream#groupBy()`, and `KTable#groupBy()`. Note that Kafka Streams does not automatically create repartition topics for aggregation operations. Additionally, we've updated the `Joined` class with a new method `Joined#withName` enabling users to name any repartition topics required for performing Stream/Stream or Stream/Table joins. For more details on repartition topic naming, see [KIP-372](https://cwiki.apache.org/confluence/x/mgJ1BQ). As a result, we've updated the Kafka Streams Scala API and removed the `Serialized` class in favor of adding `Grouped`. If you just rely on the implicit `Serialized`, you only need to recompile; if you pass in `Serialized` explicitly, you'll have to make code changes. + +We've added a new config named `max.task.idle.ms` to allow users to specify how to handle out-of-order data within a task that may be processing multiple topic-partitions (see the [Out-of-Order Handling](/41/streams/core-concepts.html#streams_out_of_ordering) section for more details). The default value is set to `0`, to favor minimized latency over synchronization between multiple input streams from topic-partitions. If users would like to wait longer when some of the topic-partitions do not have data available to process, and hence their corresponding stream time cannot be determined, they can override this config with a larger value. + +We've added the missing `SessionBytesStoreSupplier#retentionPeriod()` to be consistent with `WindowBytesStoreSupplier`, which allows users to get the specified retention period for session-windowed stores. We've also added the missing `StoreBuilder#withCachingDisabled()` to allow users to turn off caching for their customized stores. + +We added a new serde for UUIDs (`Serdes.UUIDSerde`) that you can use via `Serdes.UUID()` (cf. [KIP-206](https://cwiki.apache.org/confluence/x/26hjB)). + +We updated a list of methods that take `long` arguments as either timestamp (fixed point) or duration (time period) and replaced them with `Instant` and `Duration` parameters for improved semantics. Some old methods based on `long` are deprecated and users are encouraged to update their code. +In particular, aggregation windows (hopping/tumbling/unlimited time windows and session windows) as well as join windows now take `Duration` arguments to specify window size, hop, and gap parameters. Also, window sizes and retention times are now specified as `Duration` types in the `Stores` class. The `Window` class has new methods `#startTime()` and `#endTime()` that return the window start/end timestamp as `Instant`. For interactive queries, there are new `#fetch(...)` overloads taking `Instant` arguments.
Additionally, punctuations are now registered via `ProcessorContext#schedule(Duration interval, ...)`. For more details, see [KIP-358](https://cwiki.apache.org/confluence/x/IBNPBQ). + +We deprecated `KafkaStreams#close(...)` and replaced it with `KafkaStreams#close(Duration)` that accepts a single timeout argument Note: the new `#close(Duration)` method has improved (but slightly different) semantics. For more details, see [KIP-358](https://cwiki.apache.org/confluence/x/IBNPBQ). + +The newly exposed `AdminClient` metrics are now available when calling the `KafkaStream#metrics()` method. For more details on exposing `AdminClients` metrics see [KIP-324](https://cwiki.apache.org/confluence/x/lQg0BQ) + +We deprecated the notion of segments in window stores as those are intended to be an implementation details. Thus, method `Windows#segments()` and variable `Windows#segments` were deprecated. If you implement custom windows, you should update your code accordingly. Similarly, `WindowBytesStoreSupplier#segments()` was deprecated and replaced with `WindowBytesStoreSupplier#segmentInterval()`. If you implement custom window store, you need to update your code accordingly. Finally, `Stores#persistentWindowStore(...)` were deprecated and replaced with a new overload that does not allow to specify the number of segments any longer. For more details, see [KIP-319](https://cwiki.apache.org/confluence/x/mQU0BQ) (note: [KIP-328](https://cwiki.apache.org/confluence/x/sQU0BQ) and [KIP-358](https://cwiki.apache.org/confluence/x/IBNPBQ) 'overlap' with KIP-319). + +We've added an overloaded `StreamsBuilder#build` method that accepts an instance of `java.util.Properties` with the intent of using the `StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG` config added in Kafka Streams 2.0. Before 2.1, when building a topology with the DSL, Kafka Streams writes the physical plan as the user makes calls on the DSL. Now by providing a `java.util.Properties` instance when executing a `StreamsBuilder#build` call, Kafka Streams can optimize the physical plan of the topology, provided the `StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG` config is set to `StreamsConfig#OPTIMIZE`. By setting `StreamsConfig#OPTIMIZE` in addition to the `KTable` optimization of reusing the source topic as the changelog topic, the topology may be optimized to merge redundant repartition topics into one repartition topic. The original no parameter version of `StreamsBuilder#build` is still available for those who wish to not optimize their topology. Note that enabling optimization of the topology may require you to do an application reset when redeploying the application. For more details, see [KIP-312](https://cwiki.apache.org/confluence/x/CkcYBQ) + +We are introducing static membership towards Kafka Streams user. This feature reduces unnecessary rebalances during normal application upgrades or rolling bounces. For more details on how to use it, checkout [static membership design](/41/#static_membership). Note, Kafka Streams uses the same `ConsumerConfig#GROUP_INSTANCE_ID_CONFIG`, and you only need to make sure it is uniquely defined across different stream instances in one application. + +# Streams API changes in 2.0.0 + +In 2.0.0 we have added a few new APIs on the `ReadOnlyWindowStore` interface (for details please read Streams API changes below). If you have customized window store implementations that extends the `ReadOnlyWindowStore` interface you need to make code changes. 
+ +In addition, if you using Java 8 method references in your Kafka Streams code you might need to update your code to resolve method ambiguities. Hot-swapping the jar-file only might not work for this case. See below a complete list of 2.0.0 API and semantic changes that allow you to advance your application and/or simplify your code base. + +We moved `Consumed` interface from `org.apache.kafka.streams` to `org.apache.kafka.streams.kstream` as it was mistakenly placed in the previous release. If your code has already used it there is a simple one-liner change needed in your import statement. + +We have also removed some public APIs that are deprecated prior to 1.0.x in 2.0.0. See below for a detailed list of removed APIs. + +We have removed the `skippedDueToDeserializationError-rate` and `skippedDueToDeserializationError-total` metrics. Deserialization errors, and all other causes of record skipping, are now accounted for in the pre-existing metrics `skipped-records-rate` and `skipped-records-total`. When a record is skipped, the event is now logged at WARN level. If these warnings become burdensome, we recommend explicitly filtering out unprocessable records instead of depending on record skipping semantics. For more details, see [KIP-274](https://cwiki.apache.org/confluence/x/gFOHB). As of right now, the potential causes of skipped records are: + + * `null` keys in table sources + * `null` keys in table-table inner/left/outer/right joins + * `null` keys or values in stream-table joins + * `null` keys or values in stream-stream joins + * `null` keys or values in aggregations on grouped streams + * `null` keys or values in reductions on grouped streams + * `null` keys in aggregations on windowed streams + * `null` keys in reductions on windowed streams + * `null` keys in aggregations on session-windowed streams + * Errors producing results, when the configured `default.production.exception.handler` decides to `CONTINUE` (the default is to `FAIL` and throw an exception). + * Errors deserializing records, when the configured `default.deserialization.exception.handler` decides to `CONTINUE` (the default is to `FAIL` and throw an exception). This was the case previously captured in the `skippedDueToDeserializationError` metrics. + * Fetched records having a negative timestamp. + + + +We've also fixed the metrics name for time and session windowed store operations in 2.0. As a result, our current built-in stores will have their store types in the metric names as `in-memory-state`, `in-memory-lru-state`, `rocksdb-state`, `rocksdb-window-state`, and `rocksdb-session-state`. For example, a RocksDB time windowed store's put operation metrics would now be `kafka.streams:type=stream-rocksdb-window-state-metrics,client-id=([-.\w]+),task-id=([-.\w]+),rocksdb-window-state-id=([-.\w]+)`. Users need to update their metrics collecting and reporting systems for their time and session windowed stores accordingly. For more details, please read the [State Store Metrics](/41/#kafka_streams_store_monitoring) section. + +We have added support for methods in `ReadOnlyWindowStore` which allows for querying a single window's key-value pair. For users who have customized window store implementations on the above interface, they'd need to update their code to implement the newly added method as well. For more details, see [KIP-261](https://cwiki.apache.org/confluence/x/UUSHB). + +We have added public `WindowedSerdes` to allow users to read from / write to a topic storing windowed table changelogs directly. 
In addition, in `StreamsConfig` we have also added `default.windowed.key.serde.inner` and `default.windowed.value.serde.inner` to let users specify inner serdes if the default serde classes are windowed serdes. For more details, see [KIP-265](https://cwiki.apache.org/confluence/x/_keHB). + +We've added message header support in the `Processor API` in Kafka 2.0.0. In particular, we have added a new API `ProcessorContext#headers()` which returns a `Headers` object that keeps track of the headers of the source topic's message that is being processed. Through this object, users can manipulate the headers map that is being propagated throughout the processor topology as well. For more details please feel free to read the [Developer Guide](/41/streams/developer-guide/processor-api.html#accessing-processor-context) section. + +We have deprecated constructors of `KafkaStreams` that take a `StreamsConfig` as parameter. Please use the other corresponding constructors that accept `java.util.Properties` instead. For more details, see [KIP-245](https://cwiki.apache.org/confluence/x/KLRzB). + +Kafka 2.0.0 allows to manipulate timestamps of output records using the Processor API ([KIP-251](https://cwiki.apache.org/confluence/x/Ih6HB)). To enable this new feature, `ProcessorContext#forward(...)` was modified. The two existing overloads `#forward(Object key, Object value, String childName)` and `#forward(Object key, Object value, int childIndex)` were deprecated and a new overload `#forward(Object key, Object value, To to)` was added. The new class `To` allows you to send records to all or specific downstream processors by name and to set the timestamp for the output record. Forwarding based on child index is not supported in the new API any longer. + +We have added support to allow routing records dynamically to Kafka topics. More specifically, in both the lower-level `Topology#addSink` and higher-level `KStream#to` APIs, we have added variants that take a `TopicNameExtractor` instance instead of a specific `String` typed topic name, such that for each received record from the upstream processor, the library will dynamically determine which Kafka topic to write to based on the record's key and value, as well as record context. Note that all the Kafka topics that may possibly be used are still considered as user topics and hence required to be pre-created. In addition to that, we have modified the `StreamPartitioner` interface to add the topic name parameter since the topic name now may not be known beforehand; users who have customized implementations of this interface would need to update their code while upgrading their application to use Kafka Streams 2.0.0. + +[KIP-284](https://cwiki.apache.org/confluence/x/DVyHB) changed the retention time for repartition topics by setting its default value to `Long.MAX_VALUE`. Instead of relying on data retention Kafka Streams uses the new purge data API to delete consumed data from those topics and to keep used storage small now. + +We have modified the `ProcessorStateManger#register(...)` signature and removed the deprecated `loggingEnabled` boolean parameter as it is specified in the `StoreBuilder`. Users who used this function to register their state stores into the processor topology need to simply update their code and remove this parameter from the caller. + +Kafka Streams DSL for Scala is a new Kafka Streams client library available for developers authoring Kafka Streams applications in Scala. 
It wraps core Kafka Streams DSL types to make it easier to call when interoperating with Scala code. For example, it includes higher-order functions as parameters for transformations, avoiding the need for anonymous classes in Java 7 or experimental SAM type conversions in Scala 2.11, automatic conversion between Java and Scala collection types, a way to implicitly provide Serdes to reduce boilerplate from your application and make it more typesafe, and more! For more information see the [Kafka Streams DSL for Scala documentation](/41/streams/developer-guide/dsl-api.html#scala-dsl) and [KIP-270](https://cwiki.apache.org/confluence/x/c06HB). + +We have removed these deprecated APIs: + + * `KafkaStreams#toString` no longer returns the topology and runtime metadata; to get topology metadata users can call `Topology#describe()` and to get thread runtime metadata users can call `KafkaStreams#localThreadsMetadata` (they are deprecated since 1.0.0). For detailed guidance on how to update your code please read here + * `TopologyBuilder` and `KStreamBuilder` are removed and replaced by `Topology` and `StreamsBuilder` respectively (they are deprecated since 1.0.0). For detailed guidance on how to update your code please read here + * `StateStoreSupplier` is removed and replaced with `StoreBuilder` (they are deprecated since 1.0.0); the corresponding `Stores#create` and `KStream, KTable, KGroupedStream` overloaded functions that use it have also been removed. For detailed guidance on how to update your code please read here + * `KStream, KTable, KGroupedStream` overloaded functions that require serdes and other specifications explicitly are removed and replaced with simpler overloaded functions that use `Consumed, Produced, Serialized, Materialized, Joined` (they are deprecated since 1.0.0). For detailed guidance on how to update your code please read here + * `Processor#punctuate`, `ValueTransformer#punctuate`, `Transformer#punctuate` and `ProcessorContext#schedule(long)` are removed and replaced by `ProcessorContext#schedule(long, PunctuationType, Punctuator)` (they were deprecated in 1.0.0). + * The second `boolean` typed parameter "loggingEnabled" in `ProcessorContext#register` has been removed; users can now use `StoreBuilder#withLoggingEnabled, withLoggingDisabled` to specify the behavior when they create the state store. + * `KTable#writeAs, print, foreach, to, through` are removed; users can call `KTable#toStream()#writeAs` instead for the same purpose (they are deprecated since 0.11.0.0). For a detailed list of removed APIs please read here + * `StreamsConfig#KEY_SERDE_CLASS_CONFIG, VALUE_SERDE_CLASS_CONFIG, TIMESTAMP_EXTRACTOR_CLASS_CONFIG` are removed and replaced with `StreamsConfig#DEFAULT_KEY_SERDE_CLASS_CONFIG, DEFAULT_VALUE_SERDE_CLASS_CONFIG, DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG` respectively (they are deprecated since 0.11.0.0). + * `StreamsConfig#ZOOKEEPER_CONNECT_CONFIG` is removed as Streams no longer needs a ZooKeeper dependency (it is deprecated since 0.10.2.0). + + + +# Streams API changes in 1.1.0 + +We have added support for methods in `ReadOnlyWindowStore` which allow for querying `WindowStore`s without the necessity of providing keys. Users who have customized window store implementations of the above interface need to update their code to implement the newly added method as well. For more details, see [KIP-205](https://cwiki.apache.org/confluence/x/6qdjB).
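A rough sketch of such a key-less scan over a window store is shown below; the store name and value types are assumptions, and the example uses the newer `StoreQueryParameters`-based `KafkaStreams#store` call rather than the 1.1-era two-argument overload.

```java
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyWindowStore;

public class WindowStoreScanSketch {

    // 'streams' and the store name "counts-window-store" are illustrative assumptions.
    static void dumpAllWindows(final KafkaStreams streams) {
        final ReadOnlyWindowStore<String, Long> store = streams.store(
            StoreQueryParameters.fromNameAndType("counts-window-store",
                QueryableStoreTypes.windowStore()));

        // Iterate over every window of every key -- no key needs to be provided.
        try (KeyValueIterator<Windowed<String>, Long> iter = store.all()) {
            while (iter.hasNext()) {
                final KeyValue<Windowed<String>, Long> next = iter.next();
                System.out.println(next.key + " -> " + next.value);
            }
        }
    }
}
```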
+ +There is a new artifact `kafka-streams-test-utils` providing a `TopologyTestDriver`, `ConsumerRecordFactory`, and `OutputVerifier` class. You can include the new artifact as a regular dependency to your unit tests and use the test driver to test your business logic of your Kafka Streams application. For more details, see [KIP-247](https://cwiki.apache.org/confluence/x/EQOHB). + +The introduction of [KIP-220](https://cwiki.apache.org/confluence/x/QJ5zB) enables you to provide configuration parameters for the embedded admin client created by Kafka Streams, similar to the embedded producer and consumer clients. You can provide the configs via `StreamsConfig` by adding the configs with the prefix `admin.` as defined by `StreamsConfig#adminClientPrefix(String)` to distinguish them from configurations of other clients that share the same config names. + +New method in `KTable` + + * `transformValues` methods have been added to `KTable`. Similar to those on `KStream`, these methods allow for richer, stateful, value transformation similar to the Processor API. + + + +New method in `GlobalKTable` + + * A method has been provided such that it will return the store name associated with the `GlobalKTable` or `null` if the store name is non-queryable. + + + +New methods in `KafkaStreams`: + + * added overload for the constructor that allows overriding the `Time` object used for tracking system wall-clock time; this is useful for unit testing your application code. + + + +New methods in `KafkaClientSupplier`: + + * added `getAdminClient(config)` that allows to override an `AdminClient` used for administrative requests such as internal topic creations, etc. + + + +New error handling for exceptions during production: + + * added interface `ProductionExceptionHandler` that allows implementors to decide whether or not Streams should `FAIL` or `CONTINUE` when certain exception occur while trying to produce. + * provided an implementation, `DefaultProductionExceptionHandler` that always fails, preserving the existing behavior by default. + * changing which implementation is used can be done by settings `default.production.exception.handler` to the fully qualified name of a class implementing this interface. + + + +Changes in `StreamsResetter`: + + * added options to specify input topics offsets to reset according to [KIP-171](https://cwiki.apache.org/confluence/x/ApI7B) + + + +# Streams API changes in 1.0.0 + +With 1.0 a major API refactoring was accomplished and the new API is cleaner and easier to use. This change includes the five main classes `KafkaStreams`, `KStreamBuilder`, `KStream`, `KTable`, and `TopologyBuilder` (and some more others). All changes are fully backward compatible as old API is only deprecated but not removed. We recommend to move to the new API as soon as you can. We will summarize all API changes in the next paragraphs. + +The two main classes to specify a topology via the DSL (`KStreamBuilder`) or the Processor API (`TopologyBuilder`) were deprecated and replaced by `StreamsBuilder` and `Topology` (both new classes are located in package `org.apache.kafka.streams`). Note, that `StreamsBuilder` does not extend `Topology`, i.e., the class hierarchy is different now. The new classes have basically the same methods as the old ones to build a topology via DSL or Processor API. However, some internal methods that were public in `KStreamBuilder` and `TopologyBuilder` but not part of the actual API are not present in the new classes any longer. 
Furthermore, some overloads were simplified compared to the original classes. See [KIP-120](https://cwiki.apache.org/confluence/x/uR8IB) and [KIP-182](https://cwiki.apache.org/confluence/x/TYZjB) for full details. + +Changing how a topology is specified also affects `KafkaStreams` constructors, which now only accept a `Topology`. Using the DSL builder class `StreamsBuilder` one can get the constructed `Topology` via `StreamsBuilder#build()`. Additionally, a new class `org.apache.kafka.streams.TopologyDescription` (and some more dependent classes) was added. Those can be used to get a detailed description of the specified topology and can be obtained by calling `Topology#describe()`. An example using this new API is shown in the [quickstart section](/41/streams/quickstart). + +New methods in `KStream`: + + * With the introduction of [KIP-202](https://cwiki.apache.org/confluence/x/66JjB) a new method `merge()` has been created in `KStream` as the StreamsBuilder class's `StreamsBuilder#merge()` has been removed. The method signature was also changed: instead of providing multiple `KStream`s to the method at once, only a single `KStream` is accepted. + + + +New methods in `KafkaStreams`: + + * retrieve the current runtime information about the local threads via `localThreadsMetadata()` + * observe the restoration of all state stores via `setGlobalStateRestoreListener()`, in which users can provide their customized implementation of the `org.apache.kafka.streams.processor.StateRestoreListener` interface + + + +Deprecated / modified methods in `KafkaStreams`: + + * `toString()`, `toString(final String indent)` were previously used to return static and runtime information. They have been deprecated in favor of using the new classes/methods `localThreadsMetadata()` / `ThreadMetadata` (returning runtime information) and `TopologyDescription` / `Topology#describe()` (returning static information). + * With the introduction of [KIP-182](https://cwiki.apache.org/confluence/x/TYZjB) you should no longer pass in `Serde` to `KStream#print` operations. If you can't rely on using `toString` to print your keys and values, you should instead provide a custom `KeyValueMapper` via the `Printed#withKeyValueMapper` call. + * `setStateListener()` can now only be called before the application starts running, i.e. before `KafkaStreams.start()` is called. + + + +Deprecated methods in `KGroupedStream`: + + * Windowed aggregations have been deprecated from `KGroupedStream` and moved to `WindowedKStream`. You can now perform a windowed aggregation by, for example, using `KGroupedStream#windowedBy(Windows)#reduce(Reducer)`. + + + +Modified methods in `Processor`: + + * The Processor API was extended to allow users to schedule `punctuate` functions either based on data-driven **stream time** or wall-clock time. As a result, the original `ProcessorContext#schedule` is deprecated in favor of a new overloaded function that accepts a user customizable `Punctuator` callback interface, which triggers its `punctuate` API method periodically based on the `PunctuationType`. The `PunctuationType` determines what notion of time is used for the punctuation scheduling: either [stream time](/41/streams/core-concepts#streams_time) or wall-clock time (by default, **stream time** is configured to represent event time via `TimestampExtractor`). In addition, the `punctuate` function inside `Processor` is also deprecated. + +Before this, users could only schedule based on stream time (i.e.
`PunctuationType.STREAM_TIME`) and hence the `punctuate` function was data-driven only because stream time is determined (and advanced forward) by the timestamps derived from the input data. If there is no data arriving at the processor, the stream time would not advance and hence punctuation will not be triggered. On the other hand, When wall-clock time (i.e. `PunctuationType.WALL_CLOCK_TIME`) is used, `punctuate` will be triggered purely based on wall-clock time. So for example if the `Punctuator` function is scheduled based on `PunctuationType.WALL_CLOCK_TIME`, if these 60 records were processed within 20 seconds, `punctuate` would be called 2 times (one time every 10 seconds); if these 60 records were processed within 5 seconds, then no `punctuate` would be called at all. Users can schedule multiple `Punctuator` callbacks with different `PunctuationType`s within the same processor by simply calling `ProcessorContext#schedule` multiple times inside processor's `init()` method. + + + + +If you are monitoring on task level or processor-node / state store level Streams metrics, please note that the metrics sensor name and hierarchy was changed: The task ids, store names and processor names are no longer in the sensor metrics names, but instead are added as tags of the sensors to achieve consistent metrics hierarchy. As a result you may need to make corresponding code changes on your metrics reporting and monitoring tools when upgrading to 1.0.0. Detailed metrics sensor can be found in the [Streams Monitoring](/41/#kafka_streams_monitoring) section. + +The introduction of [KIP-161](https://cwiki.apache.org/confluence/x/WQgwB) enables you to provide a default exception handler for deserialization errors when reading data from Kafka rather than throwing the exception all the way out of your streams application. You can provide the configs via the `StreamsConfig` as `StreamsConfig#DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG`. The specified handler must implement the `org.apache.kafka.streams.errors.DeserializationExceptionHandler` interface. + +The introduction of [KIP-173](https://cwiki.apache.org/confluence/x/aZM7B) enables you to provide topic configuration parameters for any topics created by Kafka Streams. This includes repartition and changelog topics. You can provide the configs via the `StreamsConfig` by adding the configs with the prefix as defined by `StreamsConfig#topicPrefix(String)`. Any properties in the `StreamsConfig` with the prefix will be applied when creating internal topics. Any configs that aren't topic configs will be ignored. If you already use `StateStoreSupplier` or `Materialized` to provide configs for changelogs, then they will take precedence over those supplied in the config. 
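As a brief sketch of how the two configuration hooks described above could be wired up together, the property values below are illustrative assumptions only:

```java
import java.util.Properties;

import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;

public class StreamsConfigSketch {

    static Properties baseProperties() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-streams-app");    // assumption
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumption

        // KIP-161: skip records that cannot be deserialized instead of failing the application.
        props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
                  LogAndContinueExceptionHandler.class);

        // KIP-173: topic-level configs (prefixed via StreamsConfig#topicPrefix) that are applied
        // to the internal repartition and changelog topics created by Kafka Streams.
        props.put(StreamsConfig.topicPrefix("retention.ms"), "172800000");
        props.put(StreamsConfig.topicPrefix("cleanup.policy"), "compact,delete");
        return props;
    }
}
```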
+ +# Streams API changes in 0.11.0.0 + +Updates in `StreamsConfig`: + + * new configuration parameter `processing.guarantee` is added + * configuration parameter `key.serde` was deprecated and replaced by `default.key.serde` + * configuration parameter `value.serde` was deprecated and replaced by `default.value.serde` + * configuration parameter `timestamp.extractor` was deprecated and replaced by `default.timestamp.extractor` + * method `keySerde()` was deprecated and replaced by `defaultKeySerde()` + * method `valueSerde()` was deprecated and replaced by `defaultValueSerde()` + * new method `defaultTimestampExtractor()` was added + + + +New methods in `TopologyBuilder`: + + * added overloads for `addSource()` that allow to define a `TimestampExtractor` per source node + * added overloads for `addGlobalStore()` that allow to define a `TimestampExtractor` per source node associated with the global store + + + +New methods in `KStreamBuilder`: + + * added overloads for `stream()` that allow to define a `TimestampExtractor` per input stream + * added overloads for `table()` that allow to define a `TimestampExtractor` per input table + * added overloads for `globalKTable()` that allow to define a `TimestampExtractor` per global table + + + +Deprecated methods in `KTable`: + + * `void foreach(final ForeachAction action)` + * `void print()` + * `void print(final String streamName)` + * `void print(final Serde keySerde, final Serde valSerde)` + * `void print(final Serde keySerde, final Serde valSerde, final String streamName)` + * `void writeAsText(final String filePath)` + * `void writeAsText(final String filePath, final String streamName)` + * `void writeAsText(final String filePath, final Serde keySerde, final Serde valSerde)` + * `void writeAsText(final String filePath, final String streamName, final Serde keySerde, final Serde valSerde)` + + + +The above methods have been deprecated in favor of using the Interactive Queries API. If you want to query the current content of the state store backing the KTable, use the following approach: + + * Make a call to `KafkaStreams.store(final String storeName, final QueryableStoreType queryableStoreType)` + * Then make a call to `ReadOnlyKeyValueStore.all()` to iterate over the keys of a `KTable`. + + + +If you want to view the changelog stream of the `KTable` then you could call `KTable.toStream().print(Printed.toSysOut)`. + +Metrics using exactly-once semantics: + +If `"exactly_once"` processing (EOS version 1) is enabled via the `processing.guarantee` parameter, internally Streams switches from a producer-per-thread to a producer-per-task runtime model. Using `"exactly_once_beta"` (EOS version 2) does use a producer-per-thread, so `client.id` doesn't change, compared with `"at_least_once"` for this case). In order to distinguish the different producers, the producer's `client.id` additionally encodes the task-ID for this case. Because the producer's `client.id` is used to report JMX metrics, it might be required to update tools that receive those metrics. + +Producer's `client.id` naming schema: + + * at-least-once (default): `[client.Id]-StreamThread-[sequence-number]` + * exactly-once: `[client.Id]-StreamThread-[sequence-number]-[taskId]` + * exactly-once-beta: `[client.Id]-StreamThread-[sequence-number]` + + + +`[client.Id]` is either set via Streams configuration parameter `client.id` or defaults to `[application.id]-[processId]` (`[processId]` is a random UUID). 
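A minimal sketch of the Interactive Queries approach described above for inspecting the state store backing a `KTable` follows; the store name, key, and types are assumptions, and the `StoreQueryParameters` form of `KafkaStreams#store` is used because the two-argument overload mentioned above was deprecated in later releases.

```java
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public class KTableQuerySketch {

    // 'streams', the store name "ktable-store", and the key "some-key" are illustrative assumptions.
    static void queryTableStore(final KafkaStreams streams) {
        final ReadOnlyKeyValueStore<String, Long> store = streams.store(
            StoreQueryParameters.fromNameAndType("ktable-store", QueryableStoreTypes.keyValueStore()));

        // Point lookup against the current content of the KTable's backing store.
        final Long value = store.get("some-key");
        System.out.println("some-key -> " + value
            + " (approx. " + store.approximateNumEntries() + " entries in the store)");
    }
}
```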
+ +# Notable changes in 0.10.2.1 + +Parameter updates in `StreamsConfig`: + + * The default config values of embedded producer's `retries` and consumer's `max.poll.interval.ms` have been changed to improve the resiliency of a Kafka Streams application + + + +# Streams API changes in 0.10.2.0 + +New methods in `KafkaStreams`: + + * set a listener to react on application state change via `setStateListener(StateListener listener)` + * retrieve the current application state via `state()` + * retrieve the global metrics registry via `metrics()` + * apply a timeout when closing an application via `close(long timeout, TimeUnit timeUnit)` + * specify a custom indent when retrieving Kafka Streams information via `toString(String indent)` + + + +Parameter updates in `StreamsConfig`: + + * parameter `zookeeper.connect` was deprecated; a Kafka Streams application does no longer interact with ZooKeeper for topic management but uses the new broker admin protocol (cf. [KIP-4, Section "Topic Admin Schema"](https://cwiki.apache.org/confluence/x/vBEIAw)) + * added many new parameters for metrics, security, and client configurations + + + +Changes in `StreamsMetrics` interface: + + * removed methods: `addLatencySensor()` + * added methods: `addLatencyAndThroughputSensor()`, `addThroughputSensor()`, `recordThroughput()`, `addSensor()`, `removeSensor()` + + + +New methods in `TopologyBuilder`: + + * added overloads for `addSource()` that allow to define a `auto.offset.reset` policy per source node + * added methods `addGlobalStore()` to add global `StateStore`s + + + +New methods in `KStreamBuilder`: + + * added overloads for `stream()` and `table()` that allow to define a `auto.offset.reset` policy per input stream/table + * added method `globalKTable()` to create a `GlobalKTable` + + + +New joins for `KStream`: + + * added overloads for `join()` to join with `KTable` + * added overloads for `join()` and `leftJoin()` to join with `GlobalKTable` + * note, join semantics in 0.10.2 were improved and thus you might see different result compared to 0.10.0.x and 0.10.1.x (cf. [Kafka Streams Join Semantics](https://cwiki.apache.org/confluence/x/EzPtAw) in the Apache Kafka wiki) + + +Aligned `null`-key handling for `KTable` joins: + + * like all other KTable operations, `KTable-KTable` joins do not throw an exception on `null` key records anymore, but drop those records silently + + + +New window type _Session Windows_ : + + * added class `SessionWindows` to specify session windows + * added overloads for `KGroupedStream` methods `count()`, `reduce()`, and `aggregate()` to allow session window aggregations + + + +Changes to `TimestampExtractor`: + + * method `extract()` has a second parameter now + * new default timestamp extractor class `FailOnInvalidTimestamp` (it gives the same behavior as old (and removed) default extractor `ConsumerRecordTimestampExtractor`) + * new alternative timestamp extractor classes `LogAndSkipOnInvalidTimestamp` and `UsePreviousTimeOnInvalidTimestamps` + + + +Relaxed type constraints of many DSL interfaces, classes, and methods (cf. [KIP-100](https://cwiki.apache.org/confluence/x/dQMIB)). 
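To illustrate the `GlobalKTable` joins listed above with today's DSL, a small sketch follows; the topic names, types, and key/value choices are assumptions, and default serdes are assumed to be configured.

```java
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.GlobalKTable;
import org.apache.kafka.streams.kstream.KStream;

public class GlobalKTableJoinSketch {

    static KStream<String, String> build(final StreamsBuilder builder) {
        // Stream of orders keyed by order id, with the customer id as the value (assumption).
        final KStream<String, String> orders = builder.stream("orders");
        // Fully replicated table of customer names keyed by customer id (assumption).
        final GlobalKTable<String, String> customers = builder.globalTable("customers");

        // Join each order with the customer it refers to; the KeyValueMapper selects
        // the GlobalKTable lookup key from the stream record's key and value.
        return orders.join(
            customers,
            (orderId, customerId) -> customerId,
            (customerId, customerName) -> customerId + " -> " + customerName);
    }
}
```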
+ +# Streams API changes in 0.10.1.0 + +Stream grouping and aggregation split into two methods: + + * old: KStream #aggregateByKey(), #reduceByKey(), and #countByKey() + * new: KStream#groupByKey() plus KGroupedStream #aggregate(), #reduce(), and #count() + * Example: stream.countByKey() changes to stream.groupByKey().count() + + + +Auto Repartitioning: + + * a call to through() after a key-changing operator and before an aggregation/join is no longer required + * Example: stream.selectKey(...).through(...).countByKey() changes to stream.selectKey().groupByKey().count() + + + +TopologyBuilder: + + * methods #sourceTopics(String applicationId) and #topicGroups(String applicationId) got simplified to #sourceTopics() and #topicGroups() + + + +DSL: new parameter to specify state store names: + + * The new Interactive Queries feature requires to specify a store name for all source KTables and window aggregation result KTables (previous parameter "operator/window name" is now the storeName) + * KStreamBuilder#table(String topic) changes to #topic(String topic, String storeName) + * KTable#through(String topic) changes to #through(String topic, String storeName) + * KGroupedStream #aggregate(), #reduce(), and #count() require additional parameter "String storeName" + * Example: stream.countByKey(TimeWindows.of("windowName", 1000)) changes to stream.groupByKey().count(TimeWindows.of(1000), "countStoreName") + + + +Windowing: + + * Windows are not named anymore: TimeWindows.of("name", 1000) changes to TimeWindows.of(1000) (cf. DSL: new parameter to specify state store names) + * JoinWindows has no default size anymore: JoinWindows.of("name").within(1000) changes to JoinWindows.of(1000) + + + +# Streams API broker compatibility + +The following table shows which versions of the Kafka Streams API are compatible with various Kafka broker versions. For Kafka Stream version older than 2.4.x, please check [3.9 upgrade document](/39/streams/upgrade-guide). + +| Kafka Broker (columns) +---|--- +Kafka Streams API (rows) | 2.1.x and +2.2.x and +2.3.x and +2.4.x and +2.5.x and +2.6.x and +2.7.x and +2.8.x and +3.0.x and +3.1.x and +3.2.x and +3.3.x and +3.4.x and +3.5.x and +3.6.x and +3.7.x and +3.8.x and +3.9.x | 4.0.x +2.4.x and +2.5.x | compatible | compatible +2.6.x and +2.7.x and +2.8.x and +3.0.x and +3.1.x and +3.2.x and +3.3.x and +3.4.x and +3.5.x and +3.6.x and +3.7.x and +3.8.x and +3.9.x and +4.0.x | compatible; enabling exactly-once v2 requires broker version 2.5.x or higher | compatible + +[Previous](/41/streams/developer-guide/app-reset-tool) Next + + * [Documentation](/documentation) + * [Kafka Streams](/streams) + + diff --git a/content/en/blog/releases/ak-4.1.0.md b/content/en/blog/releases/ak-4.1.0.md new file mode 100644 index 000000000..97773d671 --- /dev/null +++ b/content/en/blog/releases/ak-4.1.0.md @@ -0,0 +1,68 @@ +--- +date: 2025-09-04 +title: Apache Kafka 4.1.0 Release Announcement +linkTitle: AK 4.1.0 +author: Mickael Maison (@MickaelMaison) +--- + + + +We are proud to announce the release of Apache Kafka® 4.1.0. This release contains many new features and improvements. This blog post will highlight some of the more prominent ones. For a full list of changes, be sure to check the [release notes](https://downloads.apache.org/kafka/4.1.0/RELEASE_NOTES.html). + +Queues for Kafka ([KIP-932](https://cwiki.apache.org/confluence/x/4hA0Dw)) is now in preview. It's still not ready for production but you can start evaluating and testing it. 
See the [preview release notes](https://cwiki.apache.org/confluence/x/CIq3FQ) for more details. + +This release also introduces a new Streams Rebalance Protocol ([KIP-1071](https://cwiki.apache.org/confluence/x/2BCTEg)) in early access. It is based on the new consumer group protocol ([KIP-848](https://cwiki.apache.org/confluence/x/HhD1D)). + +See the [Upgrading to 4.1](https://kafka.apache.org/documentation.html#upgrade_4_1_0) section in the documentation for the list of notable changes and detailed upgrade steps. + +## Kafka Broker, Controller, Producer, Consumer and Admin Client + +  * [KIP-877: Mechanism for plugins and connectors to register metrics](https://cwiki.apache.org/confluence/x/lY3GDQ) +Many client-side plugins can now implement the `Monitorable` interface to easily register their own metrics. Tags identifying the plugin are automatically injected and the metrics use the `kafka.CLIENT:type=plugins` naming where CLIENT is either producer, consumer or admin. +  * [KIP-1050: Consistent error handling for Transactions](https://cwiki.apache.org/confluence/x/8ItyEg) +This KIP updates the error handling logic and documentation of all the transaction APIs to make it simpler to build robust applications and build third-party Kafka clients that behave the same way as the Java client. +  * [KIP-1092: Extend Consumer#close with an option to leave the group or not](https://cwiki.apache.org/confluence/x/JQstEw) +This adds a new `Consumer.close(CloseOptions)` method which indicates whether the consumer should explicitly leave its group when it's shutting down. This enables Streams to control when to trigger group rebalances. The `Consumer.close(Duration)` method is now deprecated. +  * [KIP-1101: Trigger rebalance on rack topology changes](https://cwiki.apache.org/confluence/x/FouMEw) +This KIP updates the rack-aware partition assignment from the consumer rebalance protocol and makes it a lot more memory efficient, allowing consumer groups to have hundreds of members. +  * [KIP-1109: Unifying Kafka Consumer Topic Metrics](https://cwiki.apache.org/confluence/x/-42MEw) +The consumer used to replace dots in topic names with underscores in its metric names. In this release, topic metrics are also emitted with the topic names unchanged. Users should transition to these new metrics. In 5.0, the metrics with the changed topic names will be removed. +  * [KIP-1118: Add Deadlock Protection on Producer Network Thread](https://cwiki.apache.org/confluence/x/LorREw) +From 4.1, if `KafkaProducer.flush()` is called from the `KafkaProducer.send()` callback, then an exception is raised. Previously this could lead to a deadlock in the producer. +  * [KIP-1139: Add support for OAuth jwt-bearer grant type](https://cwiki.apache.org/confluence/x/uIxEF) +In addition to the client_credentials grant type, Kafka now supports the jwt-bearer grant type for OAuth. This grant type avoids putting secrets in clear text in the configuration and is also supported by many OAuth providers. +  * [KIP-1143: Deprecated Optional and return String from public Endpoint#listenerName](https://cwiki.apache.org/confluence/x/LwqWF) +This is a cleanup in the `Endpoint` class. The existing `listenerName()` method which returns `Optional` is now deprecated, and users should transition to the new `listenerName()` method which returns `String`.
+ * [KIP-1152: Add transactional ID pattern filter to ListTransactions API](https://cwiki.apache.org/confluence/x/4gm9F) +When listing transactions, you can now provide a pattern to filter based on the transactional ID. In environments with many transactional IDs, this avoids having to list all transactions and filter them on the client side. + + + +## Kafka Streams + +  * [KIP-1020: Move window.size.ms and windowed.inner.class.serde from StreamsConfig to TimeWindowedDe/Serializer and SessionWindowedDe/Serializer class](https://cwiki.apache.org/confluence/x/lAtYEQ) +The `window.size.ms` and `windowed.inner.class.serde` configurations are now defined in the TimeWindowed and SessionWindowed SerDes. +  * [KIP-1071: Streams Rebalance Protocol](https://cwiki.apache.org/confluence/x/2BCTEg) +This builds on KIP-848 and makes Streams task assignment a first-class citizen in the Kafka protocol. A lot of logic also moves to the coordinator, such as task assignment and internal topic creation. This is currently in early access and not ready for production use. See the [upgrade guide](/documentation/streams/upgrade-guide#streams_api_changes_410) for more details. +  * [KIP-1111: Enforcing Explicit Naming for Kafka Streams Internal Topics](https://cwiki.apache.org/confluence/x/4Y_MEw) +Streams stores its state in internal topics whose names are generated. A new configuration, `ensure.explicit.internal.resource.naming`, allows enforcing explicit naming of all internal resources, making topic names predictable and allowing a topology to be altered while conserving the existing topics. + + + +## Kafka Connect + +  * [KIP-877: Mechanism for plugins and connectors to register metrics](https://cwiki.apache.org/confluence/x/lY3GDQ) +All worker and connector plugins can now register their own metrics. For connectors and tasks this is done via their context. Other plugins can implement the `Monitorable` interface to do so. +  * [KIP-891: Running multiple versions of Connector plugins](https://cwiki.apache.org/confluence/x/qY0ODg) +Connect now supports installing and running multiple versions of the same connector plugins (Connectors, Converters, Transformations and Predicates). This makes it easier to upgrade plugins, and to downgrade them in case of issues, without needing to use separate Connect clusters. + + + +## Summary + +Ready to get started with Apache Kafka 4.1.0? Check out all the details in the [upgrade notes](https://kafka.apache.org/documentation.html#upgrade_4_1_0) and the [release notes](https://downloads.apache.org/kafka/4.1.0/RELEASE_NOTES.html), and [download](https://kafka.apache.org/downloads) Apache Kafka 4.1.0. + +This was a community effort, so thank you to everyone who contributed to this release, including all our users and our 167 contributors: +陳昱霖(Yu-Lin Chen), A. Sophie Blee-Goldman, Abhinav Dixit, Albert, Alieh Saeedi, Almog Gavra, Alyssa Huang, Andrew Schofield, Andy Li, Ao Li, Apoorv Mittal, Artem Livshits, Ayoub Omari, Azhar Ahmed, Bill Bejeck, Bolin Lin, Bruno Cadonna, Calvin Liu, Cheryl Simmons, Chia-Ping Tsai, ChickenchickenLove, Chih-Yuan Chien, Chirag Wadhwa, Chris Flood, Christo Lolov, ClarkChen, Clay Johnson, co63oc, Colin P.
McCabe, Colt McNealy, Damien Gasparina, Dániel Urbán, Dave Troiano, David Arthur, David Jacot, David Mao, Dejan Stojadinović, dengziming, Dimitar Dimitrov, Divij Vaidya, DL1231, Dmitry Werner, Dongnuo Lyu, Edoardo Comar, fangxiaobing, Federico Valeri, Florian Hussonnois, Fred Zheng, Gantigmaa Selenge, Gaurav Narula, Gerard Klijs-Nefkens, Goooler, grace, Greg Harris, Guang, Guozhang Wang, Gyeongwon, Do, Hailey Ni, hgh1472, Hong-Yi Chen, Iamoshione, Ismael Juma, Istvan Toth, Janindu Pathirana, Jared Harley, Jason Taylor, Jeff Kim, Jhen-Yung Hsu, Ji-Seung Ryu, jimmy, Jimmy Wang, Jing-Jia Hung, Joao Pedro Fonseca Dantas, John Huang, John Roesler, Jonah Hooper, Jorge Esteban Quilcate Otoya, Josep Prat, José Armando García Sancio, Jun Rao, Justine Olshan, Kamal Chandraprakash, Karsten Spang, Kaushik Raina, Ken Huang, Kevin Wu, Kirk True, Kondrat Bertalan, Kuan-Po Tseng, Lan Ding, leaf-soba, Liam Miller-Cushon, Lianet Magrans, Logan Zhu, Loïc GREFFIER, Lorcan, Lucas Brutschy, lucliu1108, Luke Chen, Mahsa Seifikar, Manikumar Reddy, Manoj, Martin Sillence, Matthias J. Sax, Mehari Beyene, Mickael Maison, Milly, Ming-Yen Chung, mingdaoy, Nick Guo, Nick Telford, NICOLAS GUYOMAR, nilmadhab mondal, Okada Haruki, Omnia Ibrahim, Parker Chang, Peter Lee, Piotr P. Karwasz, PoAn Yang, Pramithas Dhakal, qingbozhang, Rajini Sivaram, Rich Chen, Ritika Reddy, Rohan, S.Y. Wang, Sanskar Jhajharia, santhoshct, Satish Duggana, Sean Quah, Sebastien Viale, Shaan, Shahbaz Aamir, ShihYuan Lin, Shivsundar R, snehashisp, Stanislav Kozlovski, Steven Schlansker, Sushant Mahajan, Swikar Patel, TaiJuWu, Ted Yan, TengYao Chi, Thomas Gebert, Thomas Thornton, Tsung-Han Ho (Miles Ho), u0184996, Uladzislau Blok, Vadym Zhytkevych, Vedarth Sharma, Vikas Singh, Viktor Somogyi-Vass, Vincent PÉRICART, Xiaobing Fang, xijiu, Xuan-Zhang Gong, yangjf2019, Yaroslav Kutsela, Yu-Syuan Jheng, YuChia Ma, Yunchi Pang, Yung, YunKui Lu, yx9o, Zachary Hamilton, Zhihong Yu + + diff --git a/content/en/community/downloads.md b/content/en/community/downloads.md index e619612a5..51c7c7bb6 100644 --- a/content/en/community/downloads.md +++ b/content/en/community/downloads.md @@ -10,7 +10,21 @@ The project goal is to have 3 releases a year, which means a release every 4 mon ## Supported releases +### 4.1.0 + + * Released September 2, 2025 + * [Release Notes](https://dlcdn.apache.org/kafka/4.1.0/RELEASE_NOTES.html) + * Docker image: [apache/kafka:4.1.0](https://hub.docker.com/layers/apache/kafka/4.1.0/images/sha256-bce21a1a56707320ce239d743143bf4359ba45da27d31a5236f2169001eec751). + * Docker Native image: [apache/kafka-native:4.1.0](https://hub.docker.com/layers/apache/kafka-native/4.1.0/images/sha256-65d2078ae11c42356d3da7a088b96faab3faca3e738856df1a1829ae3829d9e2). + * Source download: [kafka-4.1.0-src.tgz](https://dlcdn.apache.org/kafka/4.1.0/kafka-4.1.0-src.tgz) ([asc](https://downloads.apache.org/kafka/4.1.0/kafka-4.1.0-src.tgz.asc), [sha512](https://downloads.apache.org/kafka/4.1.0/kafka-4.1.0-src.tgz.sha512)) + * Binary download: [kafka_2.13-4.1.0.tgz](https://dlcdn.apache.org/kafka/4.1.0/kafka_2.13-4.1.0.tgz) ([asc](https://downloads.apache.org/kafka/4.1.0/kafka_2.13-4.1.0.tgz.asc), [sha512](https://downloads.apache.org/kafka/4.1.0/kafka_2.13-4.1.0.tgz.sha512)) + + + +Kafka 4.1.0 includes a significant number of new features and fixes. 
For more information, please read our [blog post](https://kafka.apache.org/blog#apache_kafka_410_release_announcement), the detailed [Upgrade Notes](https://kafka.apache.org/documentation#upgrade_4_1_0) and the [Release Notes](https://dlcdn.apache.org/kafka/4.1.0/RELEASE_NOTES.html). + ### 3.9.1 + * Released May 21, 2025 * [Release Notes](https://dlcdn.apache.org/kafka/3.9.1/RELEASE_NOTES.html) * Docker image: [apache/kafka:3.9.1](https://hub.docker.com/layers/apache/kafka/3.9.1/images/sha256-5862db4a63a6dd7d46fd14771b10a1b39e069c2c47f17d8e4640f960720a0ead). diff --git a/hugo.yaml b/hugo.yaml index 5289d5163..39da0a1e5 100644 --- a/hugo.yaml +++ b/hugo.yaml @@ -89,7 +89,7 @@ menu: main: # Point this to the latest documentation version. - name: "Documentation" - url: "/40/" + url: "/41/" weight: 20 # Add a top level menu pointing to downloads page - name: "Download Kafka" @@ -127,10 +127,13 @@ params: # The version number for the version of the docs represented in this doc set. # Used in the "version-banner" partial to display a version number for the # current doc set. - version: 4.0 + version: 4.1 versions: + - version: "4.1" + url: /41/ - version: "4.0" url: /40/ + archived_version: true - version: "3.9" url: /39/ archived_version: true @@ -224,10 +227,10 @@ params: # A link to latest version of the docs. Used in the "version-banner" partial to # point people to the main doc site. - url_latest_version: /40/ + url_latest_version: /41/ # Repository configuration (URLs for in-page links to opening issues and suggesting changes) - github_repo: https://github.com/hvishwanath/kafka-site-md + github_repo: https://github.com/apache/kafka-site/ # An optional link to a related project repo. For example, the sibling repository where your product code lives. # github_project_repo: https://github.com/google/docsy @@ -237,7 +240,7 @@ params: # Uncomment this if your GitHub repo does not have "main" as the default branch, # or specify a new value if you want to reference another branch in your GitHub links - github_branch: main + github_branch: markdown # Google Custom Search Engine ID. Remove or comment out to disable search. # gcs_engine_id: d72aa9b2712488cc3 diff --git a/static/41/generated/admin_client_config.html b/static/41/generated/admin_client_config.html new file mode 100644 index 000000000..d09ea7b11 --- /dev/null +++ b/static/41/generated/admin_client_config.html @@ -0,0 +1,903 @@ +
    +
  • +

    bootstrap.controllers

    +

    A list of host/port pairs to use for establishing the initial connection to the KRaft controller quorum. This list should be in the form host1:port1,host2:port2,....

    + + + + + +
    Type:list
    Default:""
    Valid Values:
    Importance:high
    +
  • +
  • +

    bootstrap.servers

    +

    A list of host/port pairs used to establish the initial connection to the Kafka cluster. Clients use this list to bootstrap and discover the full set of Kafka brokers. While the order of servers in the list does not matter, we recommend including more than one server to ensure resilience if any servers are down. This list does not need to contain the entire set of brokers, as Kafka clients automatically manage and update connections to the cluster efficiently. This list must be in the form host1:port1,host2:port2,....

    + + + + + +
    Type:list
    Default:""
    Valid Values:
    Importance:high
    +
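As a hedged illustration of the two bootstrap settings above (not part of the generated reference), the sketch below creates an Admin client; the broker addresses are placeholder assumptions.

```java
// Hypothetical snippet: creating an Admin client from the bootstrap configs above.
// The broker addresses are illustrative placeholders.
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class AdminBootstrapExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // bootstrap.servers points at brokers, bootstrap.controllers at the KRaft
        // controller quorum; an Admin client is normally given one or the other.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092,broker2:9092");

        try (Admin admin = Admin.create(props)) {
            System.out.println("Cluster id: " + admin.describeCluster().clusterId().get());
        }
    }
}
```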
  • +
  • +

    ssl.key.password

    +

    The password of the private key in the key store file or the PEM key specified in 'ssl.keystore.key'.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.certificate.chain

    +

    Certificate chain in the format specified by 'ssl.keystore.type'. Default SSL engine factory supports only PEM format with a list of X.509 certificates

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.key

    +

    Private key in the format specified by 'ssl.keystore.type'. Default SSL engine factory supports only PEM format with PKCS#8 keys. If the key is encrypted, key password must be specified using 'ssl.key.password'

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.location

    +

    The location of the key store file. This is optional for client and can be used for two-way authentication for client.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.password

    +

    The store password for the key store file. This is optional for client and only needed if 'ssl.keystore.location' is configured. Key store password is not supported for PEM format.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.certificates

    +

    Trusted certificates in the format specified by 'ssl.truststore.type'. Default SSL engine factory supports only PEM format with X.509 certificates.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.location

    +

    The location of the trust store file.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.password

    +

    The password for the trust store file. If a password is not set, trust store file configured will still be used, but integrity checking is disabled. Trust store password is not supported for PEM format.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
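Not part of the generated reference: a hedged sketch showing how the SSL key store and trust store settings above might be combined for a client; all paths and passwords are placeholders.

```java
// Hypothetical TLS client settings combining the SSL key/trust store configs above.
// Paths and passwords are illustrative placeholders.
import java.util.Properties;

public class SslClientConfigExample {
    public static Properties sslProps() {
        Properties props = new Properties();
        props.put("security.protocol", "SSL");
        props.put("ssl.keystore.location", "/etc/kafka/client.keystore.jks");
        props.put("ssl.keystore.password", "keystore-password");
        props.put("ssl.key.password", "key-password");
        props.put("ssl.truststore.location", "/etc/kafka/client.truststore.jks");
        props.put("ssl.truststore.password", "truststore-password");
        return props;
    }
}
```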
  • +
  • +

    client.dns.lookup

    +

    Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again (both the JVM and the OS cache DNS name lookups, however). If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips.

    + + + + + +
    Type:string
    Default:use_all_dns_ips
    Valid Values:[use_all_dns_ips, resolve_canonical_bootstrap_servers_only]
    Importance:medium
    +
  • +
  • +

    client.id

    +

    An id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.

    + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:medium
    +
  • +
  • +

    connections.max.idle.ms

    +

    Close idle connections after the number of milliseconds specified by this config.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    default.api.timeout.ms

    +

    Specifies the timeout (in milliseconds) for client APIs. This configuration is used as the default timeout for all client operations that do not specify a timeout parameter.

    + + + + + +
    Type:int
    Default:60000 (1 minute)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    receive.buffer.bytes

    +

    The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.

    + + + + + +
    Type:int
    Default:65536 (64 kibibytes)
    Valid Values:[-1,...]
    Importance:medium
    +
  • +
  • +

    request.timeout.ms

    +

    The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.

    + + + + + +
    Type:int
    Default:30000 (30 seconds)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    sasl.client.callback.handler.class

    +

    The fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.jaas.config

    +

    JAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described here. The format for the value is: loginModuleClass controlFlag (optionName=optionValue)*;. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
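Not part of the generated reference: a hedged sketch of the `sasl.jaas.config` format described above, using the PLAIN login module; the username and password are placeholders.

```java
// Hypothetical SASL settings using the JAAS format described above.
// The username and password are illustrative placeholders.
import java.util.Properties;

public class SaslJaasConfigExample {
    public static Properties saslProps() {
        Properties props = new Properties();
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.mechanism", "PLAIN");
        props.put("sasl.jaas.config",
            "org.apache.kafka.common.security.plain.PlainLoginModule required "
                + "username=\"alice\" password=\"alice-secret\";");
        return props;
    }
}
```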
  • +
  • +

    sasl.kerberos.service.name

    +

    The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.login.callback.handler.class

    +

    The fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.login.class

    +

    The fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.mechanism

    +

    SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.

    + + + + + +
    Type:string
    Default:GSSAPI
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.algorithm

    +

    The algorithm the Apache Kafka client should use to sign the assertion sent to the identity provider. It is also used as the value of the OAuth alg (Algorithm) header in the JWT assertion.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:RS256
    Valid Values:(case insensitive) [ES256, RS256]
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.aud

    +

    The JWT aud (Audience) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.iss

    +

    The value to be used as the iss (Issuer) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.jti.include

    +

    Flag that determines if the JWT assertion should generate a unique ID for the JWT and include it in the jti (JWT ID) claim.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.sub

    +

    The value to be used as the sub (Subject) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.file

    +

    File that contains a pre-generated JWT assertion.

    The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when the file changes to allow for the file to be reloaded on modifications. This allows for "live" assertion rotation without restarting the Kafka client.

    The file contains the assertion in the serialized, three part JWT format:

    1. The header section is a base 64-encoded JWT header that contains values like alg (Algorithm), typ (Type, always the literal value JWT), etc.
    2. The payload section includes the base 64-encoded set of JWT claims, such as aud (Audience), iss (Issuer), sub (Subject), etc.
    3. The signature section is the signature computed over the concatenated header and payload sections using a private key

    See RFC 7519 and RFC 7515 for more details on the JWT and JWS formats.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, all other sasl.oauthbearer.assertion.* configurations are ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.private.key.file

    +

    File that contains a private key in the standard PEM format which is used to sign the JWT assertion sent to the identity provider.

    The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when the file changes to allow for the file to be reloaded on modifications. This allows for "live" private key rotation without restarting the Kafka client.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.private.key.passphrase

    +

    The optional passphrase to decrypt the private key file specified by sasl.oauthbearer.assertion.private.key.file.

    Note: If the file referred to by sasl.oauthbearer.assertion.private.key.file is modified on the file system at runtime and it was created with a different passphrase than it was previously, the client will not be able to access the private key file because the passphrase is now out of date. For that reason, when using private key passphrases, either use the same passphrase each time, or—for improved security—restart the Kafka client using the new passphrase configuration.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.template.file

    +

    This optional configuration specifies the file containing the JWT headers and/or payload claims to be used when creating the JWT assertion.

    Not all identity providers require the same set of claims; some may require a given claim while others may prohibit it. In order to provide the most flexibility, this configuration allows the user to provide the static header values and claims that are to be included in the JWT.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.client.credentials.client.id

    +

    The ID (defined in/by the OAuth identity provider) to identify the client requesting the token.

    The client ID was previously stored as part of the sasl.jaas.config configuration with the key clientId. For backward compatibility, the clientId JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.client.credentials.client.id from configuration
    • clientId from JAAS

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.client.credentials.client.secret

    +

    The secret (defined by either the user or preassigned, depending on the identity provider) of the client requesting the token.

    The client secret was previously stored as part of the sasl.jaas.config configuration with the key clientSecret. For backward compatibility, the clientSecret JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.client.credentials.client.secret from configuration
    • clientSecret from JAAS

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.url

    +

    The OAuth/OIDC provider URL from which the provider's JWKS (JSON Web Key Set) can be retrieved. The URL can be HTTP(S)-based or file-based. If the URL is HTTP(S)-based, the JWKS data will be retrieved from the OAuth/OIDC provider via the configured URL on broker startup. All then-current keys will be cached on the broker for incoming requests. If an authentication request is received for a JWT that includes a "kid" header claim value that isn't yet in the cache, the JWKS endpoint will be queried again on demand. However, the broker polls the URL every sasl.oauthbearer.jwks.endpoint.refresh.ms milliseconds to refresh the cache with any forthcoming keys before any JWT requests that include them are received. If the URL is file-based, the broker will load the JWKS file from a configured location on startup. In the event that the JWT includes a "kid" header value that isn't in the JWKS file, the broker will reject the JWT and authentication will fail.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwt.retriever.class

    +

    The fully-qualified class name of a JwtRetriever implementation used to request tokens from the identity provider.

    The default configuration value represents a class that maintains backward compatibility with previous versions of Apache Kafka. The default implementation uses the configuration to determine which concrete implementation to create.

    Other implementations that are provided include:

    • org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.FileJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever

    + + + + + +
    Type:class
    Default:org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwt.validator.class

    +

    The fully-qualified class name of a JwtValidator implementation used to validate the JWT from the identity provider.

    The default validator (org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator) maintains backward compatibility with previous versions of Apache Kafka. The default validator uses configuration to determine which concrete implementation to create.

    The built-in JwtValidator implementations are:

    • org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator
    • org.apache.kafka.common.security.oauthbearer.ClientJwtValidator
    • org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator

    + + + + + +
    Type:class
    Default:org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.scope

    +

    This is the level of access a client application is granted to a resource or API which is included in the token request. If provided, it should match one or more scopes configured in the identity provider.

    The scope was previously stored as part of the sasl.jaas.config configuration with the key scope. For backward compatibility, the scope JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.scope from configuration
    • scope from JAAS

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.token.endpoint.url

    +

    The URL for the OAuth/OIDC identity provider. If the URL is HTTP(S)-based, it is the issuer's token endpoint URL to which requests will be made to login based on the configuration in sasl.oauthbearer.jwt.retriever.class. If the URL is file-based, it specifies a file containing an access token (in JWT serialized form) issued by the OAuth/OIDC identity provider to use for authorization.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
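Not part of the generated reference: a hedged sketch assembling several of the `sasl.oauthbearer.*` settings documented on this page into a client_credentials-style client configuration. The endpoint URL, client ID, secret, and scope are placeholders, and depending on the client version additional settings may be required.

```java
// Hedged sketch: OAuth client_credentials-style settings assembled from the
// sasl.oauthbearer.* configs documented above. Endpoint, id, secret and scope are
// placeholders; additional settings may be needed depending on the client version.
import java.util.Properties;

public class OAuthBearerConfigExample {
    public static Properties oauthProps() {
        Properties props = new Properties();
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.mechanism", "OAUTHBEARER");
        props.put("sasl.jaas.config",
            "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required;");
        props.put("sasl.oauthbearer.token.endpoint.url", "https://idp.example.com/oauth2/token");
        props.put("sasl.oauthbearer.client.credentials.client.id", "my-client-id");
        props.put("sasl.oauthbearer.client.credentials.client.secret", "my-client-secret");
        props.put("sasl.oauthbearer.scope", "kafka");
        return props;
    }
}
```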
  • +
  • +

    security.protocol

    +

    Protocol used to communicate with brokers.

    + + + + + +
    Type:string
    Default:PLAINTEXT
    Valid Values:(case insensitive) [SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT]
    Importance:medium
    +
  • +
  • +

    send.buffer.bytes

    +

    The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.

    + + + + + +
    Type:int
    Default:131072 (128 kibibytes)
    Valid Values:[-1,...]
    Importance:medium
    +
  • +
  • +

    socket.connection.setup.timeout.max.ms

    +

    The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. To avoid connection storms, a randomization factor of 0.2 will be applied to the timeout resulting in a random range between 20% below and 20% above the computed value.

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    socket.connection.setup.timeout.ms

    +

    The amount of time the client will wait for the socket connection to be established. If the connection is not built before the timeout elapses, clients will close the socket channel. This value is the initial backoff value and will increase exponentially for each consecutive connection failure, up to the socket.connection.setup.timeout.max.ms value.

    + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.enabled.protocols

    +

    The list of protocols enabled for SSL connections. The default is 'TLSv1.2,TLSv1.3'. This means that clients and servers will prefer TLSv1.3 if both support it and fallback to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most use cases. Also see the config documentation for `ssl.protocol` to understand how it can impact the TLS version negotiation behavior.

    + + + + + +
    Type:list
    Default:TLSv1.2,TLSv1.3
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.keystore.type

    +

    The file format of the key store file. This is optional for client. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].

    + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.protocol

    +

    The SSL protocol used to generate the SSLContext. The default is 'TLSv1.3', which should be fine for most use cases. A typical alternative to the default is 'TLSv1.2'. Allowed values for this config are dependent on the JVM. Clients using the defaults for this config and 'ssl.enabled.protocols' will downgrade to 'TLSv1.2' if the server does not support 'TLSv1.3'. If this config is set to 'TLSv1.2', however, clients will not use 'TLSv1.3' even if it is one of the values in `ssl.enabled.protocols` and the server only supports 'TLSv1.3'.

    + + + + + +
    Type:string
    Default:TLSv1.3
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.provider

    +

    The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.truststore.type

    +

    The file format of the trust store file. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].

    + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    +
  • +
  • +

    enable.metrics.push

    +

    Whether to enable pushing of client metrics to the cluster, if the cluster has a client metrics subscription which matches this client.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:low
    +
  • +
  • +

    metadata.max.age.ms

    +

    The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    metadata.recovery.rebootstrap.trigger.ms

    +

    If a client configured to rebootstrap using metadata.recovery.strategy=rebootstrap is unable to obtain metadata from any of the brokers in the last known metadata for this interval, client repeats the bootstrap process using bootstrap.servers configuration.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    metadata.recovery.strategy

    +

    Controls how the client recovers when none of the brokers known to it is available. If set to none, the client fails. If set to rebootstrap, the client repeats the bootstrap process using bootstrap.servers. Rebootstrapping is useful when a client communicates with brokers so infrequently that the set of brokers may change entirely before the client refreshes metadata. Metadata recovery is triggered when all last-known brokers appear unavailable simultaneously. Brokers appear unavailable when disconnected and no current retry attempt is in-progress. Consider increasing reconnect.backoff.ms and reconnect.backoff.max.ms and decreasing socket.connection.setup.timeout.ms and socket.connection.setup.timeout.max.ms for the client. Rebootstrap is also triggered if connection cannot be established to any of the brokers for metadata.recovery.rebootstrap.trigger.ms milliseconds or if server requests rebootstrap.

    + + + + + +
    Type:string
    Default:rebootstrap
    Valid Values:(case insensitive) [REBOOTSTRAP, NONE]
    Importance:low
    +
  • +
  • +

    metric.reporters

    +

    A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. When custom reporters are set and org.apache.kafka.common.metrics.JmxReporter is needed, it has to be explicitly added to the list.

    + + + + + +
    Type:list
    Default:org.apache.kafka.common.metrics.JmxReporter
    Valid Values:
    Importance:low
    +
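Not part of the generated reference: a hedged sketch of a custom reporter implementing the `org.apache.kafka.common.metrics.MetricsReporter` interface mentioned above; the class name is hypothetical.

```java
// Hedged sketch of a custom metrics reporter that could be listed in
// metric.reporters; it simply logs metric creation, change, and removal.
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.MetricsReporter;

public class LoggingMetricsReporter implements MetricsReporter {
    @Override
    public void configure(Map<String, ?> configs) {}

    @Override
    public void init(List<KafkaMetric> metrics) {
        metrics.forEach(m -> System.out.println("Existing metric: " + m.metricName()));
    }

    @Override
    public void metricChange(KafkaMetric metric) {
        System.out.println("Metric added/changed: " + metric.metricName());
    }

    @Override
    public void metricRemoval(KafkaMetric metric) {
        System.out.println("Metric removed: " + metric.metricName());
    }

    @Override
    public void close() {}
}
```

Such a reporter would be enabled by listing its fully-qualified class name in `metric.reporters`, adding `org.apache.kafka.common.metrics.JmxReporter` explicitly if JMX metrics are still wanted, as noted in the description above.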
  • +
  • +

    metrics.num.samples

    +

    The number of samples maintained to compute metrics.

    + + + + + +
    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:low
    +
  • +
  • +

    metrics.recording.level

    +

    The highest recording level for metrics. It has three levels for recording metrics - info, debug, and trace.

    INFO level records only essential metrics necessary for monitoring system performance and health. It collects vital data without gathering too much detail, making it suitable for production environments where minimal overhead is desired.

    DEBUG level records most metrics, providing more detailed information about the system's operation. It's useful for development and testing environments where you need deeper insights to debug and fine-tune the application.

    TRACE level records all possible metrics, capturing every detail about the system's performance and operation. It's best for controlled environments where in-depth analysis is required, though it can introduce significant overhead.

    + + + + + +
    Type:string
    Default:INFO
    Valid Values:[INFO, DEBUG, TRACE]
    Importance:low
    +
  • +
  • +

    metrics.sample.window.ms

    +

    The window of time a metrics sample is computed over.

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    reconnect.backoff.max.ms

    +

    The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.

    + + + + + +
    Type:long
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    reconnect.backoff.ms

    +

    The base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker. This value is the initial backoff value and will increase exponentially for each consecutive connection failure, up to the reconnect.backoff.max.ms value.

    + + + + + +
    Type:long
    Default:50
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    retries

    +

    It is recommended to set the value to either MAX_VALUE or zero, and use corresponding timeout parameters to control how long a client should retry a request. Setting a value greater than zero will cause the client to resend any request that fails with a potentially transient error. Setting a value of zero will lead to transient errors not being retried, and they will be propagated to the application to be handled.

    + + + + + +
    Type:int
    Default:2147483647
    Valid Values:[0,...,2147483647]
    Importance:low
    +
  • +
  • +

    retry.backoff.max.ms

    +

    The maximum amount of time in milliseconds to wait when retrying a request to the broker that has repeatedly failed. If provided, the backoff per client will increase exponentially for each failed request, up to this maximum. To prevent all clients from being synchronized upon retry, a randomized jitter with a factor of 0.2 will be applied to the backoff, resulting in the backoff falling within a range between 20% below and 20% above the computed value. If retry.backoff.ms is set to be higher than retry.backoff.max.ms, then retry.backoff.max.ms will be used as a constant backoff from the beginning without any exponential increase

    + + + + + +
    Type:long
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    retry.backoff.ms

    +

    The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios. This value is the initial backoff value and will increase exponentially for each failed request, up to the retry.backoff.max.ms value.

    + + + + + +
    Type:long
    Default:100
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    sasl.kerberos.kinit.cmd

    +

    Kerberos kinit command path.

    + + + + + +
    Type:string
    Default:/usr/bin/kinit
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.min.time.before.relogin

    +

    Login thread sleep time between refresh attempts.

    + + + + + +
    Type:long
    Default:60000
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.ticket.renew.jitter

    +

    Percentage of random jitter added to the renewal time.

    + + + + + +
    Type:double
    Default:0.05
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.ticket.renew.window.factor

    +

    Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.

    + + + + + +
    Type:double
    Default:0.8
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.connect.timeout.ms

    +

    The (optional) value in milliseconds for the external authentication provider connection timeout. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.read.timeout.ms

    +

    The (optional) value in milliseconds for the external authentication provider read timeout. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.buffer.seconds

    +

    The amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:short
    Default:300
    Valid Values:[0,...,3600]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.min.period.seconds

    +

    The desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:short
    Default:60
    Valid Values:[0,...,900]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.window.factor

    +

    Login refresh thread will sleep until the specified window factor relative to the credential's lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:double
    Default:0.8
    Valid Values:[0.5,...,1.0]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.window.jitter

    +

    The maximum amount of random jitter relative to the credential's lifetime that is added to the login refresh thread's sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:double
    Default:0.05
    Valid Values:[0.0,...,0.25]
    Importance:low
    +
  • +
  • +

    sasl.login.retry.backoff.max.ms

    +

    The (optional) value in milliseconds for the maximum wait between login attempts to the external authentication provider. Login uses an exponential backoff algorithm with an initial wait based on the sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.login.retry.backoff.max.ms setting. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.retry.backoff.ms

    +

    The (optional) value in milliseconds for the initial wait between login attempts to the external authentication provider. Login uses an exponential backoff algorithm with an initial wait based on the sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.login.retry.backoff.max.ms setting. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:long
    Default:100
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.exp.seconds

    +

    The number of seconds in the future for which the JWT is valid. The value is used to determine the JWT exp (Expiration) claim based on the current system time when the JWT is created.

    The formula to generate the exp claim is very simple:

    Let:

    x = the current timestamp in seconds, on client
    y = the value of this configuration

    Then:

    exp = x + y

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:int
    Default:300
    Valid Values:[0,...,86400]
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.nbf.seconds

    +

    The number of seconds in the past from which the JWT is valid. The value is used to determine the JWT nbf (Not Before) claim based on the current system time when the JWT is created.

    The formula to generate the nbf claim is very simple:

    Let:

    x = the current timestamp in seconds, on client
    y = the value of this configuration

    Then:

    nbf = x - y

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:int
    Default:60
    Valid Values:[0,...,3600]
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.clock.skew.seconds

    +

    The (optional) value in seconds to allow for differences between the time of the OAuth/OIDC identity provider and the broker.

    + + + + + +
    Type:int
    Default:30
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.expected.audience

    +

    The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. The JWT will be inspected for the standard OAuth "aud" claim and if this value is set, the broker will match the value from JWT's "aud" claim to see if there is an exact match. If there is no match, the broker will reject the JWT and authentication will fail.

    + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.expected.issuer

    +

    The (optional) setting for the broker to use to verify that the JWT was created by the expected issuer. The JWT will be inspected for the standard OAuth "iss" claim and if this value is set, the broker will match it exactly against what is in the JWT's "iss" claim. If there is no match, the broker will reject the JWT and authentication will fail.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.header.urlencode

    +

    The (optional) setting to enable the OAuth client to URL-encode the client_id and client_secret in the authorization header in accordance with RFC6749, see here for more details. The default value is set to 'false' for backward compatibility

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.refresh.ms

    +

    The (optional) value in milliseconds for the broker to wait between refreshing its JWKS (JSON Web Key Set) cache that contains the keys to verify the signature of the JWT.

    + + + + + +
    Type:long
    Default:3600000 (1 hour)
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms

    +

    The (optional) value in milliseconds for the maximum wait between attempts to retrieve the JWKS (JSON Web Key Set) from the external authentication provider. JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting.

    + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms

    +

    The (optional) value in milliseconds for the initial wait between JWKS (JSON Web Key Set) retrieval attempts from the external authentication provider. JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting.

    + + + + + +
    Type:long
    Default:100
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.scope.claim.name

    +

    The OAuth claim for the scope is often named "scope", but this (optional) setting can provide a different name to use for the scope included in the JWT payload's claims if the OAuth/OIDC provider uses a different name for that claim.

    + + + + + +
    Type:string
    Default:scope
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.sub.claim.name

    +

    The OAuth claim for the subject is often named "sub", but this (optional) setting can provide a different name to use for the subject included in the JWT payload's claims if the OAuth/OIDC provider uses a different name for that claim.

    + + + + + +
    Type:string
    Default:sub
    Valid Values:
    Importance:low
    +
  • +
  • +

    security.providers

    +

    A list of configurable creator classes each returning a provider implementing security algorithms. These classes should implement the org.apache.kafka.common.security.auth.SecurityProviderCreator interface.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.cipher.suites

    +

    A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.

    + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.endpoint.identification.algorithm

    +

    The endpoint identification algorithm to validate server hostname using server certificate.

    + + + + + +
    Type:string
    Default:https
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.engine.factory.class

    +

    The class of type org.apache.kafka.common.security.auth.SslEngineFactory to provide SSLEngine objects. Default value is org.apache.kafka.common.security.ssl.DefaultSslEngineFactory. Alternatively, setting this to org.apache.kafka.common.security.ssl.CommonNameLoggingSslEngineFactory will log the common name of expired SSL certificates used by clients to authenticate at any of the brokers with log level INFO. Note that this will cause a tiny delay during establishment of new connections from mTLS clients to brokers due to the extra code for examining the certificate chain provided by the client. Note further that the implementation uses a custom truststore based on the standard Java truststore and thus might be considered a security risk due to not being as mature as the standard one.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.keymanager.algorithm

    +

    The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.

    + + + + + +
    Type:string
    Default:SunX509
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.secure.random.implementation

    +

    The SecureRandom PRNG implementation to use for SSL cryptography operations.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.trustmanager.algorithm

    +

    The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.

    + + + + + +
    Type:string
    Default:PKIX
    Valid Values:
    Importance:low
    +
  • +
+ diff --git a/static/41/generated/connect_config.html b/static/41/generated/connect_config.html new file mode 100644 index 000000000..1dd2007bb --- /dev/null +++ b/static/41/generated/connect_config.html @@ -0,0 +1,1333 @@ +
    +
  • +

    config.storage.topic

    +

    The name of the Kafka topic where connector configurations are stored

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    group.id

    +

    A unique string that identifies the Connect cluster group this worker belongs to.

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    key.converter

    +

    Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the keys in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.

    + + + + + +
    Type:class
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    offset.storage.topic

    +

    The name of the Kafka topic where source connector offsets are stored

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    status.storage.topic

    +

    The name of the Kafka topic where connector and task status are stored

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    value.converter

    +

    Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.

    + + + + + +
    Type:class
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    bootstrap.servers

    +

    A list of host/port pairs used to establish the initial connection to the Kafka cluster. Clients use this list to bootstrap and discover the full set of Kafka brokers. While the order of servers in the list does not matter, we recommend including more than one server to ensure resilience if any servers are down. This list does not need to contain the entire set of brokers, as Kafka clients automatically manage and update connections to the cluster efficiently. This list must be in the form host1:port1,host2:port2,....

    + + + + + +
    Type:list
    Default:localhost:9092
    Valid Values:
    Importance:high
    +
  • +
  • +
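
    To tie the high-importance settings above together, here is a minimal sketch of a distributed worker configuration assembled in code; every value (broker list, group id, topic names) is a placeholder rather than a recommendation.

        import java.util.HashMap;
        import java.util.Map;

        public class WorkerConfigSketch {
            public static Map<String, String> minimalDistributedWorkerProps() {
                Map<String, String> props = new HashMap<>();
                // Placeholder broker list; replace with real host:port pairs.
                props.put("bootstrap.servers", "broker-1:9092,broker-2:9092");
                // All workers sharing this group.id form one Connect cluster.
                props.put("group.id", "example-connect-cluster");
                // Internal topics; these must be distinct per Connect cluster.
                props.put("config.storage.topic", "connect-configs");
                props.put("offset.storage.topic", "connect-offsets");
                props.put("status.storage.topic", "connect-status");
                // Converters control the serialized record format in Kafka.
                props.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
                props.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
                return props;
            }
        }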

    exactly.once.source.support

    +

    Whether to enable exactly-once support for source connectors in the cluster by using transactions to write source records and their source offsets, and by proactively fencing out old task generations before bringing up new ones.
    To enable exactly-once source support on a new cluster, set this property to 'enabled'. To enable support on an existing cluster, first set to 'preparing' on every worker in the cluster, then set to 'enabled'. A rolling upgrade may be used for both changes. For more information on this feature, see the exactly-once source support documentation.

    + + + + + +
    Type:string
    Default:disabled
    Valid Values:(case insensitive) [DISABLED, ENABLED, PREPARING]
    Importance:high
    +
  • +
  • +

    heartbeat.interval.ms

    +

    The expected time between heartbeats to the group coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the worker's session stays active and to facilitate rebalancing when new members join or leave the group. The value must be set lower than session.timeout.ms, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.

    + + + + + +
    Type:int
    Default:3000 (3 seconds)
    Valid Values:
    Importance:high
    +
  • +
  • +

    rebalance.timeout.ms

    +

    The maximum allowed time for each worker to join the group once a rebalance has begun. This is basically a limit on the amount of time needed for all tasks to flush any pending data and commit offsets. If the timeout is exceeded, then the worker will be removed from the group, which will cause offset commit failures.

    + + + + + +
    Type:int
    Default:60000 (1 minute)
    Valid Values:
    Importance:high
    +
  • +
  • +

    session.timeout.ms

    +

    The timeout used to detect worker failures. The worker sends periodic heartbeats to indicate its liveness to the broker. If no heartbeats are received by the broker before the expiration of this session timeout, then the broker will remove the worker from the group and initiate a rebalance. Note that the value must be in the allowable range as configured in the broker configuration by group.min.session.timeout.ms and group.max.session.timeout.ms.

    + + + + + +
    Type:int
    Default:10000 (10 seconds)
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.key.password

    +

    The password of the private key in the key store file or the PEM key specified in 'ssl.keystore.key'.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.certificate.chain

    +

    Certificate chain in the format specified by 'ssl.keystore.type'. Default SSL engine factory supports only PEM format with a list of X.509 certificates

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.key

    +

    Private key in the format specified by 'ssl.keystore.type'. Default SSL engine factory supports only PEM format with PKCS#8 keys. If the key is encrypted, key password must be specified using 'ssl.key.password'

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.location

    +

    The location of the key store file. This is optional for clients and can be used for two-way client authentication.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.password

    +

    The store password for the key store file. This is optional for clients and only needed if 'ssl.keystore.location' is configured. Key store password is not supported for PEM format.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.certificates

    +

    Trusted certificates in the format specified by 'ssl.truststore.type'. Default SSL engine factory supports only PEM format with X.509 certificates.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.location

    +

    The location of the trust store file.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.password

    +

    The password for the trust store file. If a password is not set, trust store file configured will still be used, but integrity checking is disabled. Trust store password is not supported for PEM format.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    client.dns.lookup

    +

    Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again (both the JVM and the OS cache DNS name lookups, however). If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips.

    + + + + + +
    Type:string
    Default:use_all_dns_ips
    Valid Values:[use_all_dns_ips, resolve_canonical_bootstrap_servers_only]
    Importance:medium
    +
  • +
  • +

    connections.max.idle.ms

    +

    Close idle connections after the number of milliseconds specified by this config.

    + + + + + +
    Type:long
    Default:540000 (9 minutes)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    connector.client.config.override.policy

    +

    Class name or alias of implementation of ConnectorClientConfigOverridePolicy. Defines what client configurations can be overridden by the connector. The default implementation is `All`, meaning connector configurations can override all client properties. The other possible policies in the framework include `None` to disallow connectors from overriding client properties, and `Principal` to allow connectors to override only client principals.

    + + + + + +
    Type:string
    Default:All
    Valid Values:
    Importance:medium
    +
  • +
  • +

    receive.buffer.bytes

    +

    The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.

    + + + + + +
    Type:int
    Default:32768 (32 kibibytes)
    Valid Values:[-1,...]
    Importance:medium
    +
  • +
  • +

    request.timeout.ms

    +

    The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.

    + + + + + +
    Type:int
    Default:40000 (40 seconds)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    sasl.client.callback.handler.class

    +

    The fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +
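
    A minimal skeleton of such a handler is sketched below; the class name is invented, and which callbacks must actually be populated depends on the SASL mechanism in use.

        import java.util.List;
        import java.util.Map;
        import javax.security.auth.callback.Callback;
        import javax.security.auth.callback.UnsupportedCallbackException;
        import javax.security.auth.login.AppConfigurationEntry;
        import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;

        public class ExampleClientCallbackHandler implements AuthenticateCallbackHandler {

            @Override
            public void configure(Map<String, ?> configs, String saslMechanism,
                                  List<AppConfigurationEntry> jaasConfigEntries) {
                // Read any handler-specific settings from configs / JAAS entries here.
            }

            @Override
            public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
                // A real handler would populate mechanism-specific callbacks (e.g. name/password) here.
                // This skeleton reports every callback as unsupported.
                for (Callback callback : callbacks) {
                    throw new UnsupportedCallbackException(callback);
                }
            }

            @Override
            public void close() {
                // Release any resources acquired in configure().
            }
        }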

    sasl.jaas.config

    +

    JAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described here. The format for the value is: loginModuleClass controlFlag (optionName=optionValue)*;. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +
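
    As a concrete illustration of the value format, a client using the PLAIN mechanism could be configured as sketched below; the credentials are placeholders.

        import java.util.Properties;

        public class JaasConfigSketch {
            public static Properties saslPlainClientProps() {
                Properties props = new Properties();
                props.put("security.protocol", "SASL_SSL");
                props.put("sasl.mechanism", "PLAIN");
                // Value format: loginModuleClass controlFlag (optionName=optionValue)*;
                props.put("sasl.jaas.config",
                    "org.apache.kafka.common.security.plain.PlainLoginModule required "
                        + "username=\"alice\" password=\"alice-secret\";");
                return props;
            }
        }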

    sasl.kerberos.service.name

    +

    The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.login.callback.handler.class

    +

    The fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.login.class

    +

    The fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.mechanism

    +

    SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.

    + + + + + +
    Type:string
    Default:GSSAPI
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.algorithm

    +

    The algorithm the Apache Kafka client should use to sign the assertion sent to the identity provider. It is also used as the value of the OAuth alg (Algorithm) header in the JWT assertion.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:RS256
    Valid Values:(case insensitive) [ES256, RS256]
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.aud

    +

    The JWT aud (Audience) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.iss

    +

    The value to be used as the iss (Issuer) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.jti.include

    +

    Flag that determines if the JWT assertion should generate a unique ID for the JWT and include it in the jti (JWT ID) claim.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.sub

    +

    The value to be used as the sub (Subject) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.file

    +

    File that contains a pre-generated JWT assertion.

    The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when the file changes to allow for the file to be reloaded on modifications. This allows for "live" assertion rotation without restarting the Kafka client.

    The file contains the assertion in the serialized, three part JWT format:

    1. The header section is a base 64-encoded JWT header that contains values like alg (Algorithm), typ (Type, always the literal value JWT), etc.
    2. The payload section includes the base 64-encoded set of JWT claims, such as aud (Audience), iss (Issuer), sub (Subject), etc.
    3. The signature section is the signature computed over the concatenated header and payload sections using a private key

    See RFC 7519 and RFC 7515 for more details on the JWT and JWS formats.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, all other sasl.oauthbearer.assertion.* configurations are ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.private.key.file

    +

    File that contains a private key in the standard PEM format which is used to sign the JWT assertion sent to the identity provider.

    The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when the file changes to allow for the file to be reloaded on modifications. This allows for "live" private key rotation without restarting the Kafka client.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.private.key.passphrase

    +

    The optional passphrase to decrypt the private key file specified by sasl.oauthbearer.assertion.private.key.file.

    Note: If the file referred to by sasl.oauthbearer.assertion.private.key.file is modified on the file system at runtime and it was created with a different passphrase than it was previously, the client will not be able to access the private key file because the passphrase is now out of date. For that reason, when using private key passphrases, either use the same passphrase each time, or—for improved security—restart the Kafka client using the new passphrase configuration.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.template.file

    +

    This optional configuration specifies the file containing the JWT headers and/or payload claims to be used when creating the JWT assertion.

    Not all identity providers require the same set of claims; some may require a given claim while others may prohibit it. In order to provide the most flexibility, this configuration allows the user to provide the static header values and claims that are to be included in the JWT.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.client.credentials.client.id

    +

    The ID (defined in/by the OAuth identity provider) to identify the client requesting the token.

    The client ID was previously stored as part of the sasl.jaas.config configuration with the key clientId. For backward compatibility, the clientId JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.client.credentials.client.id from configuration
    • clientId from JAAS

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.client.credentials.client.secret

    +

    The secret (defined by either the user or preassigned, depending on the identity provider) of the client requesting the token.

    The client secret was previously stored as part of the sasl.jaas.config configuration with the key clientSecret. For backward compatibility, the clientSecret JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.client.credentials.client.secret from configuration
    • clientSecret from JAAS

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.url

    +

    The OAuth/OIDC provider URL from which the provider's JWKS (JSON Web Key Set) can be retrieved. The URL can be HTTP(S)-based or file-based. If the URL is HTTP(S)-based, the JWKS data will be retrieved from the OAuth/OIDC provider via the configured URL on broker startup. All then-current keys will be cached on the broker for incoming requests. If an authentication request is received for a JWT that includes a "kid" header claim value that isn't yet in the cache, the JWKS endpoint will be queried again on demand. However, the broker polls the URL every sasl.oauthbearer.jwks.endpoint.refresh.ms milliseconds to refresh the cache with any forthcoming keys before any JWT requests that include them are received. If the URL is file-based, the broker will load the JWKS file from a configured location on startup. In the event that the JWT includes a "kid" header value that isn't in the JWKS file, the broker will reject the JWT and authentication will fail.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwt.retriever.class

    +

    The fully-qualified class name of a JwtRetriever implementation used to request tokens from the identity provider.

    The default configuration value represents a class that maintains backward compatibility with previous versions of Apache Kafka. The default implementation uses the configuration to determine which concrete implementation to create.

    Other implementations that are provided include:

    • org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.FileJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever

    + + + + + +
    Type:class
    Default:org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwt.validator.class

    +

    The fully-qualified class name of a JwtValidator implementation used to validate the JWT from the identity provider.

    The default validator (org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator) maintains backward compatibility with previous versions of Apache Kafka. The default validator uses configuration to determine which concrete implementation to create.

    The built-in JwtValidator implementations are:

    • org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator
    • org.apache.kafka.common.security.oauthbearer.ClientJwtValidator
    • org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator

    + + + + + +
    Type:class
    Default:org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.scope

    +

    This is the level of access a client application is granted to a resource or API which is included in the token request. If provided, it should match one or more scopes configured in the identity provider.

    The scope was previously stored as part of the sasl.jaas.config configuration with the key scope. For backward compatibility, the scope JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.scope from configuration
    • scope from JAAS

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.token.endpoint.url

    +

    The URL for the OAuth/OIDC identity provider. If the URL is HTTP(S)-based, it is the issuer's token endpoint URL to which requests will be made to login based on the configuration in sasl.oauthbearer.jwt.retriever.class. If the URL is file-based, it specifies a file containing an access token (in JWT serialized form) issued by the OAuth/OIDC identity provider to use for authorization.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    security.protocol

    +

    Protocol used to communicate with brokers.

    + + + + + +
    Type:string
    Default:PLAINTEXT
    Valid Values:(case insensitive) [SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT]
    Importance:medium
    +
  • +
  • +

    send.buffer.bytes

    +

    The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.

    + + + + + +
    Type:int
    Default:131072 (128 kibibytes)
    Valid Values:[-1,...]
    Importance:medium
    +
  • +
  • +

    ssl.enabled.protocols

    +

    The list of protocols enabled for SSL connections. The default is 'TLSv1.2,TLSv1.3'. This means that clients and servers will prefer TLSv1.3 if both support it and fallback to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most use cases. Also see the config documentation for `ssl.protocol` to understand how it can impact the TLS version negotiation behavior.

    + + + + + +
    Type:list
    Default:TLSv1.2,TLSv1.3
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.keystore.type

    +

    The file format of the key store file. This is optional for client. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].

    + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.protocol

    +

    The SSL protocol used to generate the SSLContext. The default is 'TLSv1.3', which should be fine for most use cases. A typical alternative to the default is 'TLSv1.2'. Allowed values for this config are dependent on the JVM. Clients using the defaults for this config and 'ssl.enabled.protocols' will downgrade to 'TLSv1.2' if the server does not support 'TLSv1.3'. If this config is set to 'TLSv1.2', however, clients will not use 'TLSv1.3' even if it is one of the values in `ssl.enabled.protocols` and the server only supports 'TLSv1.3'.

    + + + + + +
    Type:string
    Default:TLSv1.3
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.provider

    +

    The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.truststore.type

    +

    The file format of the trust store file. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].

    + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    +
  • +
  • +

    worker.sync.timeout.ms

    +

    When the worker is out of sync with other workers and needs to resynchronize configurations, wait up to this amount of time before giving up, leaving the group, and waiting a backoff period before rejoining.

    + + + + + +
    Type:int
    Default:3000 (3 seconds)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    worker.unsync.backoff.ms

    +

    When the worker is out of sync with other workers and fails to catch up within worker.sync.timeout.ms, leave the Connect cluster for this long before rejoining.

    + + + + + +
    Type:int
    Default:300000 (5 minutes)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    access.control.allow.methods

    +

    Sets the methods supported for cross origin requests by setting the Access-Control-Allow-Methods header. The default value of the Access-Control-Allow-Methods header allows cross origin requests for GET, POST and HEAD.

    + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:low
    +
  • +
  • +

    access.control.allow.origin

    +

    Value to set the Access-Control-Allow-Origin header to for REST API requests. To enable cross origin access, set this to the domain of the application that should be permitted to access the API, or '*' to allow access from any domain. The default value only allows access from the domain of the REST API.

    + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:low
    +
  • +
  • +

    admin.listeners

    +

    List of comma-separated URIs the Admin REST API will listen on. The supported protocols are HTTP and HTTPS. An empty or blank string will disable this feature. The default behavior is to use the regular listener (specified by the 'listeners' property).

    + + + + + +
    Type:list
    Default:null
    Valid Values:List of comma-separated URLs, ex: http://localhost:8080,https://localhost:8443.
    Importance:low
    +
  • +
  • +

    client.id

    +

    An id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.

    + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:low
    +
  • +
  • +

    config.providers

    +

    Comma-separated names of ConfigProvider classes, loaded and used in the order specified. Implementing the interface ConfigProvider allows you to replace variable references in connector configurations, such as for externalized secrets.

    + + + + + +
    Type:list
    Default:""
    Valid Values:
    Importance:low
    +
  • +
  • +
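
    The sketch below shows the idea using the FileConfigProvider that ships with Kafka; the file path and key are invented for illustration.

        import java.util.Map;
        import java.util.Set;
        import org.apache.kafka.common.config.ConfigData;
        import org.apache.kafka.common.config.provider.FileConfigProvider;

        public class ConfigProviderSketch {
            public static void main(String[] args) throws Exception {
                // In a worker config this provider would be registered as, e.g.:
                //   config.providers=file
                //   config.providers.file.class=org.apache.kafka.common.config.provider.FileConfigProvider
                // and referenced from a connector config as ${file:/etc/kafka/secrets.properties:db.password}
                try (FileConfigProvider provider = new FileConfigProvider()) {
                    provider.configure(Map.of());
                    ConfigData data = provider.get("/etc/kafka/secrets.properties", Set.of("db.password"));
                    System.out.println(data.data().keySet());
                }
            }
        }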

    config.storage.replication.factor

    +

    Replication factor used when creating the configuration storage topic

    + + + + + +
    Type:short
    Default:3
    Valid Values:Positive number not larger than the number of brokers in the Kafka cluster, or -1 to use the broker's default
    Importance:low
    +
  • +
  • +

    connect.protocol

    +

    Compatibility mode for Kafka Connect Protocol

    + + + + + +
    Type:string
    Default:sessioned
    Valid Values:[eager, compatible, sessioned]
    Importance:low
    +
  • +
  • +

    header.converter

    +

    HeaderConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the header values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro. By default, the SimpleHeaderConverter is used to serialize header values to strings and deserialize them by inferring the schemas.

    + + + + + +
    Type:class
    Default:org.apache.kafka.connect.storage.SimpleHeaderConverter
    Valid Values:
    Importance:low
    +
  • +
  • +

    header.converter.plugin.version

    +

    Version of the header converter.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    inter.worker.key.generation.algorithm

    +

    The algorithm to use for generating internal request keys. The algorithm 'HmacSHA256' will be used as a default on JVMs that support it; on other JVMs, no default is used and a value for this property must be manually specified in the worker config.

    + + + + + +
    Type:string
    Default:HmacSHA256
    Valid Values:Any KeyGenerator algorithm supported by the worker JVM
    Importance:low
    +
  • +
  • +
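
    A quick way to check whether the running JVM provides a given KeyGenerator algorithm (using the default HmacSHA256 here):

        import javax.crypto.KeyGenerator;

        public class KeyAlgorithmCheck {
            public static void main(String[] args) throws Exception {
                // Throws NoSuchAlgorithmException if the JVM does not provide HmacSHA256,
                // in which case this property must be set to an algorithm the JVM does support.
                KeyGenerator generator = KeyGenerator.getInstance("HmacSHA256");
                System.out.println("HmacSHA256 provided by: " + generator.getProvider().getName());
            }
        }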

    inter.worker.key.size

    +

    The size of the key to use for signing internal requests, in bits. If null, the default key size for the key generation algorithm will be used.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    inter.worker.key.ttl.ms

    +

    The TTL of generated session keys used for internal request validation (in milliseconds)

    + + + + + +
    Type:int
    Default:3600000 (1 hour)
    Valid Values:[0,...,2147483647]
    Importance:low
    +
  • +
  • +

    inter.worker.signature.algorithm

    +

    The algorithm used to sign internal requests. The algorithm 'HmacSHA256' will be used as a default on JVMs that support it; on other JVMs, no default is used and a value for this property must be manually specified in the worker config.

    + + + + + +
    Type:string
    Default:HmacSHA256
    Valid Values:Any MAC algorithm supported by the worker JVM
    Importance:low
    +
  • +
  • +

    inter.worker.verification.algorithms

    +

    A list of permitted algorithms for verifying internal requests, which must include the algorithm used for the inter.worker.signature.algorithm property. The algorithm(s) '[HmacSHA256]' will be used as a default on JVMs that provide them; on other JVMs, no default is used and a value for this property must be manually specified in the worker config.

    + + + + + +
    Type:list
    Default:HmacSHA256
    Valid Values:A list of one or more MAC algorithms, each supported by the worker JVM
    Importance:low
    +
  • +
  • +

    key.converter.plugin.version

    +

    Version of the key converter.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    listeners

    +

    List of comma-separated URIs the REST API will listen on. The supported protocols are HTTP and HTTPS.
    Specify hostname as 0.0.0.0 to bind to all interfaces.
    Leave hostname empty to bind to default interface.
    Examples of legal listener lists: HTTP://myhost:8083,HTTPS://myhost:8084

    + + + + + +
    Type:list
    Default:http://:8083
    Valid Values:List of comma-separated URLs, ex: http://localhost:8080,https://localhost:8443.
    Importance:low
    +
  • +
  • +

    metadata.max.age.ms

    +

    The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    metadata.recovery.rebootstrap.trigger.ms

    +

    If a client configured to rebootstrap using metadata.recovery.strategy=rebootstrap is unable to obtain metadata from any of the brokers in the last known metadata for this interval, the client repeats the bootstrap process using the bootstrap.servers configuration.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    metadata.recovery.strategy

    +

    Controls how the client recovers when none of the brokers known to it is available. If set to none, the client fails. If set to rebootstrap, the client repeats the bootstrap process using bootstrap.servers. Rebootstrapping is useful when a client communicates with brokers so infrequently that the set of brokers may change entirely before the client refreshes metadata. Metadata recovery is triggered when all last-known brokers appear unavailable simultaneously. Brokers appear unavailable when disconnected and no current retry attempt is in-progress. Consider increasing reconnect.backoff.ms and reconnect.backoff.max.ms and decreasing socket.connection.setup.timeout.ms and socket.connection.setup.timeout.max.ms for the client. Rebootstrap is also triggered if connection cannot be established to any of the brokers for metadata.recovery.rebootstrap.trigger.ms milliseconds or if server requests rebootstrap.

    + + + + + +
    Type:string
    Default:rebootstrap
    Valid Values:(case insensitive) [REBOOTSTRAP, NONE]
    Importance:low
    +
  • +
  • +

    metric.reporters

    +

    A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. When custom reporters are set and org.apache.kafka.common.metrics.JmxReporter is needed, it has to be explicitly added to the list.

    + + + + + +
    Type:list
    Default:org.apache.kafka.common.metrics.JmxReporter
    Valid Values:
    Importance:low
    +
  • +
  • +
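
    A minimal reporter skeleton that simply logs metric lifecycle events to stdout is sketched below; the class name is invented.

        import java.util.List;
        import java.util.Map;
        import org.apache.kafka.common.metrics.KafkaMetric;
        import org.apache.kafka.common.metrics.MetricsReporter;

        public class StdoutMetricsReporter implements MetricsReporter {

            @Override
            public void configure(Map<String, ?> configs) {
                // Reporter-specific settings (if any) arrive here.
            }

            @Override
            public void init(List<KafkaMetric> metrics) {
                metrics.forEach(m -> System.out.println("initial metric: " + m.metricName()));
            }

            @Override
            public void metricChange(KafkaMetric metric) {
                System.out.println("metric added/changed: " + metric.metricName());
            }

            @Override
            public void metricRemoval(KafkaMetric metric) {
                System.out.println("metric removed: " + metric.metricName());
            }

            @Override
            public void close() {
                // Nothing to release for this sketch.
            }
        }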

    metrics.num.samples

    +

    The number of samples maintained to compute metrics.

    + + + + + +
    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:low
    +
  • +
  • +

    metrics.recording.level

    +

    The highest recording level for metrics. It has three levels for recording metrics - info, debug, and trace.

    INFO level records only essential metrics necessary for monitoring system performance and health. It collects vital data without gathering too much detail, making it suitable for production environments where minimal overhead is desired.

    DEBUG level records most metrics, providing more detailed information about the system's operation. It's useful for development and testing environments where you need deeper insights to debug and fine-tune the application.

    TRACE level records all possible metrics, capturing every detail about the system's performance and operation. It's best for controlled environments where in-depth analysis is required, though it can introduce significant overhead.

    + + + + + +
    Type:string
    Default:INFO
    Valid Values:[INFO, DEBUG]
    Importance:low
    +
  • +
  • +

    metrics.sample.window.ms

    +

    The window of time a metrics sample is computed over.

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    offset.flush.interval.ms

    +

    Interval at which to try committing offsets for tasks.

    + + + + + +
    Type:long
    Default:60000 (1 minute)
    Valid Values:
    Importance:low
    +
  • +
  • +

    offset.flush.timeout.ms

    +

    Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt. This property has no effect for source connectors running with exactly-once support.

    + + + + + +
    Type:long
    Default:5000 (5 seconds)
    Valid Values:
    Importance:low
    +
  • +
  • +

    offset.storage.partitions

    +

    The number of partitions used when creating the offset storage topic

    + + + + + +
    Type:int
    Default:25
    Valid Values:Positive number, or -1 to use the broker's default
    Importance:low
    +
  • +
  • +

    offset.storage.replication.factor

    +

    Replication factor used when creating the offset storage topic

    + + + + + +
    Type:short
    Default:3
    Valid Values:Positive number not larger than the number of brokers in the Kafka cluster, or -1 to use the broker's default
    Importance:low
    +
  • +
  • +

    plugin.discovery

    +

    Method to use to discover plugins present in the classpath and plugin.path configuration. This can be one of multiple values with the following meanings:
    * only_scan: Discover plugins only by reflection. Plugins which are not discoverable by ServiceLoader will not impact worker startup.
    * hybrid_warn: Discover plugins reflectively and by ServiceLoader. Plugins which are not discoverable by ServiceLoader will print warnings during worker startup.
    * hybrid_fail: Discover plugins reflectively and by ServiceLoader. Plugins which are not discoverable by ServiceLoader will cause worker startup to fail.
    * service_load: Discover plugins only by ServiceLoader. Faster startup than other modes. Plugins which are not discoverable by ServiceLoader may not be usable.

    + + + + + +
    Type:string
    Default:hybrid_warn
    Valid Values:(case insensitive) [ONLY_SCAN, SERVICE_LOAD, HYBRID_WARN, HYBRID_FAIL]
    Importance:low
    +
  • +
  • +

    plugin.path

    +

    List of paths separated by commas (,) that contain plugins (connectors, converters, transformations). The list should consist of top level directories that include any combination of:
    a) directories immediately containing jars with plugins and their dependencies
    b) uber-jars with plugins and their dependencies
    c) directories immediately containing the package directory structure of classes of plugins and their dependencies
    Note: symlinks will be followed to discover dependencies or plugins.
    Examples: plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors
    Do not use config provider variables in this property, since the raw path is used by the worker's scanner before config providers are initialized and used to replace variables.

    + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    reconnect.backoff.max.ms

    +

    The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.

    + + + + + +
    Type:long
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    reconnect.backoff.ms

    +

    The base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker. This value is the initial backoff value and will increase exponentially for each consecutive connection failure, up to the reconnect.backoff.max.ms value.

    + + + + + +
    Type:long
    Default:50
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    response.http.headers.config

    +

    Rules for REST API HTTP response headers

    + + + + + +
    Type:string
    Default:""
    Valid Values:Comma-separated header rules, where each header rule is of the form '[action] [header name]:[header value]' and optionally surrounded by double quotes if any part of a header rule contains a comma
    Importance:low
    +
  • +
  • +

    rest.advertised.host.name

    +

    If this is set, this is the hostname that will be given out to other workers to connect to.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    rest.advertised.listener

    +

    Sets the advertised listener (HTTP or HTTPS) which will be given to other workers to use.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    rest.advertised.port

    +

    If this is set, this is the port that will be given out to other workers to connect to.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    rest.extension.classes

    +

    Comma-separated names of ConnectRestExtension classes, loaded and called in the order specified. Implementing the interface ConnectRestExtension allows you to inject into Connect's REST API user defined resources like filters. Typically used to add custom capability like logging, security, etc.

    + + + + + +
    Type:list
    Default:""
    Valid Values:
    Importance:low
    +
  • +
  • +
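
    For orientation, a bare-bones extension skeleton is sketched below; the class name and the commented-out resource registration are placeholders.

        import java.util.Map;
        import org.apache.kafka.connect.rest.ConnectRestExtension;
        import org.apache.kafka.connect.rest.ConnectRestExtensionContext;

        public class ExampleRestExtension implements ConnectRestExtension {

            @Override
            public void configure(Map<String, ?> configs) {
                // Extension-specific settings arrive here.
            }

            @Override
            public void register(ConnectRestExtensionContext restPluginContext) {
                // Register JAX-RS resources or filters, e.g.:
                // restPluginContext.configurable().register(new ExampleAuthFilter());
            }

            @Override
            public void close() {
                // Release resources if any were acquired.
            }

            @Override
            public String version() {
                return "0.0.1";
            }
        }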

    retry.backoff.max.ms

    +

    The maximum amount of time in milliseconds to wait when retrying a request to the broker that has repeatedly failed. If provided, the backoff per client will increase exponentially for each failed request, up to this maximum. To prevent all clients from being synchronized upon retry, a randomized jitter with a factor of 0.2 will be applied to the backoff, resulting in the backoff falling within a range between 20% below and 20% above the computed value. If retry.backoff.ms is set to be higher than retry.backoff.max.ms, then retry.backoff.max.ms will be used as a constant backoff from the beginning without any exponential increase

    + + + + + +
    Type:long
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    retry.backoff.ms

    +

    The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios. This value is the initial backoff value and will increase exponentially for each failed request, up to the retry.backoff.max.ms value.

    + + + + + +
    Type:long
    Default:100
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    sasl.kerberos.kinit.cmd

    +

    Kerberos kinit command path.

    + + + + + +
    Type:string
    Default:/usr/bin/kinit
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.min.time.before.relogin

    +

    Login thread sleep time between refresh attempts.

    + + + + + +
    Type:long
    Default:60000
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.ticket.renew.jitter

    +

    Percentage of random jitter added to the renewal time.

    + + + + + +
    Type:double
    Default:0.05
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.ticket.renew.window.factor

    +

    Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.

    + + + + + +
    Type:double
    Default:0.8
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.connect.timeout.ms

    +

    The (optional) value in milliseconds for the external authentication provider connection timeout. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.read.timeout.ms

    +

    The (optional) value in milliseconds for the external authentication provider read timeout. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.buffer.seconds

    +

    The amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:short
    Default:300
    Valid Values:[0,...,3600]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.min.period.seconds

    +

    The desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:short
    Default:60
    Valid Values:[0,...,900]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.window.factor

    +

    Login refresh thread will sleep until the specified window factor relative to the credential's lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:double
    Default:0.8
    Valid Values:[0.5,...,1.0]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.window.jitter

    +

    The maximum amount of random jitter relative to the credential's lifetime that is added to the login refresh thread's sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:double
    Default:0.05
    Valid Values:[0.0,...,0.25]
    Importance:low
    +
  • +
  • +

    sasl.login.retry.backoff.max.ms

    +

    The (optional) value in milliseconds for the maximum wait between login attempts to the external authentication provider. Login uses an exponential backoff algorithm with an initial wait based on the sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.login.retry.backoff.max.ms setting. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.retry.backoff.ms

    +

    The (optional) value in milliseconds for the initial wait between login attempts to the external authentication provider. Login uses an exponential backoff algorithm with an initial wait based on the sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.login.retry.backoff.max.ms setting. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:long
    Default:100
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.exp.seconds

    +

    The number of seconds in the future for which the JWT is valid. The value is used to determine the JWT exp (Expiration) claim based on the current system time when the JWT is created.

    The formula to generate the exp claim is very simple:

    Let:

    x = the current timestamp in seconds, on client
    y = the value of this configuration

    Then:

    exp = x + y

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:int
    Default:300
    Valid Values:[0,...,86400]
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.nbf.seconds

    +

    The number of seconds in the past from which the JWT is valid. The value is used to determine the JWT nbf (Not Before) claim based on the current system time when the JWT is created.

    The formula to generate the nbf claim is very simple:

    Let:

    x = the current timestamp in seconds, on client
    y = the value of this configuration

    Then:

    nbf = x - y

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:int
    Default:60
    Valid Values:[0,...,3600]
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.clock.skew.seconds

    +

    The (optional) value in seconds to allow for differences between the time of the OAuth/OIDC identity provider and the broker.

    + + + + + +
    Type:int
    Default:30
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.expected.audience

    +

    The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. The JWT will be inspected for the standard OAuth "aud" claim and if this value is set, the broker will match the value from JWT's "aud" claim to see if there is an exact match. If there is no match, the broker will reject the JWT and authentication will fail.

    + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.expected.issuer

    +

    The (optional) setting for the broker to use to verify that the JWT was created by the expected issuer. The JWT will be inspected for the standard OAuth "iss" claim and if this value is set, the broker will match it exactly against what is in the JWT's "iss" claim. If there is no match, the broker will reject the JWT and authentication will fail.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.header.urlencode

    +

    The (optional) setting to enable the OAuth client to URL-encode the client_id and client_secret in the authorization header in accordance with RFC 6749. The default value is set to 'false' for backward compatibility.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.refresh.ms

    +

    The (optional) value in milliseconds for the broker to wait between refreshing its JWKS (JSON Web Key Set) cache that contains the keys to verify the signature of the JWT.

    + + + + + +
    Type:long
    Default:3600000 (1 hour)
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms

    +

    The (optional) value in milliseconds for the maximum wait between attempts to retrieve the JWKS (JSON Web Key Set) from the external authentication provider. JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting.

    + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms

    +

    The (optional) value in milliseconds for the initial wait between JWKS (JSON Web Key Set) retrieval attempts from the external authentication provider. JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting.

    + + + + + +
    Type:long
    Default:100
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.scope.claim.name

    +

    The OAuth claim for the scope is often named "scope", but this (optional) setting can provide a different name to use for the scope included in the JWT payload's claims if the OAuth/OIDC provider uses a different name for that claim.

    + + + + + +
    Type:string
    Default:scope
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.sub.claim.name

    +

    The OAuth claim for the subject is often named "sub", but this (optional) setting can provide a different name to use for the subject included in the JWT payload's claims if the OAuth/OIDC provider uses a different name for that claim.

    + + + + + +
    Type:string
    Default:sub
    Valid Values:
    Importance:low
    +
  • +
  • +

    scheduled.rebalance.max.delay.ms

    +

    The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned.

    + + + + + +
    Type:int
    Default:300000 (5 minutes)
    Valid Values:[0,...,2147483647]
    Importance:low
    +
  • +
  • +

    socket.connection.setup.timeout.max.ms

    +

    The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. To avoid connection storms, a randomization factor of 0.2 will be applied to the timeout resulting in a random range between 20% below and 20% above the computed value.

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    socket.connection.setup.timeout.ms

    +

    The amount of time the client will wait for the socket connection to be established. If the connection is not built before the timeout elapses, clients will close the socket channel. This value is the initial backoff value and will increase exponentially for each consecutive connection failure, up to the socket.connection.setup.timeout.max.ms value.

    + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    ssl.cipher.suites

    +

    A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.

    + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.client.auth

    +

    Configures kafka broker to request client authentication. The following settings are common:

    • ssl.client.auth=required If set to required client authentication is required.
    • ssl.client.auth=requested This means client authentication is optional. Unlike required, if this option is set, the client can choose not to provide authentication information about itself.
    • ssl.client.auth=none This means client authentication is not needed.

    + + + + + +
    Type:string
    Default:none
    Valid Values:[required, requested, none]
    Importance:low
    +
  • +
  • +

    ssl.endpoint.identification.algorithm

    +

    The endpoint identification algorithm to validate server hostname using server certificate.

    + + + + + +
    Type:string
    Default:https
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.engine.factory.class

    +

    The class of type org.apache.kafka.common.security.auth.SslEngineFactory to provide SSLEngine objects. Default value is org.apache.kafka.common.security.ssl.DefaultSslEngineFactory. Alternatively, setting this to org.apache.kafka.common.security.ssl.CommonNameLoggingSslEngineFactory will log the common name of expired SSL certificates used by clients to authenticate at any of the brokers with log level INFO. Note that this will cause a tiny delay during establishment of new connections from mTLS clients to brokers due to the extra code for examining the certificate chain provided by the client. Note further that the implementation uses a custom truststore based on the standard Java truststore and thus might be considered a security risk due to not being as mature as the standard one.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.keymanager.algorithm

    +

    The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.

    + + + + + +
    Type:string
    Default:SunX509
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.secure.random.implementation

    +

    The SecureRandom PRNG implementation to use for SSL cryptography operations.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.trustmanager.algorithm

    +

    The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.

    + + + + + +
    Type:string
    Default:PKIX
    Valid Values:
    Importance:low
    +
  • +
  • +

    status.storage.partitions

    +

    The number of partitions used when creating the status storage topic

    + + + + + +
    Type:int
    Default:5
    Valid Values:Positive number, or -1 to use the broker's default
    Importance:low
    +
  • +
  • +

    status.storage.replication.factor

    Replication factor used when creating the status storage topic.

    Type: short
    Default: 3
    Valid Values: Positive number not larger than the number of brokers in the Kafka cluster, or -1 to use the broker's default
    Importance: low
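These two settings size the internal status topic a distributed worker creates. A minimal sketch of how they might be set programmatically; the topic name key (status.storage.topic) and its value are assumptions, not taken from this page.

```java
import java.util.Properties;

// Illustrative only: status-topic sizing settings described above, as they
// might appear in a distributed worker configuration.
public class StatusStorageExample {
    public static Properties statusStorage() {
        Properties worker = new Properties();
        worker.put("status.storage.topic", "connect-status");    // assumed worker property and topic name
        worker.put("status.storage.partitions", "5");             // default shown above
        worker.put("status.storage.replication.factor", "3");     // or "-1" to use the broker default
        return worker;
    }
}
```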

    task.shutdown.graceful.timeout.ms

    Amount of time to wait for tasks to shut down gracefully. This is the total amount of time, not per task. Shutdown is triggered for all tasks, and then they are waited on sequentially.

    Type: long
    Default: 5000 (5 seconds)
    Valid Values:
    Importance: low

    topic.creation.enable

    Whether to allow automatic creation of topics used by source connectors, when source connectors are configured with `topic.creation.` properties. Each task will use an admin client to create its topics and will not depend on the Kafka brokers to create topics automatically.

    Type: boolean
    Default: true
    Valid Values:
    Importance: low
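As a hedged sketch of the `topic.creation.` naming referenced above, a source connector configuration might set default sizing for the topics it creates; the connector name and class below are hypothetical.

```java
import java.util.Map;

// Illustrative only: a source connector config that relies on
// topic.creation.enable=true on the worker. Connector name/class are made up.
public class TopicCreationExample {
    public static Map<String, String> sourceConnectorConfig() {
        return Map.of(
            "name", "example-source",                                // hypothetical
            "connector.class", "com.example.ExampleSourceConnector", // hypothetical
            "topic.creation.default.partitions", "6",
            "topic.creation.default.replication.factor", "3"
        );
    }
}
```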

    topic.tracking.allow.reset

    If set to true, it allows user requests to reset the set of active topics per connector.

    Type: boolean
    Default: true
    Valid Values:
    Importance: low

    topic.tracking.enable

    Enable tracking the set of active topics per connector during runtime.

    Type: boolean
    Default: true
    Valid Values:
    Importance: low
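The two topic-tracking settings above back the REST endpoints defined in connect_rest.yaml later in this change (GET /connectors/{connector}/topics and PUT /connectors/{connector}/topics/reset). A minimal sketch of calling them; the worker URL and connector name are placeholders.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Illustrative only: exercising the topic-tracking REST endpoints.
public class TopicTrackingExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String base = "http://localhost:8083/connectors/example-source"; // placeholders

        // List the topics the connector has actively used.
        HttpRequest list = HttpRequest.newBuilder(URI.create(base + "/topics")).GET().build();
        System.out.println(client.send(list, HttpResponse.BodyHandlers.ofString()).body());

        // Reset the active-topic set (only allowed when topic.tracking.allow.reset=true).
        HttpRequest reset = HttpRequest.newBuilder(URI.create(base + "/topics/reset"))
                .PUT(HttpRequest.BodyPublishers.noBody())
                .build();
        System.out.println(client.send(reset, HttpResponse.BodyHandlers.ofString()).statusCode());
    }
}
```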

    value.converter.plugin.version

    Version of the value converter.

    Type: string
    Default: null
    Valid Values:
    Importance: low

diff --git a/static/41/generated/connect_metrics.html b/static/41/generated/connect_metrics.html new file mode 100644 index 000000000..d0e0f045d --- /dev/null +++ b/static/41/generated/connect_metrics.html @@ -0,0 +1,463 @@
Metric/Attribute nameDescriptionMbean name
connector-countThe number of connectors run in this worker.kafka.connect:type=connect-worker-metrics
connector-startup-attempts-totalThe total number of connector startups that this worker has attempted.kafka.connect:type=connect-worker-metrics
connector-startup-failure-percentageThe average percentage of this worker's connectors starts that failed.kafka.connect:type=connect-worker-metrics
connector-startup-failure-totalThe total number of connector starts that failed.kafka.connect:type=connect-worker-metrics
connector-startup-success-percentageThe average percentage of this worker's connectors starts that succeeded.kafka.connect:type=connect-worker-metrics
connector-startup-success-totalThe total number of connector starts that succeeded.kafka.connect:type=connect-worker-metrics
task-countThe number of tasks run in this worker.kafka.connect:type=connect-worker-metrics
task-startup-attempts-totalThe total number of task startups that this worker has attempted.kafka.connect:type=connect-worker-metrics
task-startup-failure-percentageThe average percentage of this worker's tasks starts that failed.kafka.connect:type=connect-worker-metrics
task-startup-failure-totalThe total number of task starts that failed.kafka.connect:type=connect-worker-metrics
task-startup-success-percentageThe average percentage of this worker's tasks starts that succeeded.kafka.connect:type=connect-worker-metrics
task-startup-success-totalThe total number of task starts that succeeded.kafka.connect:type=connect-worker-metrics
connector-destroyed-task-countThe number of destroyed tasks of the connector on the worker.kafka.connect:type=connect-worker-metrics,connector="{connector}"
connector-failed-task-countThe number of failed tasks of the connector on the worker.kafka.connect:type=connect-worker-metrics,connector="{connector}"
connector-paused-task-countThe number of paused tasks of the connector on the worker.kafka.connect:type=connect-worker-metrics,connector="{connector}"
connector-restarting-task-countThe number of restarting tasks of the connector on the worker.kafka.connect:type=connect-worker-metrics,connector="{connector}"
connector-running-task-countThe number of running tasks of the connector on the worker.kafka.connect:type=connect-worker-metrics,connector="{connector}"
connector-total-task-countThe number of tasks of the connector on the worker.kafka.connect:type=connect-worker-metrics,connector="{connector}"
connector-unassigned-task-countThe number of unassigned tasks of the connector on the worker.kafka.connect:type=connect-worker-metrics,connector="{connector}"
completed-rebalances-totalThe total number of rebalances completed by this worker.kafka.connect:type=connect-worker-rebalance-metrics
connect-protocolThe Connect protocol used by this clusterkafka.connect:type=connect-worker-rebalance-metrics
epochThe epoch or generation number of this worker.kafka.connect:type=connect-worker-rebalance-metrics
leader-nameThe name of the group leader.kafka.connect:type=connect-worker-rebalance-metrics
rebalance-avg-time-msThe average time in milliseconds spent by this worker to rebalance.kafka.connect:type=connect-worker-rebalance-metrics
rebalance-max-time-msThe maximum time in milliseconds spent by this worker to rebalance.kafka.connect:type=connect-worker-rebalance-metrics
rebalancingWhether this worker is currently rebalancing.kafka.connect:type=connect-worker-rebalance-metrics
time-since-last-rebalance-msThe time in milliseconds since this worker completed the most recent rebalance.kafka.connect:type=connect-worker-rebalance-metrics
connector-classThe name of the connector class.kafka.connect:type=connector-metrics,connector="{connector}"
connector-typeThe type of the connector. One of 'source' or 'sink'.kafka.connect:type=connector-metrics,connector="{connector}"
connector-versionThe version of the connector class, as reported by the connector.kafka.connect:type=connector-metrics,connector="{connector}"
statusThe status of the connector. One of 'unassigned', 'running', 'paused', 'stopped', 'failed', or 'restarting'.kafka.connect:type=connector-metrics,connector="{connector}"
predicate-classThe class name of the predicate classkafka.connect:type=connector-predicate-metrics,connector="{connector}",task="{task}",predicate="{predicate}"
predicate-versionThe version of the predicate classkafka.connect:type=connector-predicate-metrics,connector="{connector}",task="{task}",predicate="{predicate}"
batch-size-avgThe average number of records in the batches the task has processed so far.kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
batch-size-maxThe number of records in the largest batch the task has processed so far.kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
connector-classThe name of the connector class.kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
connector-typeThe type of the connector. One of 'source' or 'sink'.kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
connector-versionThe version of the connector class, as reported by the connector.kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
header-converter-classThe fully qualified class name from header.converterkafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
header-converter-versionThe version instantiated for header.converter. May be undefinedkafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
key-converter-classThe fully qualified class name from key.converterkafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
key-converter-versionThe version instantiated for key.converter. May be undefinedkafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
offset-commit-avg-time-msThe average time in milliseconds taken by this task to commit offsets.kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
offset-commit-failure-percentageThe average percentage of this task's offset commit attempts that failed.kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
offset-commit-max-time-msThe maximum time in milliseconds taken by this task to commit offsets.kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
offset-commit-success-percentageThe average percentage of this task's offset commit attempts that succeeded.kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
pause-ratioThe fraction of time this task has spent in the pause state.kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
running-ratioThe fraction of time this task has spent in the running state.kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
statusThe status of the connector task. One of 'unassigned', 'running', 'paused', 'failed', or 'restarting'.kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
task-classThe class name of the task.kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
task-versionThe version of the task.kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
value-converter-classThe fully qualified class name from value.converterkafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
value-converter-versionThe version instantiated for value.converter. May be undefinedkafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
transform-classThe class name of the transformation classkafka.connect:type=connector-transform-metrics,connector="{connector}",task="{task}",transform="{transform}"
transform-versionThe version of the transformation classkafka.connect:type=connector-transform-metrics,connector="{connector}",task="{task}",transform="{transform}"
offset-commit-completion-rateThe average per-second number of offset commit completions that were completed successfully.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
offset-commit-completion-totalThe total number of offset commit completions that were completed successfully.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
offset-commit-seq-noThe current sequence number for offset commits.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
offset-commit-skip-rateThe average per-second number of offset commit completions that were received too late and skipped/ignored.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
offset-commit-skip-totalThe total number of offset commit completions that were received too late and skipped/ignored.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
partition-countThe number of topic partitions assigned to this task belonging to the named sink connector in this worker.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
put-batch-avg-time-msThe average time taken by this task to put a batch of sinks records.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
put-batch-max-time-msThe maximum time taken by this task to put a batch of sinks records.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
sink-record-active-countThe number of records that have been read from Kafka but not yet completely committed/flushed/acknowledged by the sink task.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
sink-record-active-count-avgThe average number of records that have been read from Kafka but not yet completely committed/flushed/acknowledged by the sink task.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
sink-record-active-count-maxThe maximum number of records that have been read from Kafka but not yet completely committed/flushed/acknowledged by the sink task.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
sink-record-lag-maxThe maximum lag in terms of number of records that the sink task is behind the consumer's position for any topic partitions.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
sink-record-read-rateThe average per-second number of records read from Kafka for this task belonging to the named sink connector in this worker. This is before transformations are applied.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
sink-record-read-totalThe total number of records read from Kafka by this task belonging to the named sink connector in this worker, since the task was last restarted.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
sink-record-send-rateThe average per-second number of records output from the transformations and sent/put to this task belonging to the named sink connector in this worker. This is after transformations are applied and excludes any records filtered out by the transformations.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
sink-record-send-totalThe total number of records output from the transformations and sent/put to this task belonging to the named sink connector in this worker, since the task was last restarted.kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
poll-batch-avg-time-msThe average time in milliseconds taken by this task to poll for a batch of source records.kafka.connect:type=source-task-metrics,connector="{connector}",task="{task}"
poll-batch-max-time-msThe maximum time in milliseconds taken by this task to poll for a batch of source records.kafka.connect:type=source-task-metrics,connector="{connector}",task="{task}"
source-record-active-countThe number of records that have been produced by this task but not yet completely written to Kafka.kafka.connect:type=source-task-metrics,connector="{connector}",task="{task}"
source-record-active-count-avgThe average number of records that have been produced by this task but not yet completely written to Kafka.kafka.connect:type=source-task-metrics,connector="{connector}",task="{task}"
source-record-active-count-maxThe maximum number of records that have been produced by this task but not yet completely written to Kafka.kafka.connect:type=source-task-metrics,connector="{connector}",task="{task}"
source-record-poll-rateThe average per-second number of records produced/polled (before transformation) by this task belonging to the named source connector in this worker.kafka.connect:type=source-task-metrics,connector="{connector}",task="{task}"
source-record-poll-totalThe total number of records produced/polled (before transformation) by this task belonging to the named source connector in this worker.kafka.connect:type=source-task-metrics,connector="{connector}",task="{task}"
source-record-write-rateThe average per-second number of records written to Kafka for this task belonging to the named source connector in this worker, since the task was last restarted. This is after transformations are applied, and excludes any records filtered out by the transformations.kafka.connect:type=source-task-metrics,connector="{connector}",task="{task}"
source-record-write-totalThe number of records output written to Kafka for this task belonging to the named source connector in this worker, since the task was last restarted. This is after transformations are applied, and excludes any records filtered out by the transformations.kafka.connect:type=source-task-metrics,connector="{connector}",task="{task}"
transaction-size-avgThe average number of records in the transactions the task has committed so far.kafka.connect:type=source-task-metrics,connector="{connector}",task="{task}"
transaction-size-maxThe number of records in the largest transaction the task has committed so far.kafka.connect:type=source-task-metrics,connector="{connector}",task="{task}"
transaction-size-minThe number of records in the smallest transaction the task has committed so far. kafka.connect:type=source-task-metrics,connector="{connector}",task="{task}"
deadletterqueue-produce-failuresThe number of failed writes to the dead letter queue.kafka.connect:type=task-error-metrics,connector="{connector}",task="{task}"
deadletterqueue-produce-requestsThe number of attempted writes to the dead letter queue.kafka.connect:type=task-error-metrics,connector="{connector}",task="{task}"
last-error-timestampThe epoch timestamp when this task last encountered an error.kafka.connect:type=task-error-metrics,connector="{connector}",task="{task}"
total-errors-loggedThe number of errors that were logged.kafka.connect:type=task-error-metrics,connector="{connector}",task="{task}"
total-record-errorsThe number of record processing errors in this task. kafka.connect:type=task-error-metrics,connector="{connector}",task="{task}"
total-record-failuresThe number of record processing failures in this task.kafka.connect:type=task-error-metrics,connector="{connector}",task="{task}"
total-records-skippedThe number of records skipped due to errors.kafka.connect:type=task-error-metrics,connector="{connector}",task="{task}"
total-retriesThe number of operations retried.kafka.connect:type=task-error-metrics,connector="{connector}",task="{task}"
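All of the metrics listed above are exposed as JMX MBean attributes. A minimal sketch of reading one worker-level metric over a remote JMX connection; the JMX host and port are placeholders and remote JMX must be enabled on the worker first.

```java
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

// Illustrative only: read connector-count from connect-worker-metrics via JMX.
public class ConnectMetricsExample {
    public static void main(String[] args) throws Exception {
        JMXServiceURL url =
            new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:9999/jmxrmi"); // placeholder host/port
        try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
            MBeanServerConnection mbsc = connector.getMBeanServerConnection();
            ObjectName workerMetrics = new ObjectName("kafka.connect:type=connect-worker-metrics");
            Object connectorCount = mbsc.getAttribute(workerMetrics, "connector-count");
            System.out.println("connector-count = " + connectorCount);
        }
    }
}
```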
diff --git a/static/41/generated/connect_predicates.html b/static/41/generated/connect_predicates.html new file mode 100644 index 000000000..7a26dc6bb --- /dev/null +++ b/static/41/generated/connect_predicates.html @@ -0,0 +1,45 @@
org.apache.kafka.connect.transforms.predicates.HasHeaderKey

A predicate which is true for records with at least one header with the configured name.

    name

    The header name.

    Type: string
    Default:
    Valid Values: non-empty string
    Importance: medium
org.apache.kafka.connect.transforms.predicates.RecordIsTombstone

A predicate which is true for records which are tombstones (i.e. have null value).
org.apache.kafka.connect.transforms.predicates.TopicNameMatches

A predicate which is true for records with a topic name that matches the configured regular expression.

    pattern

    A Java regular expression for matching against the name of a record's topic.

    Type: string
    Default:
    Valid Values: non-empty string, valid regex
    Importance: medium
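A predicate is applied by attaching it to a transformation. A minimal sketch pairing TopicNameMatches with the Filter transformation (documented in connect_transforms.html below) so that records from matching topics are dropped; the alias names and topic pattern are hypothetical.

```java
import java.util.Map;

// Illustrative only: predicate-guarded Filter transform in a connector config.
public class PredicateExample {
    public static Map<String, String> connectorConfig() {
        return Map.of(
            "transforms", "dropDebug",
            "transforms.dropDebug.type", "org.apache.kafka.connect.transforms.Filter",
            "transforms.dropDebug.predicate", "isDebugTopic",
            "predicates", "isDebugTopic",
            "predicates.isDebugTopic.type",
                "org.apache.kafka.connect.transforms.predicates.TopicNameMatches",
            "predicates.isDebugTopic.pattern", "debug\\..*" // hypothetical topic naming convention
        );
    }
}
```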
+ diff --git a/static/41/generated/connect_rest.yaml b/static/41/generated/connect_rest.yaml new file mode 100644 index 000000000..79f59295c --- /dev/null +++ b/static/41/generated/connect_rest.yaml @@ -0,0 +1,715 @@ +openapi: 3.0.0 +info: + contact: + email: dev@kafka.apache.org + description: "This is the documentation of the [Apache Kafka](https://kafka.apache.org)\ + \ Connect REST API." + license: + name: Apache 2.0 + url: https://www.apache.org/licenses/LICENSE-2.0.html + title: Kafka Connect REST API + version: 4.1.0 +paths: + /: + get: + operationId: serverInfo + responses: + default: + content: + application/json: + schema: + $ref: "#/components/schemas/ServerInfo" + description: default response + summary: Get details about this Connect worker and the ID of the Kafka cluster + it is connected to + /admin/loggers: + get: + operationId: listLoggers + responses: + default: + content: + application/json: {} + description: default response + summary: List the current loggers that have their levels explicitly set and + their log levels + /admin/loggers/{logger}: + get: + operationId: getLogger + parameters: + - in: path + name: logger + required: true + schema: + type: string + responses: + default: + content: + application/json: {} + description: default response + summary: Get the log level for the specified logger + put: + operationId: setLevel + parameters: + - in: path + name: logger + required: true + schema: + type: string + - description: "The scope for the logging modification (single-worker, cluster-wide,\ + \ etc.)" + in: query + name: scope + schema: + type: string + default: worker + requestBody: + content: + application/json: + schema: + type: object + additionalProperties: + type: string + responses: + default: + content: + application/json: {} + description: default response + summary: Set the log level for the specified logger + /connector-plugins: + get: + operationId: listConnectorPlugins + parameters: + - description: Whether to list only connectors instead of all plugins + in: query + name: connectorsOnly + schema: + type: boolean + default: true + responses: + default: + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/PluginInfo" + description: default response + summary: List all connector plugins installed + /connector-plugins/{pluginName}/config: + get: + operationId: getConnectorConfigDef + parameters: + - in: path + name: pluginName + required: true + schema: + type: string + - in: query + name: version + schema: + type: string + default: latest + responses: + default: + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/ConfigKeyInfo" + description: default response + summary: Get the configuration definition for the specified pluginName + /connector-plugins/{pluginName}/config/validate: + put: + operationId: validateConfigs + parameters: + - in: path + name: pluginName + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + type: object + additionalProperties: + type: string + responses: + default: + content: + application/json: + schema: + $ref: "#/components/schemas/ConfigInfos" + description: default response + summary: Validate the provided configuration against the configuration definition + for the specified pluginName + /connectors: + get: + operationId: listConnectors + responses: + default: + content: + application/json: {} + description: default response + summary: List all active connectors + post: + operationId: 
createConnector + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/CreateConnectorRequest" + responses: + default: + content: + application/json: {} + description: default response + summary: Create a new connector + /connectors/{connector}: + delete: + operationId: destroyConnector + parameters: + - in: path + name: connector + required: true + schema: + type: string + responses: + default: + content: + application/json: {} + description: default response + summary: Delete the specified connector + get: + operationId: getConnector + parameters: + - in: path + name: connector + required: true + schema: + type: string + responses: + default: + content: + application/json: + schema: + $ref: "#/components/schemas/ConnectorInfo" + description: default response + summary: Get the details for the specified connector + /connectors/{connector}/config: + get: + operationId: getConnectorConfig + parameters: + - in: path + name: connector + required: true + schema: + type: string + responses: + default: + content: + application/json: + schema: + type: object + additionalProperties: + type: string + description: default response + summary: Get the configuration for the specified connector + patch: + operationId: patchConnectorConfig + parameters: + - in: path + name: connector + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + type: object + additionalProperties: + type: string + responses: + default: + content: + application/json: {} + description: default response + put: + operationId: putConnectorConfig + parameters: + - in: path + name: connector + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + type: object + additionalProperties: + type: string + responses: + default: + content: + application/json: {} + description: default response + summary: Create or reconfigure the specified connector + /connectors/{connector}/offsets: + delete: + operationId: resetConnectorOffsets + parameters: + - in: path + name: connector + required: true + schema: + type: string + responses: + default: + content: + application/json: {} + description: default response + summary: Reset the offsets for the specified connector + get: + operationId: getOffsets + parameters: + - in: path + name: connector + required: true + schema: + type: string + responses: + default: + content: + application/json: + schema: + $ref: "#/components/schemas/ConnectorOffsets" + description: default response + summary: Get the current offsets for the specified connector + patch: + operationId: alterConnectorOffsets + parameters: + - in: path + name: connector + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/ConnectorOffsets" + responses: + default: + content: + application/json: {} + description: default response + summary: Alter the offsets for the specified connector + /connectors/{connector}/pause: + put: + description: This operation is idempotent and has no effects if the connector + is already paused + operationId: pauseConnector + parameters: + - in: path + name: connector + required: true + schema: + type: string + responses: + default: + content: + application/json: {} + description: default response + summary: Pause the specified connector + /connectors/{connector}/restart: + post: + operationId: restartConnector + parameters: + - in: path + name: connector + required: true + schema: + type: string + - description: Whether to 
also restart tasks + in: query + name: includeTasks + schema: + type: boolean + default: false + - description: Whether to only restart failed tasks/connectors + in: query + name: onlyFailed + schema: + type: boolean + default: false + responses: + default: + content: + application/json: {} + description: default response + summary: Restart the specified connector + /connectors/{connector}/resume: + put: + description: This operation is idempotent and has no effects if the connector + is already running + operationId: resumeConnector + parameters: + - in: path + name: connector + required: true + schema: + type: string + responses: + default: + content: + application/json: {} + description: default response + summary: Resume the specified connector + /connectors/{connector}/status: + get: + operationId: getConnectorStatus + parameters: + - in: path + name: connector + required: true + schema: + type: string + responses: + default: + content: + application/json: + schema: + $ref: "#/components/schemas/ConnectorStateInfo" + description: default response + summary: Get the status for the specified connector + /connectors/{connector}/stop: + put: + description: This operation is idempotent and has no effects if the connector + is already stopped + operationId: stopConnector + parameters: + - in: path + name: connector + required: true + schema: + type: string + responses: + default: + content: + application/json: {} + description: default response + summary: Stop the specified connector + /connectors/{connector}/tasks: + get: + operationId: getTaskConfigs + parameters: + - in: path + name: connector + required: true + schema: + type: string + responses: + default: + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/TaskInfo" + description: default response + summary: List all tasks and their configurations for the specified connector + /connectors/{connector}/tasks/{task}/restart: + post: + operationId: restartTask + parameters: + - in: path + name: connector + required: true + schema: + type: string + - in: path + name: task + required: true + schema: + type: integer + format: int32 + responses: + default: + content: + application/json: {} + description: default response + summary: Restart the specified task for the specified connector + /connectors/{connector}/tasks/{task}/status: + get: + operationId: getTaskStatus + parameters: + - in: path + name: connector + required: true + schema: + type: string + - in: path + name: task + required: true + schema: + type: integer + format: int32 + responses: + default: + content: + application/json: + schema: + $ref: "#/components/schemas/TaskState" + description: default response + summary: Get the state of the specified task for the specified connector + /connectors/{connector}/topics: + get: + operationId: getConnectorActiveTopics + parameters: + - in: path + name: connector + required: true + schema: + type: string + responses: + default: + content: + application/json: {} + description: default response + summary: Get the list of topics actively used by the specified connector + /connectors/{connector}/topics/reset: + put: + operationId: resetConnectorActiveTopics + parameters: + - in: path + name: connector + required: true + schema: + type: string + responses: + default: + content: + application/json: {} + description: default response + summary: Reset the list of topics actively used by the specified connector + /health: + get: + operationId: healthCheck + responses: + default: + content: + 
application/json: {} + description: default response + summary: Health check endpoint to verify worker readiness and liveness +components: + schemas: + ConfigInfo: + type: object + properties: + definition: + $ref: "#/components/schemas/ConfigKeyInfo" + value: + $ref: "#/components/schemas/ConfigValueInfo" + ConfigInfos: + type: object + properties: + configs: + type: array + items: + $ref: "#/components/schemas/ConfigInfo" + error_count: + type: integer + format: int32 + groups: + type: array + items: + type: string + name: + type: string + ConfigKeyInfo: + type: object + properties: + default_value: + type: string + dependents: + type: array + items: + type: string + display_name: + type: string + documentation: + type: string + group: + type: string + importance: + type: string + name: + type: string + order: + type: integer + format: int32 + order_in_group: + type: integer + format: int32 + writeOnly: true + required: + type: boolean + type: + type: string + width: + type: string + ConfigValueInfo: + type: object + properties: + errors: + type: array + items: + type: string + name: + type: string + recommended_values: + type: array + items: + type: string + value: + type: string + visible: + type: boolean + ConnectorInfo: + type: object + properties: + config: + type: object + additionalProperties: + type: string + name: + type: string + tasks: + type: array + items: + $ref: "#/components/schemas/ConnectorTaskId" + type: + type: string + enum: + - source + - sink + - unknown + ConnectorOffset: + type: object + properties: + offset: + type: object + additionalProperties: + type: object + partition: + type: object + additionalProperties: + type: object + ConnectorOffsets: + type: object + properties: + offsets: + type: array + items: + $ref: "#/components/schemas/ConnectorOffset" + ConnectorState: + type: object + properties: + msg: + type: string + writeOnly: true + state: + type: string + trace: + type: string + version: + type: string + worker_id: + type: string + ConnectorStateInfo: + type: object + properties: + connector: + $ref: "#/components/schemas/ConnectorState" + name: + type: string + tasks: + type: array + items: + $ref: "#/components/schemas/TaskState" + type: + type: string + enum: + - source + - sink + - unknown + ConnectorTaskId: + type: object + properties: + connector: + type: string + task: + type: integer + format: int32 + CreateConnectorRequest: + type: object + properties: + config: + type: object + additionalProperties: + type: string + initial_state: + type: string + enum: + - RUNNING + - PAUSED + - STOPPED + name: + type: string + PluginInfo: + type: object + properties: + class: + type: string + type: + type: string + version: + type: string + ServerInfo: + type: object + properties: + commit: + type: string + kafka_cluster_id: + type: string + version: + type: string + TaskInfo: + type: object + properties: + config: + type: object + additionalProperties: + type: string + id: + $ref: "#/components/schemas/ConnectorTaskId" + TaskState: + type: object + properties: + id: + type: integer + format: int32 + msg: + type: string + writeOnly: true + state: + type: string + trace: + type: string + version: + type: string + worker_id: + type: string diff --git a/static/41/generated/connect_transforms.html b/static/41/generated/connect_transforms.html new file mode 100644 index 000000000..27f20bbcc --- /dev/null +++ b/static/41/generated/connect_transforms.html @@ -0,0 +1,549 @@ +
org.apache.kafka.connect.transforms.Cast

Cast fields or the entire key or value to a specific type, e.g. to force an integer field to a smaller width. Cast from integers, floats, boolean and string to any other type, and cast binary to string (base64 encoded).

Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.Cast$Key) or value (org.apache.kafka.connect.transforms.Cast$Value).

    spec

    List of fields and the type to cast them to of the form field1:type,field2:type to cast fields of Maps or Structs. A single type to cast the entire value. Valid types are int8, int16, int32, int64, float32, float64, boolean, and string. Note that binary fields can only be cast to string.

    Type: list
    Default:
    Valid Values: list of colon-delimited pairs, e.g. foo:bar,abc:xyz
    Importance: high

    replace.null.with.default

    Whether to replace fields that have a default value and that are null to the default value. When set to true, the default value is used, otherwise null is used.

    Type: boolean
    Default: true
    Valid Values:
    Importance: medium
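A minimal sketch of the spec syntax described above, applied to the record value; the field names are hypothetical.

```java
import java.util.Map;

// Illustrative only: Cast$Value forcing one field to int8 and another to string.
public class CastExample {
    public static Map<String, String> castConfig() {
        return Map.of(
            "transforms", "cast",
            "transforms.cast.type", "org.apache.kafka.connect.transforms.Cast$Value",
            "transforms.cast.spec", "retries:int8,user_id:string" // hypothetical fields
        );
    }
}
```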
+
+
org.apache.kafka.connect.transforms.DropHeaders
+Removes one or more headers from each record. +

+

    +
  • +
    headers
    +

    The name of the headers to be removed.

    + + + + + +
    Type:list
    Default:
    Valid Values:non-empty list
    Importance:high
    +
  • +
+ +
+
+
org.apache.kafka.connect.transforms.ExtractField
+Extract the specified field from a Struct when schema present, or a Map in the case of schemaless data. Any null values are passed through unmodified.

Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.ExtractField$Key) or value (org.apache.kafka.connect.transforms.ExtractField$Value). +

+

    +
  • +
    field
    +

    Field name to extract.

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:medium
    +
  • +
  • +
    field.syntax.version
    +

    Defines the version of the syntax to access fields. If set to `V1`, then the field paths are limited to access the elements at the root level of the struct or map. If set to `V2`, the syntax will support accessing nested elements. To access nested elements, dotted notation is used. If dots are already included in the field name, then backtick pairs can be used to wrap field names containing dots. E.g. to access the subfield `baz` from a field named "foo.bar" in a struct/map the following format can be used to access its elements: "`foo.bar`.baz".

    + + + + + +
    Type:string
    Default:V1
    Valid Values:(case insensitive) [V1, V2]
    Importance:high
    +
  • +
  • +
    replace.null.with.default
    +

    Whether to replace fields that have a default value and that are null to the default value. When set to true, the default value is used, otherwise null is used.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:medium
    +
  • +
+ +
+
+
org.apache.kafka.connect.transforms.Filter
+Drops all records, filtering them from subsequent transformations in the chain. This is intended to be used conditionally to filter out records matching (or not matching) a particular Predicate. +

+

    +
+ +
+
+
org.apache.kafka.connect.transforms.Flatten
+Flatten a nested data structure, generating names for each field by concatenating the field names at each level with a configurable delimiter character. Applies to Struct when schema present, or a Map in the case of schemaless data. Array fields and their contents are not modified. The default delimiter is '.'.

Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.Flatten$Key) or value (org.apache.kafka.connect.transforms.Flatten$Value). +

+

    +
  • +
    delimiter
    +

    Delimiter to insert between field names from the input record when generating field names for the output record

    + + + + + +
    Type:string
    Default:.
    Valid Values:
    Importance:medium
    +
  • +
+ +
+
+
org.apache.kafka.connect.transforms.HeaderFrom
+Moves or copies fields in the key/value of a record into that record's headers. Corresponding elements of fields and headers together identify a field and the header it should be moved or copied to. Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.HeaderFrom$Key) or value (org.apache.kafka.connect.transforms.HeaderFrom$Value). +

+

    +
  • +
    fields
    +

    Field names in the record whose values are to be copied or moved to headers.

    + + + + + +
    Type:list
    Default:
    Valid Values:non-empty list
    Importance:high
    +
  • +
  • +
    headers
    +

    Header names, in the same order as the field names listed in the fields configuration property.

    + + + + + +
    Type:list
    Default:
    Valid Values:non-empty list
    Importance:high
    +
  • +
  • +
    operation
    +

    Either move if the fields are to be moved to the headers (removed from the key/value), or copy if the fields are to be copied to the headers (retained in the key/value).

    + + + + + +
    Type:string
    Default:
    Valid Values:[move, copy]
    Importance:high
    +
  • +
  • +
    replace.null.with.default
    +

    Whether to replace fields that have a default value and that are null to the default value. When set to true, the default value is used, otherwise null is used.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:medium
    +
  • +
+ +
+
+
org.apache.kafka.connect.transforms.HoistField
+Wrap data using the specified field name in a Struct when schema present, or a Map in the case of schemaless data.

Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.HoistField$Key) or value (org.apache.kafka.connect.transforms.HoistField$Value). +

+

    +
  • +
    field
    +

    Field name for the single field that will be created in the resulting Struct or Map.

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:medium
    +
  • +
+ +
+
+
org.apache.kafka.connect.transforms.InsertField
+Insert field(s) using attributes from the record metadata or a configured static value.

Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.InsertField$Key) or value (org.apache.kafka.connect.transforms.InsertField$Value). +

+

    +
  • +
    offset.field
    +

    Field name for Kafka offset - only applicable to sink connectors.
    Suffix with ! to make this a required field, or ? to keep it optional (the default).

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +
    partition.field
    +

    Field name for Kafka partition. Suffix with ! to make this a required field, or ? to keep it optional (the default).

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +
    replace.null.with.default
    +

    Whether to replace fields that have a default value and that are null to the default value. When set to true, the default value is used, otherwise null is used.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:medium
    +
  • +
  • +
    static.field
    +

    Field name for static data field. Suffix with ! to make this a required field, or ? to keep it optional (the default).

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +
    static.value
    +

    Static field value, if field name configured.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +
    timestamp.field
    +

    Field name for record timestamp. Suffix with ! to make this a required field, or ? to keep it optional (the default).

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +
    topic.field
    +

    Field name for Kafka topic. Suffix with ! to make this a required field, or ? to keep it optional (the default).

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
+ +
+
+
org.apache.kafka.connect.transforms.InsertHeader
+Add a header to each record. +

+

    +
  • +
    header
    +

    The name of the header.

    + + + + + +
    Type:string
    Default:
    Valid Values:non-null string
    Importance:high
    +
  • +
  • +
    value.literal
    +

    The literal value that is to be set as the header value on all records.

    + + + + + +
    Type:string
    Default:
    Valid Values:non-null string
    Importance:high
    +
  • +
+ +
+
+
org.apache.kafka.connect.transforms.MaskField
+Mask specified fields with a valid null value for the field type (i.e. 0, false, empty string, and so on).

For numeric and string fields, an optional replacement value can be specified that is converted to the correct type.

Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.MaskField$Key) or value (org.apache.kafka.connect.transforms.MaskField$Value). +

+

    +
  • +
    fields
    +

    Names of fields to mask.

    + + + + + +
    Type:list
    Default:
    Valid Values:non-empty list
    Importance:high
    +
  • +
  • +
    replace.null.with.default
    +

    Whether to replace fields that have a default value and that are null to the default value. When set to true, the default value is used, otherwise null is used.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:medium
    +
  • +
  • +
    replacement
    +

    Custom value replacement, that will be applied to all 'fields' values (numeric or non-empty string values only).

    + + + + + +
    Type:string
    Default:null
    Valid Values:non-empty string
    Importance:low
    +
  • +
+ +
org.apache.kafka.connect.transforms.RegexRouter

Update the record topic using the configured regular expression and replacement string.

Under the hood, the regex is compiled to a java.util.regex.Pattern. If the pattern matches the input topic, java.util.regex.Matcher#replaceFirst() is used with the replacement string to obtain the new topic.

    regex

    Regular expression to use for matching.

    Type: string
    Default:
    Valid Values: valid regex
    Importance: high

    replacement

    Replacement string.

    Type: string
    Default:
    Valid Values:
    Importance: high
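A minimal sketch of the regex/replacement pair described above, stripping a datacenter prefix from topic names (e.g. "dc1.orders" becomes "orders"); the prefix convention is hypothetical.

```java
import java.util.Map;

// Illustrative only: RegexRouter rewriting the topic via a capture group.
public class RegexRouterExample {
    public static Map<String, String> routerConfig() {
        return Map.of(
            "transforms", "dropPrefix",
            "transforms.dropPrefix.type", "org.apache.kafka.connect.transforms.RegexRouter",
            "transforms.dropPrefix.regex", "dc\\d+\\.(.*)",   // hypothetical "dcN." prefix
            "transforms.dropPrefix.replacement", "$1"
        );
    }
}
```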
+
+
org.apache.kafka.connect.transforms.ReplaceField
+Filter or rename fields.

Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.ReplaceField$Key) or value (org.apache.kafka.connect.transforms.ReplaceField$Value). +

+

    +
  • +
    exclude
    +

    Fields to exclude. This takes precedence over the fields to include.

    + + + + + +
    Type:list
    Default:""
    Valid Values:
    Importance:medium
    +
  • +
  • +
    include
    +

    Fields to include. If specified, only these fields will be used.

    + + + + + +
    Type:list
    Default:""
    Valid Values:
    Importance:medium
    +
  • +
  • +
    renames
    +

    Field rename mappings.

    + + + + + +
    Type:list
    Default:""
    Valid Values:list of colon-delimited pairs, e.g. foo:bar,abc:xyz
    Importance:medium
    +
  • +
  • +
    replace.null.with.default
    +

    Whether to replace fields that have a default value and that are null to the default value. When set to true, the default value is used, otherwise null is used.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:medium
    +
  • +
+ +
+
+
org.apache.kafka.connect.transforms.SetSchemaMetadata
+Set the schema name, version or both on the record's key (org.apache.kafka.connect.transforms.SetSchemaMetadata$Key) or value (org.apache.kafka.connect.transforms.SetSchemaMetadata$Value) schema. +

+

    +
  • +
    schema.name
    +

    Schema name to set.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +
    schema.version
    +

    Schema version to set.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +
    replace.null.with.default
    +

    Whether to replace fields that have a default value and that are null to the default value. When set to true, the default value is used, otherwise null is used.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:medium
    +
  • +
+ +
+
+
org.apache.kafka.connect.transforms.TimestampConverter
+Convert timestamps between different formats such as Unix epoch, strings, and Connect Date/Timestamp types. Applies to individual fields or to the entire value.

Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.TimestampConverter$Key) or value (org.apache.kafka.connect.transforms.TimestampConverter$Value). +

+

    +
  • +
    target.type
    +

    The desired timestamp representation: string, unix, Date, Time, or Timestamp

    + + + + + +
    Type:string
    Default:
    Valid Values:[string, unix, Date, Time, Timestamp]
    Importance:high
    +
  • +
  • +
    field
    +

    The field containing the timestamp, or empty if the entire value is a timestamp

    + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:high
    +
  • +
  • +
    format
    +

    A SimpleDateFormat-compatible format for the timestamp. Used to generate the output when type=string or used to parse the input if the input is a string.

    + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:medium
    +
  • +
  • +
    replace.null.with.default
    +

    Whether to replace fields that have a default value and that are null to the default value. When set to true, the default value is used, otherwise null is used.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:medium
    +
  • +
  • +
    unix.precision
    +

    The desired Unix precision for the timestamp: seconds, milliseconds, microseconds, or nanoseconds. Used to generate the output when type=unix or used to parse the input if the input is a Long. Note: This SMT will cause precision loss during conversions from, and to, values with sub-millisecond components.

    + + + + + +
    Type:string
    Default:milliseconds
    Valid Values:[nanoseconds, microseconds, milliseconds, seconds]
    Importance:low
    +
  • +
+ +
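A minimal sketch of the target.type, field and format settings described above, parsing a string field into a Connect Timestamp; the field name and date pattern are hypothetical.

```java
import java.util.Map;

// Illustrative only: TimestampConverter$Value parsing a string timestamp field.
public class TimestampConverterExample {
    public static Map<String, String> timestampConfig() {
        return Map.of(
            "transforms", "parseTs",
            "transforms.parseTs.type", "org.apache.kafka.connect.transforms.TimestampConverter$Value",
            "transforms.parseTs.field", "event_time",                 // hypothetical field
            "transforms.parseTs.format", "yyyy-MM-dd'T'HH:mm:ss",     // SimpleDateFormat-compatible pattern
            "transforms.parseTs.target.type", "Timestamp"
        );
    }
}
```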
+
+
org.apache.kafka.connect.transforms.TimestampRouter
+Update the record's topic field as a function of the original topic value and the record timestamp.

This is mainly useful for sink connectors, since the topic field is often used to determine the equivalent entity name in the destination system (e.g. database table or search index name).

+

    +
  • +
    timestamp.format
    +

    Format string for the timestamp that is compatible with java.text.SimpleDateFormat.

    + + + + + +
    Type:string
    Default:yyyyMMdd
    Valid Values:
    Importance:high
    +
  • +
  • +
    topic.format
    +

    Format string which can contain ${topic} and ${timestamp} as placeholders for the topic and timestamp, respectively.

    + + + + + +
    Type:string
    Default:${topic}-${timestamp}
    Valid Values:
    Importance:high
    +
  • +
+ +
org.apache.kafka.connect.transforms.ValueToKey

Replace the record key with a new key formed from a subset of fields in the record value.

    fields

    Field names on the record value to extract as the record key.

    Type: list
    Default:
    Valid Values: non-empty list
    Importance: high

    replace.null.with.default

    Whether to replace fields that have a default value and that are null to the default value. When set to true, the default value is used, otherwise null is used.

    Type: boolean
    Default: true
    Valid Values:
    Importance: medium
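A minimal sketch of a common pairing: ValueToKey followed by ExtractField$Key (documented above) to promote a single value field into a primitive record key; the field name "id" is hypothetical.

```java
import java.util.Map;

// Illustrative only: build a key from the "id" value field, then flatten it.
public class ValueToKeyExample {
    public static Map<String, String> keyingConfig() {
        return Map.of(
            "transforms", "toKey,extractId",
            "transforms.toKey.type", "org.apache.kafka.connect.transforms.ValueToKey",
            "transforms.toKey.fields", "id",
            "transforms.extractId.type", "org.apache.kafka.connect.transforms.ExtractField$Key",
            "transforms.extractId.field", "id"
        );
    }
}
```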
diff --git a/static/41/generated/consumer_config.html b/static/41/generated/consumer_config.html new file mode 100644 index 000000000..58333b038 --- /dev/null +++ b/static/41/generated/consumer_config.html @@ -0,0 +1,1133 @@
    key.deserializer

    Deserializer class for key that implements the org.apache.kafka.common.serialization.Deserializer interface.

    Type: class
    Default:
    Valid Values:
    Importance: high

    value.deserializer

    Deserializer class for value that implements the org.apache.kafka.common.serialization.Deserializer interface.

    Type: class
    Default:
    Valid Values:
    Importance: high

    bootstrap.servers

    A list of host/port pairs used to establish the initial connection to the Kafka cluster. Clients use this list to bootstrap and discover the full set of Kafka brokers. While the order of servers in the list does not matter, we recommend including more than one server to ensure resilience if any servers are down. This list does not need to contain the entire set of brokers, as Kafka clients automatically manage and update connections to the cluster efficiently. This list must be in the form host1:port1,host2:port2,....

    Type: list
    Default: ""
    Valid Values: non-null string
    Importance: high

    fetch.min.bytes

    The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request. The default setting of 1 byte means that fetch requests are answered as soon as that many byte(s) of data is available or the fetch request times out waiting for data to arrive. Setting this to a larger value will cause the server to wait for larger amounts of data to accumulate which can improve server throughput a bit at the cost of some additional latency.

    Type: int
    Default: 1
    Valid Values: [0,...]
    Importance: high

    group.id

    A unique string that identifies the consumer group this consumer belongs to. This property is required if the consumer uses either the group management functionality by using subscribe(topic) or the Kafka-based offset management strategy.

    Type: string
    Default: null
    Valid Values:
    Importance: high
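A minimal sketch wiring up the required settings described above (bootstrap.servers, group.id and the key/value deserializers) into a consumer; the broker address, group id and topic name are placeholders.

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

// Illustrative only: a consumer built from the core configs on this page.
public class ConsumerConfigExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");   // placeholder broker
        props.put("group.id", "example-group");              // placeholder group
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("example-topic"));     // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
            records.forEach(r -> System.out.println(r.key() + " -> " + r.value()));
        }
    }
}
```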

    group.protocol

    +

    The group protocol consumer should use. We currently support "classic" or "consumer". If "consumer" is specified, then the consumer group protocol will be used. Otherwise, the classic group protocol will be used.

    + + + + + +
    Type:string
    Default:classic
    Valid Values:(case insensitive) [CONSUMER, CLASSIC]
    Importance:high
    +
  • +
  • +

    heartbeat.interval.ms

    +

    The expected time between heartbeats to the consumer coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. This config is only supported if group.protocol is set to "classic". In that case, the value must be set lower than session.timeout.ms, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances. If group.protocol is set to "consumer", this config is not supported, as the heartbeat interval is controlled by the broker with group.consumer.heartbeat.interval.ms.

    + + + + + +
    Type:int
    Default:3000 (3 seconds)
    Valid Values:
    Importance:high
    +
  • +
  • +

    max.partition.fetch.bytes

    +

    The maximum amount of data per-partition the server will return. Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. The maximum record batch size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic config). See fetch.max.bytes for limiting the consumer request size. Consider increasing max.partition.fetch.bytes especially in the cases of remote storage reads (KIP-405), because currently only one partition per fetch request is served from the remote store (KAFKA-14915).

    + + + + + +
    Type:int
    Default:1048576 (1 mebibyte)
    Valid Values:[0,...]
    Importance:high
    +
  • +
  • +

    session.timeout.ms

    +

    The timeout used to detect client failures when using Kafka's group management facility. The client sends periodic heartbeats to indicate its liveness to the broker. If no heartbeats are received by the broker before the expiration of this session timeout, then the broker will remove this client from the group and initiate a rebalance. Note that the value must be in the allowable range as configured in the broker configuration by group.min.session.timeout.ms and group.max.session.timeout.ms. Note that this client configuration is not supported when group.protocol is set to "consumer". In that case, session timeout is controlled by the broker config group.consumer.session.timeout.ms.

    + + + + + +
    Type:int
    Default:45000 (45 seconds)
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.key.password

    +

    The password of the private key in the key store file or the PEM key specified in 'ssl.keystore.key'.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.certificate.chain

    +

    Certificate chain in the format specified by 'ssl.keystore.type'. Default SSL engine factory supports only PEM format with a list of X.509 certificates

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.key

    +

    Private key in the format specified by 'ssl.keystore.type'. Default SSL engine factory supports only PEM format with PKCS#8 keys. If the key is encrypted, key password must be specified using 'ssl.key.password'

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.location

    +

    The location of the key store file. This is optional for client and can be used for two-way authentication for client.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.password

    +

    The store password for the key store file. This is optional for client and only needed if 'ssl.keystore.location' is configured. Key store password is not supported for PEM format.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.certificates

    +

    Trusted certificates in the format specified by 'ssl.truststore.type'. Default SSL engine factory supports only PEM format with X.509 certificates.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.location

    +

    The location of the trust store file.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.password


    The password for the trust store file. If a password is not set, trust store file configured will still be used, but integrity checking is disabled. Trust store password is not supported for PEM format.

    Type: password
    Default: null
    Valid Values:
    Importance: high
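    The ssl.* options above are typically set together. As an illustrative sketch only (not part of the generated reference), a Java consumer connecting to a TLS listener with a JKS truststore, and optionally a keystore for mutual TLS, might be configured as follows; the broker address, file paths and passwords are placeholders, and `props` is assumed to be the Properties object later passed to the KafkaConsumer constructor:

```java
props.put("bootstrap.servers", "broker1:9093");   // TLS listener address (placeholder)
props.put("security.protocol", "SSL");
props.put("ssl.truststore.location", "/etc/kafka/client.truststore.jks");
props.put("ssl.truststore.password", "truststore-secret");
// The keystore entries below are only needed when the broker requires client (mutual) TLS authentication:
props.put("ssl.keystore.location", "/etc/kafka/client.keystore.jks");
props.put("ssl.keystore.password", "keystore-secret");
props.put("ssl.key.password", "key-secret");
```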

    allow.auto.create.topics

    +

    Allow automatic topic creation on the broker when subscribing to or assigning a topic. A topic being subscribed to will be automatically created only if the broker allows for it using `auto.create.topics.enable` broker configuration.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:medium
    +
  • +
  • +

    auto.offset.reset


    What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server (e.g. because that data has been deleted):

    • earliest: automatically reset the offset to the earliest offset
    • latest: automatically reset the offset to the latest offset
    • by_duration:<duration>: automatically reset the offset to a configured <duration> from the current timestamp. <duration> must be specified in ISO8601 format (PnDTnHnMn.nS). Negative duration is not allowed.
    • none: throw exception to the consumer if no previous offset is found for the consumer's group
    • anything else: throw exception to the consumer.

    Note that altering partition numbers while setting this config to latest may cause message delivery loss since producers could start to send messages to newly added partitions (i.e. no initial offsets exist yet) before consumers reset their offsets.

    Type: string
    Default: latest
    Valid Values: [latest, earliest, none, by_duration:PnDTnHnMn.nS]
    Importance: medium
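    As a small illustration (a sketch, not part of the generated reference), the fragment below shows the reset policies listed above applied in a Java consumer's Properties; the group id is a placeholder and `props` is assumed to be the Properties passed to KafkaConsumer:

```java
props.put("group.id", "reporting-group");              // placeholder
// Replay from the beginning of each partition when the group has no committed offset:
props.put("auto.offset.reset", "earliest");
// Or replay only the last 24 hours (ISO-8601 duration, as described above):
// props.put("auto.offset.reset", "by_duration:P1D");
// Or fail fast if no committed offset exists:
// props.put("auto.offset.reset", "none");
```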

    client.dns.lookup

    +

    Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again (both the JVM and the OS cache DNS name lookups, however). If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips.

    + + + + + +
    Type:string
    Default:use_all_dns_ips
    Valid Values:[use_all_dns_ips, resolve_canonical_bootstrap_servers_only]
    Importance:medium
    +
  • +
  • +

    connections.max.idle.ms

    +

    Close idle connections after the number of milliseconds specified by this config.

    + + + + + +
    Type:long
    Default:540000 (9 minutes)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    default.api.timeout.ms

    +

    Specifies the timeout (in milliseconds) for client APIs. This configuration is used as the default timeout for all client operations that do not specify a timeout parameter.

    + + + + + +
    Type:int
    Default:60000 (1 minute)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    enable.auto.commit


    If true the consumer's offset will be periodically committed in the background.

    Type: boolean
    Default: true
    Valid Values:
    Importance: medium
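    When auto-commit is disabled, the application takes over committing offsets itself. The following is a minimal, self-contained sketch of that pattern (not part of the generated reference); the bootstrap address, group id and topic name are placeholders:

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ManualCommitExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");   // placeholder
        props.put("group.id", "manual-commit-group");        // placeholder
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("enable.auto.commit", "false");             // take over commit responsibility

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("my-topic"));           // placeholder topic
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<String, String> record : records) {
                    process(record);                            // application logic
                }
                consumer.commitSync();                          // commit only after processing succeeded
            }
        }
    }

    private static void process(ConsumerRecord<String, String> record) {
        System.out.printf("%s-%d@%d: %s%n", record.topic(), record.partition(), record.offset(), record.value());
    }
}
```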

    exclude.internal.topics

    +

    Whether internal topics matching a subscribed pattern should be excluded from the subscription. It is always possible to explicitly subscribe to an internal topic.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:medium
    +
  • +
  • +

    fetch.max.bytes


    The maximum amount of data the server should return for a fetch request. Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. The maximum record batch size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic config). A fetch request consists of many partitions, and there is another setting that controls how much data is returned for each partition in a fetch request - see max.partition.fetch.bytes. Note that there is a current limitation when performing remote reads from tiered storage (KIP-405) - only one partition out of the fetch request is fetched from the remote store (KAFKA-14915). Note also that the consumer performs multiple fetches in parallel.

    Type: int
    Default: 52428800 (50 mebibytes)
    Valid Values: [0,...]
    Importance: medium
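    To make the interplay between the request-level and partition-level limits concrete, the fragment below sets both together with the latency-oriented knobs fetch.min.bytes and fetch.max.wait.ms. This is an illustrative sketch only (the values are not recommendations), assuming `props` is the Properties used to build the consumer:

```java
props.put("fetch.min.bytes", "1");                                    // respond as soon as any data is available...
props.put("fetch.max.wait.ms", "500");                                // ...or after 500 ms, whichever comes first
props.put("fetch.max.bytes", String.valueOf(50 * 1024 * 1024));       // cap on the whole fetch response
props.put("max.partition.fetch.bytes", String.valueOf(1024 * 1024));  // cap per partition within that response
```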

    group.instance.id


    A unique identifier of the consumer instance provided by the end user. Only non-empty strings are permitted. If set, the consumer is treated as a static member, which means that only one instance with this ID is allowed in the consumer group at any time. This can be used in combination with a larger session timeout to avoid group rebalances caused by transient unavailability (e.g. process restarts). If not set, the consumer will join the group as a dynamic member, which is the traditional behavior.

    Type: string
    Default: null
    Valid Values: non-empty string
    Importance: medium
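    A typical static-membership setup pairs a stable per-instance id with a longer session timeout, so that a quick restart does not trigger a rebalance. The fragment below is an illustrative sketch (ids and timeouts are placeholders; the session timeout must stay within the broker's group.min.session.timeout.ms / group.max.session.timeout.ms range):

```java
props.put("group.id", "orders-processor");             // placeholder
props.put("group.instance.id", "orders-processor-0");  // must be unique per instance, e.g. derived from a pod ordinal
props.put("session.timeout.ms", "120000");             // give restarts up to 2 minutes before partitions move
```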

    group.remote.assignor

    +

    The name of the server-side assignor to use. If not specified, the group coordinator will pick the first assignor defined in the broker config group.consumer.assignors. This configuration is applied only if group.protocol is set to "consumer".

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    isolation.level


    Controls how to read messages written transactionally. If set to read_committed, consumer.poll() will only return transactional messages which have been committed. If set to read_uncommitted (the default), consumer.poll() will return all messages, even transactional messages which have been aborted. Non-transactional messages will be returned unconditionally in either mode.

    Messages will always be returned in offset order. Hence, in read_committed mode, consumer.poll() will only return messages up to the last stable offset (LSO), which is the one less than the offset of the first open transaction. In particular any messages appearing after messages belonging to ongoing transactions will be withheld until the relevant transaction has been completed. As a result, read_committed consumers will not be able to read up to the high watermark when there are in flight transactions.

    Further, when in read_committed mode, the seekToEnd method will return the LSO.

    Type: string
    Default: read_uncommitted
    Valid Values: [read_committed, read_uncommitted]
    Importance: medium
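    For consumers of transactional topics that must not see aborted data, the setting is a one-liner. This is a sketch assuming the usual consumer Properties setup:

```java
// Only records from committed transactions are returned; poll() will not advance past the
// last stable offset (LSO) while transactions are still in flight.
props.put("isolation.level", "read_committed");
```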

    max.poll.interval.ms

    +

    The maximum delay between invocations of poll() when using consumer group management. This places an upper bound on the amount of time that the consumer can be idle before fetching more records. If poll() is not called before expiration of this timeout, then the consumer is considered failed and the group will rebalance in order to reassign the partitions to another member. For consumers using a non-null group.instance.id which reach this timeout, partitions will not be immediately reassigned. Instead, the consumer will stop sending heartbeats and partitions will be reassigned after expiration of the session timeout (defined by the client config session.timeout.ms if using the Classic rebalance protocol, or by the broker config group.consumer.session.timeout.ms if using the Consumer protocol). This mirrors the behavior of a static consumer which has shutdown.

    + + + + + +
    Type:int
    Default:300000 (5 minutes)
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    max.poll.records


    The maximum number of records returned in a single call to poll(). Note that max.poll.records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and return them incrementally from each poll.

    Type: int
    Default: 500
    Valid Values: [1,...]
    Importance: medium
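    These two settings are usually tuned together: smaller batches mean poll() is called more often, and a larger max.poll.interval.ms gives slow per-batch processing more headroom before the consumer is considered failed. A hedged fragment (values are illustrative only, assuming the usual consumer Properties setup):

```java
props.put("max.poll.records", "100");          // smaller batches => more frequent poll() calls
props.put("max.poll.interval.ms", "600000");   // allow up to 10 minutes of processing between poll() calls
```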

    partition.assignment.strategy


    A list of class names or class types, ordered by preference, of supported partition assignment strategies that the client will use to distribute partition ownership amongst consumer instances when group management is used. Available options are:

    • org.apache.kafka.clients.consumer.RangeAssignor: Assigns partitions on a per-topic basis.
    • org.apache.kafka.clients.consumer.RoundRobinAssignor: Assigns partitions to consumers in a round-robin fashion.
    • org.apache.kafka.clients.consumer.StickyAssignor: Guarantees an assignment that is maximally balanced while preserving as many existing partition assignments as possible.
    • org.apache.kafka.clients.consumer.CooperativeStickyAssignor: Follows the same StickyAssignor logic, but allows for cooperative rebalancing.

    The default assignor is [RangeAssignor, CooperativeStickyAssignor], which will use the RangeAssignor by default, but allows upgrading to the CooperativeStickyAssignor with just a single rolling bounce that removes the RangeAssignor from the list.

    Implementing the org.apache.kafka.clients.consumer.ConsumerPartitionAssignor interface allows you to plug in a custom assignment strategy.

    Type: list
    Default: class org.apache.kafka.clients.consumer.RangeAssignor,class org.apache.kafka.clients.consumer.CooperativeStickyAssignor
    Valid Values: non-null string
    Importance: medium
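    Per the description above, a group running the default assignor list can move to cooperative rebalancing with a single rolling bounce that removes the RangeAssignor. The fragment below is an illustrative sketch of the end state, assuming `props` is the consumer's Properties:

```java
props.put("partition.assignment.strategy",
          "org.apache.kafka.clients.consumer.CooperativeStickyAssignor");
```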

    receive.buffer.bytes

    +

    The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.

    + + + + + +
    Type:int
    Default:65536 (64 kibibytes)
    Valid Values:[-1,...]
    Importance:medium
    +
  • +
  • +

    request.timeout.ms

    +

    The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.

    + + + + + +
    Type:int
    Default:30000 (30 seconds)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    sasl.client.callback.handler.class

    +

    The fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.jaas.config


    JAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described here. The format for the value is: loginModuleClass controlFlag (optionName=optionValue)*;. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;

    Type: password
    Default: null
    Valid Values:
    Importance: medium
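    On the client side the JAAS configuration is given without any listener prefix. As an illustrative sketch (one of several valid combinations, not part of the generated reference), a consumer authenticating with SASL/SCRAM over TLS might use the following; the credentials and truststore values are placeholders:

```java
props.put("security.protocol", "SASL_SSL");
props.put("sasl.mechanism", "SCRAM-SHA-256");
props.put("sasl.jaas.config",
    "org.apache.kafka.common.security.scram.ScramLoginModule required "
    + "username=\"alice\" password=\"alice-secret\";");
props.put("ssl.truststore.location", "/etc/kafka/client.truststore.jks");
props.put("ssl.truststore.password", "truststore-secret");
```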

    sasl.kerberos.service.name

    +

    The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.login.callback.handler.class

    +

    The fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.login.class

    +

    The fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.mechanism

    +

    SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.

    + + + + + +
    Type:string
    Default:GSSAPI
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.algorithm

    +

    The algorithm the Apache Kafka client should use to sign the assertion sent to the identity provider. It is also used as the value of the OAuth alg (Algorithm) header in the JWT assertion.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:RS256
    Valid Values:(case insensitive) [ES256, RS256]
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.aud

    +

    The JWT aud (Audience) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.iss

    +

    The value to be used as the iss (Issuer) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.jti.include

    +

    Flag that determines if the JWT assertion should generate a unique ID for the JWT and include it in the jti (JWT ID) claim.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.sub

    +

    The value to be used as the sub (Subject) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.file

    +

    File that contains a pre-generated JWT assertion.

    The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when the file changes, allowing the file to be reloaded on modification. This allows for "live" assertion rotation without restarting the Kafka client.

    The file contains the assertion in the serialized, three part JWT format:

    1. The header section is a base 64-encoded JWT header that contains values like alg (Algorithm), typ (Type, always the literal value JWT), etc.
    2. The payload section includes the base 64-encoded set of JWT claims, such as aud (Audience), iss (Issuer), sub (Subject), etc.
    3. The signature section is the concatenated header and payload sections that were signed using a private key.

    See RFC 7519 and RFC 7515 for more details on the JWT and JWS formats.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, all other sasl.oauthbearer.assertion.* configurations are ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.private.key.file

    +

    File that contains a private key in the standard PEM format which is used to sign the JWT assertion sent to the identity provider.

    The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when the file changes to allow for the file to be reloaded on modifications. This allows for "live" private key rotation without restarting the Kafka client.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.private.key.passphrase

    +

    The optional passphrase to decrypt the private key file specified by sasl.oauthbearer.assertion.private.key.file.

    Note: If the file referred to by sasl.oauthbearer.assertion.private.key.file is modified on the file system at runtime and it was created with a different passphrase than it was previously, the client will not be able to access the private key file because the passphrase is now out of date. For that reason, when using private key passphrases, either use the same passphrase each time, or—for improved security—restart the Kafka client using the new passphrase configuration.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.template.file

    +

    This optional configuration specifies the file containing the JWT headers and/or payload claims to be used when creating the JWT assertion.

    Not all identity providers require the same set of claims; some may require a given claim while others may prohibit it. In order to provide the most flexibility, this configuration allows the user to provide the static header values and claims that are to be included in the JWT.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.client.credentials.client.id

    +

    The ID (defined in/by the OAuth identity provider) to identify the client requesting the token.

    The client ID was previously stored as part of the sasl.jaas.config configuration with the key clientId. For backward compatibility, the clientId JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.client.credentials.client.id from configuration
    • clientId from JAAS

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.client.credentials.client.secret

    +

    The secret (defined by either the user or preassigned, depending on the identity provider) of the client requesting the token.

    The client secret was previously stored as part of the sasl.jaas.config configuration with the key clientSecret. For backward compatibility, the clientSecret JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.client.credentials.client.secret from configuration
    • clientSecret from JAAS

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.url

    +

    The OAuth/OIDC provider URL from which the provider's JWKS (JSON Web Key Set) can be retrieved. The URL can be HTTP(S)-based or file-based. If the URL is HTTP(S)-based, the JWKS data will be retrieved from the OAuth/OIDC provider via the configured URL on broker startup. All then-current keys will be cached on the broker for incoming requests. If an authentication request is received for a JWT that includes a "kid" header claim value that isn't yet in the cache, the JWKS endpoint will be queried again on demand. However, the broker polls the URL every sasl.oauthbearer.jwks.endpoint.refresh.ms milliseconds to refresh the cache with any forthcoming keys before any JWT requests that include them are received. If the URL is file-based, the broker will load the JWKS file from a configured location on startup. In the event that the JWT includes a "kid" header value that isn't in the JWKS file, the broker will reject the JWT and authentication will fail.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwt.retriever.class

    +

    The fully-qualified class name of a JwtRetriever implementation used to request tokens from the identity provider.

    The default configuration value represents a class that maintains backward compatibility with previous versions of Apache Kafka. The default implementation uses the configuration to determine which concrete implementation to create.

    Other implementations that are provided include:

    • org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.FileJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever

    + + + + + +
    Type:class
    Default:org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwt.validator.class

    +

    The fully-qualified class name of a JwtValidator implementation used to validate the JWT from the identity provider.

    The default validator (org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator) maintains backward compatibility with previous versions of Apache Kafka. The default validator uses configuration to determine which concrete implementation to create.

    The built-in JwtValidator implementations are:

    • org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator
    • org.apache.kafka.common.security.oauthbearer.ClientJwtValidator
    • org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator

    + + + + + +
    Type:class
    Default:org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.scope

    +

    This is the level of access a client application is granted to a resource or API which is included in the token request. If provided, it should match one or more scopes configured in the identity provider.

    The scope was previously stored as part of the sasl.jaas.config configuration with the key scope. For backward compatibility, the scope JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.scope from configuration
    • scope from JAAS

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.token.endpoint.url

    +

    The URL for the OAuth/OIDC identity provider. If the URL is HTTP(S)-based, it is the issuer's token endpoint URL to which requests will be made to login based on the configuration in sasl.oauthbearer.jwt.retriever.class. If the URL is file-based, it specifies a file containing an access token (in JWT serialized form) issued by the OAuth/OIDC identity provider to use for authorization.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    security.protocol

    +

    Protocol used to communicate with brokers.

    + + + + + +
    Type:string
    Default:PLAINTEXT
    Valid Values:(case insensitive) [SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT]
    Importance:medium
    +
  • +
  • +

    send.buffer.bytes

    +

    The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.

    + + + + + +
    Type:int
    Default:131072 (128 kibibytes)
    Valid Values:[-1,...]
    Importance:medium
    +
  • +
  • +

    share.acknowledgement.mode

    +

    Controls the acknowledgement mode for a share consumer. If set to implicit, the acknowledgement mode of the consumer is implicit and it must not use org.apache.kafka.clients.consumer.ShareConsumer.acknowledge() to acknowledge delivery of records. Instead, delivery is acknowledged implicitly on the next call to poll or commit. If set to explicit, the acknowledgement mode of the consumer is explicit and it must use org.apache.kafka.clients.consumer.ShareConsumer.acknowledge() to acknowledge delivery of records.

    + + + + + +
    Type:string
    Default:implicit
    Valid Values:[implicit, explicit]
    Importance:medium
    +
  • +
  • +

    socket.connection.setup.timeout.max.ms

    +

    The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. To avoid connection storms, a randomization factor of 0.2 will be applied to the timeout resulting in a random range between 20% below and 20% above the computed value.

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    socket.connection.setup.timeout.ms

    +

    The amount of time the client will wait for the socket connection to be established. If the connection is not built before the timeout elapses, clients will close the socket channel. This value is the initial backoff value and will increase exponentially for each consecutive connection failure, up to the socket.connection.setup.timeout.max.ms value.

    + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.enabled.protocols

    +

    The list of protocols enabled for SSL connections. The default is 'TLSv1.2,TLSv1.3'. This means that clients and servers will prefer TLSv1.3 if both support it and fallback to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most use cases. Also see the config documentation for `ssl.protocol` to understand how it can impact the TLS version negotiation behavior.

    + + + + + +
    Type:list
    Default:TLSv1.2,TLSv1.3
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.keystore.type

    +

    The file format of the key store file. This is optional for client. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].

    + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.protocol

    +

    The SSL protocol used to generate the SSLContext. The default is 'TLSv1.3', which should be fine for most use cases. A typical alternative to the default is 'TLSv1.2'. Allowed values for this config are dependent on the JVM. Clients using the defaults for this config and 'ssl.enabled.protocols' will downgrade to 'TLSv1.2' if the server does not support 'TLSv1.3'. If this config is set to 'TLSv1.2', however, clients will not use 'TLSv1.3' even if it is one of the values in `ssl.enabled.protocols` and the server only supports 'TLSv1.3'.

    + + + + + +
    Type:string
    Default:TLSv1.3
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.provider

    +

    The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.truststore.type

    +

    The file format of the trust store file. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].

    + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    +
  • +
  • +

    auto.commit.interval.ms

    +

    The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true.

    + + + + + +
    Type:int
    Default:5000 (5 seconds)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    check.crcs

    +

    Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    client.id

    +

    An id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.

    + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:low
    +
  • +
  • +

    client.rack


    A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config 'broker.rack'

    Type: string
    Default: ""
    Valid Values:
    Importance: low
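    Setting this enables fetching from a nearby follower when the cluster is configured for rack-aware replica selection (broker-side broker.rack plus a rack-aware replica.selector.class). A hedged fragment; the rack label is a placeholder and should follow the same naming scheme as the brokers:

```java
props.put("client.rack", "us-east-1a");   // placeholder; should match the cluster's broker.rack naming scheme
```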

    enable.metrics.push

    +

    Whether to enable pushing of client metrics to the cluster, if the cluster has a client metrics subscription which matches this client.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    fetch.max.wait.ms

    +

    The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy the requirement given by fetch.min.bytes. This config is used only for local log fetch. To tune the remote fetch maximum wait time, please refer to the 'remote.fetch.max.wait.ms' broker config.

    + + + + + +
    Type:int
    Default:500
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    interceptor.classes


    A list of classes to use as interceptors. Implementing the org.apache.kafka.clients.consumer.ConsumerInterceptor interface allows you to intercept (and possibly mutate) records received by the consumer. By default, there are no interceptors.

    Type: list
    Default: ""
    Valid Values: non-null string
    Importance: low
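    As an illustrative sketch (the class and package name are hypothetical, not part of Kafka), an interceptor that merely counts records could look like this:

```java
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

/** Illustrative interceptor that only counts the records flowing through the consumer. */
public class CountingInterceptor implements ConsumerInterceptor<String, String> {
    private long recordCount = 0;

    @Override
    public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
        recordCount += records.count();   // observe (or mutate) records before the application sees them
        return records;
    }

    @Override
    public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
        // called after offsets are committed; no-op in this sketch
    }

    @Override
    public void close() {
        System.out.println("records seen: " + recordCount);
    }

    @Override
    public void configure(Map<String, ?> configs) {
        // no configuration needed for this sketch
    }
}
```

    It would then be registered with `props.put("interceptor.classes", "com.example.CountingInterceptor");` (the package is hypothetical).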

    metadata.max.age.ms

    +

    The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    metadata.recovery.rebootstrap.trigger.ms

    +

    If a client configured to rebootstrap using metadata.recovery.strategy=rebootstrap is unable to obtain metadata from any of the brokers in the last known metadata for this interval, the client repeats the bootstrap process using the bootstrap.servers configuration.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    metadata.recovery.strategy

    +

    Controls how the client recovers when none of the brokers known to it is available. If set to none, the client fails. If set to rebootstrap, the client repeats the bootstrap process using bootstrap.servers. Rebootstrapping is useful when a client communicates with brokers so infrequently that the set of brokers may change entirely before the client refreshes metadata. Metadata recovery is triggered when all last-known brokers appear unavailable simultaneously. Brokers appear unavailable when disconnected and no current retry attempt is in-progress. Consider increasing reconnect.backoff.ms and reconnect.backoff.max.ms and decreasing socket.connection.setup.timeout.ms and socket.connection.setup.timeout.max.ms for the client. Rebootstrap is also triggered if connection cannot be established to any of the brokers for metadata.recovery.rebootstrap.trigger.ms milliseconds or if server requests rebootstrap.

    + + + + + +
    Type:string
    Default:rebootstrap
    Valid Values:(case insensitive) [REBOOTSTRAP, NONE]
    Importance:low
    +
  • +
  • +

    metric.reporters

    +

    A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. When custom reporters are set and org.apache.kafka.common.metrics.JmxReporter is needed, it has to be explicitly added to the list.

    + + + + + +
    Type:list
    Default:org.apache.kafka.common.metrics.JmxReporter
    Valid Values:non-null string
    Importance:low
    +
  • +
  • +

    metrics.num.samples

    +

    The number of samples maintained to compute metrics.

    + + + + + +
    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:low
    +
  • +
  • +

    metrics.recording.level

    +

    The highest recording level for metrics. It has three levels for recording metrics - info, debug, and trace.

    INFO level records only essential metrics necessary for monitoring system performance and health. It collects vital data without gathering too much detail, making it suitable for production environments where minimal overhead is desired.

    DEBUG level records most metrics, providing more detailed information about the system's operation. It's useful for development and testing environments where you need deeper insights to debug and fine-tune the application.

    TRACE level records all possible metrics, capturing every detail about the system's performance and operation. It's best for controlled environments where in-depth analysis is required, though it can introduce significant overhead.

    + + + + + +
    Type:string
    Default:INFO
    Valid Values:[INFO, DEBUG, TRACE]
    Importance:low
    +
  • +
  • +

    metrics.sample.window.ms

    +

    The window of time a metrics sample is computed over.

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    reconnect.backoff.max.ms

    +

    The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.

    + + + + + +
    Type:long
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    reconnect.backoff.ms

    +

    The base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker. This value is the initial backoff value and will increase exponentially for each consecutive connection failure, up to the reconnect.backoff.max.ms value.

    + + + + + +
    Type:long
    Default:50
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    retry.backoff.max.ms

    +

    The maximum amount of time in milliseconds to wait when retrying a request to the broker that has repeatedly failed. If provided, the backoff per client will increase exponentially for each failed request, up to this maximum. To prevent all clients from being synchronized upon retry, a randomized jitter with a factor of 0.2 will be applied to the backoff, resulting in the backoff falling within a range between 20% below and 20% above the computed value. If retry.backoff.ms is set to be higher than retry.backoff.max.ms, then retry.backoff.max.ms will be used as a constant backoff from the beginning without any exponential increase

    + + + + + +
    Type:long
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    retry.backoff.ms


    The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios. This value is the initial backoff value and will increase exponentially for each failed request, up to the retry.backoff.max.ms value.

    Type: long
    Default: 100
    Valid Values: [0,...]
    Importance: low
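    The reconnect.* and retry.* backoffs are often adjusted together for flaky networks. The fragment below is illustrative only (the values are not recommendations), assuming the usual consumer Properties setup:

```java
props.put("reconnect.backoff.ms", "100");        // first reconnect attempt after 100 ms
props.put("reconnect.backoff.max.ms", "10000");  // exponential growth capped at 10 s
props.put("retry.backoff.ms", "200");            // first retry of a failed request after 200 ms
props.put("retry.backoff.max.ms", "5000");       // retry backoff capped at 5 s
```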

    sasl.kerberos.kinit.cmd

    +

    Kerberos kinit command path.

    + + + + + +
    Type:string
    Default:/usr/bin/kinit
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.min.time.before.relogin

    +

    Login thread sleep time between refresh attempts.

    + + + + + +
    Type:long
    Default:60000
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.ticket.renew.jitter

    +

    Percentage of random jitter added to the renewal time.

    + + + + + +
    Type:double
    Default:0.05
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.ticket.renew.window.factor

    +

    Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.

    + + + + + +
    Type:double
    Default:0.8
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.connect.timeout.ms

    +

    The (optional) value in milliseconds for the external authentication provider connection timeout. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.read.timeout.ms

    +

    The (optional) value in milliseconds for the external authentication provider read timeout. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.buffer.seconds

    +

    The amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:short
    Default:300
    Valid Values:[0,...,3600]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.min.period.seconds

    +

    The desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:short
    Default:60
    Valid Values:[0,...,900]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.window.factor

    +

    Login refresh thread will sleep until the specified window factor relative to the credential's lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:double
    Default:0.8
    Valid Values:[0.5,...,1.0]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.window.jitter

    +

    The maximum amount of random jitter relative to the credential's lifetime that is added to the login refresh thread's sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:double
    Default:0.05
    Valid Values:[0.0,...,0.25]
    Importance:low
    +
  • +
  • +

    sasl.login.retry.backoff.max.ms

    +

    The (optional) value in milliseconds for the maximum wait between login attempts to the external authentication provider. Login uses an exponential backoff algorithm with an initial wait based on the sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.login.retry.backoff.max.ms setting. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.retry.backoff.ms

    +

    The (optional) value in milliseconds for the initial wait between login attempts to the external authentication provider. Login uses an exponential backoff algorithm with an initial wait based on the sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.login.retry.backoff.max.ms setting. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:long
    Default:100
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.exp.seconds

    +

    The number of seconds in the future for which the JWT is valid. The value is used to determine the JWT exp (Expiration) claim based on the current system time when the JWT is created.

    The formula to generate the exp claim is very simple:

    Let:

    x = the current timestamp in seconds, on client
    y = the value of this configuration

    Then:

    exp = x + y

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:int
    Default:300
    Valid Values:[0,...,86400]
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.nbf.seconds

    +

    The number of seconds in the past from which the JWT is valid. The value is used to determine the JWT nbf (Not Before) claim based on the current system time when the JWT is created.

    The formula to generate the nbf claim is very simple:

    Let:

    x = the current timestamp in seconds, on client
    y = the value of this configuration

    Then:

    nbf = x - y

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:int
    Default:60
    Valid Values:[0,...,3600]
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.clock.skew.seconds

    +

    The (optional) value in seconds to allow for differences between the time of the OAuth/OIDC identity provider and the broker.

    + + + + + +
    Type:int
    Default:30
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.expected.audience

    +

    The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. The JWT will be inspected for the standard OAuth "aud" claim and if this value is set, the broker will match the value from JWT's "aud" claim to see if there is an exact match. If there is no match, the broker will reject the JWT and authentication will fail.

    + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.expected.issuer

    +

    The (optional) setting for the broker to use to verify that the JWT was created by the expected issuer. The JWT will be inspected for the standard OAuth "iss" claim and if this value is set, the broker will match it exactly against what is in the JWT's "iss" claim. If there is no match, the broker will reject the JWT and authentication will fail.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.header.urlencode

    +

    The (optional) setting to enable the OAuth client to URL-encode the client_id and client_secret in the authorization header in accordance with RFC 6749. The default value is set to 'false' for backward compatibility.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.refresh.ms

    +

    The (optional) value in milliseconds for the broker to wait between refreshing its JWKS (JSON Web Key Set) cache that contains the keys to verify the signature of the JWT.

    + + + + + +
    Type:long
    Default:3600000 (1 hour)
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms

    +

    The (optional) value in milliseconds for the maximum wait between attempts to retrieve the JWKS (JSON Web Key Set) from the external authentication provider. JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting.

    + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms

    +

    The (optional) value in milliseconds for the initial wait between JWKS (JSON Web Key Set) retrieval attempts from the external authentication provider. JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting.

    + + + + + +
    Type:long
    Default:100
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.scope.claim.name

    +

    The OAuth claim for the scope is often named "scope", but this (optional) setting can provide a different name to use for the scope included in the JWT payload's claims if the OAuth/OIDC provider uses a different name for that claim.

    + + + + + +
    Type:string
    Default:scope
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.sub.claim.name

    +

    The OAuth claim for the subject is often named "sub", but this (optional) setting can provide a different name to use for the subject included in the JWT payload's claims if the OAuth/OIDC provider uses a different name for that claim.

    + + + + + +
    Type:string
    Default:sub
    Valid Values:
    Importance:low
    +
  • +
  • +

    security.providers

    +

    A list of configurable creator classes each returning a provider implementing security algorithms. These classes should implement the org.apache.kafka.common.security.auth.SecurityProviderCreator interface.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.cipher.suites

    +

    A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.

    + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.endpoint.identification.algorithm

    +

    The endpoint identification algorithm to validate server hostname using server certificate.

    + + + + + +
    Type:string
    Default:https
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.engine.factory.class

    +

    The class of type org.apache.kafka.common.security.auth.SslEngineFactory to provide SSLEngine objects. Default value is org.apache.kafka.common.security.ssl.DefaultSslEngineFactory. Alternatively, setting this to org.apache.kafka.common.security.ssl.CommonNameLoggingSslEngineFactory will log the common name of expired SSL certificates used by clients to authenticate at any of the brokers with log level INFO. Note that this will cause a tiny delay during establishment of new connections from mTLS clients to brokers due to the extra code for examining the certificate chain provided by the client. Note further that the implementation uses a custom truststore based on the standard Java truststore and thus might be considered a security risk due to not being as mature as the standard one.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.keymanager.algorithm

    +

    The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.

    + + + + + +
    Type:string
    Default:SunX509
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.secure.random.implementation

    +

    The SecureRandom PRNG implementation to use for SSL cryptography operations.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.trustmanager.algorithm

    +

    The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.

    + + + + + +
    Type:string
    Default:PKIX
    Valid Values:
    Importance:low
    +
  • +
diff --git a/static/41/generated/consumer_metrics.html b/static/41/generated/consumer_metrics.html new file mode 100644 index 000000000..25e375014 --- /dev/null +++ b/static/41/generated/consumer_metrics.html @@ -0,0 +1,153 @@
Metric/Attribute nameDescriptionMbean name
bytes-consumed-rateThe average number of bytes consumed per secondkafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
bytes-consumed-totalThe total number of bytes consumedkafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
fetch-latency-avgThe average time taken for a fetch request.kafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
fetch-latency-maxThe max time taken for any fetch request.kafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
fetch-rateThe number of fetch requests per second.kafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
fetch-size-avgThe average number of bytes fetched per requestkafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
fetch-size-maxThe maximum number of bytes fetched per requestkafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
fetch-throttle-time-avgThe average throttle time in mskafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
fetch-throttle-time-maxThe maximum throttle time in mskafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
fetch-totalThe total number of fetch requests.kafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
records-consumed-rateThe average number of records consumed per secondkafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
records-consumed-totalThe total number of records consumedkafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
records-lag-maxThe maximum lag in terms of number of records for any partition in this window. NOTE: This is based on current offset and not committed offsetkafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
records-lead-minThe minimum lead in terms of number of records for any partition in this windowkafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
records-per-request-avgThe average number of records in each requestkafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
bytes-consumed-rateThe average number of bytes consumed per second for a topic. Note: For topic names with periods (.), an additional metric with underscores is emitted. However, the periods replaced metric is deprecated. Please use the metric with actual topic name instead.kafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}",topic="{topic}"
bytes-consumed-totalThe total number of bytes consumed for a topic. Note: For topic names with periods (.), an additional metric with underscores is emitted. However, the periods replaced metric is deprecated. Please use the metric with actual topic name instead.kafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}",topic="{topic}"
Note: For topic names with periods (.), an additional metric with underscores is emitted. However, the periods-replaced metric is deprecated; please use the metric with the actual topic name instead. (This note applies to every metric in the table below.)

| Metric/Attribute name | Description | MBean name |
| --- | --- | --- |
| fetch-size-avg | The average number of bytes fetched per request for a topic. | kafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}",topic="{topic}" |
| fetch-size-max | The maximum number of bytes fetched per request for a topic. | kafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}",topic="{topic}" |
| records-consumed-rate | The average number of records consumed per second for a topic. | kafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}",topic="{topic}" |
| records-consumed-total | The total number of records consumed for a topic. | kafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}",topic="{topic}" |
| records-per-request-avg | The average number of records in each request for a topic. | kafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}",topic="{topic}" |
| preferred-read-replica | The current read replica for the partition, or -1 if reading from leader. | kafka.consumer:type=consumer-fetch-manager-metrics,partition="{partition}",topic="{topic}",client-id="{client-id}" |
| records-lag | The latest lag of the partition. | kafka.consumer:type=consumer-fetch-manager-metrics,partition="{partition}",topic="{topic}",client-id="{client-id}" |
| records-lag-avg | The average lag of the partition. | kafka.consumer:type=consumer-fetch-manager-metrics,partition="{partition}",topic="{topic}",client-id="{client-id}" |
| records-lag-max | The max lag of the partition. | kafka.consumer:type=consumer-fetch-manager-metrics,partition="{partition}",topic="{topic}",client-id="{client-id}" |
| records-lead | The latest lead of the partition. | kafka.consumer:type=consumer-fetch-manager-metrics,partition="{partition}",topic="{topic}",client-id="{client-id}" |
| records-lead-avg | The average lead of the partition. | kafka.consumer:type=consumer-fetch-manager-metrics,partition="{partition}",topic="{topic}",client-id="{client-id}" |
| records-lead-min | The min lead of the partition. | kafka.consumer:type=consumer-fetch-manager-metrics,partition="{partition}",topic="{topic}",client-id="{client-id}" |
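These fetch-manager metrics are exposed as ordinary JMX attributes, so any JMX client can read them. The following is a minimal sketch, assuming the consumer JVM was started with remote JMX enabled on a hypothetical port 9999; the host, port, and attribute chosen are placeholders, not part of the table above.

```java
import java.util.Set;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class FetchMetricsReader {
    public static void main(String[] args) throws Exception {
        // Assumes the consumer JVM was started with remote JMX enabled, e.g.
        // -Dcom.sun.management.jmxremote.port=9999 (authentication/SSL omitted for brevity).
        JMXServiceURL url =
            new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:9999/jmxrmi");
        try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
            MBeanServerConnection mbs = connector.getMBeanServerConnection();
            // Match every per-topic fetch-manager MBean, whatever the client id and topic.
            Set<ObjectName> names = mbs.queryNames(new ObjectName(
                "kafka.consumer:type=consumer-fetch-manager-metrics,client-id=*,topic=*"), null);
            for (ObjectName name : names) {
                System.out.println(name + " fetch-size-avg="
                    + mbs.getAttribute(name, "fetch-size-avg"));
            }
        }
    }
}
```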
diff --git a/static/41/generated/directory_config_provider_configs.html b/static/41/generated/directory_config_provider_configs.html new file mode 100644 index 000000000..4eab4f468 --- /dev/null +++ b/static/41/generated/directory_config_provider_configs.html @@ -0,0 +1,13 @@ +
allowed.paths
  A comma separated list of paths that this config provider is allowed to access. If not set, all paths are allowed.
  Type: string | Default: null | Importance: medium
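The DirectoryConfigProvider is wired into a client or worker configuration through the generic config.providers mechanism. The sketch below is illustrative only: the alias dir, the /run/secrets directory, and the keystore-password file name are assumptions, not fixed names.

```java
import java.util.Properties;

public class DirectoryProviderExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Register the provider under an alias of our choosing ("dir" is illustrative).
        props.put("config.providers", "dir");
        props.put("config.providers.dir.class",
                  "org.apache.kafka.common.config.provider.DirectoryConfigProvider");
        // allowed.paths (from the table above) restricts which directories the provider may read.
        props.put("config.providers.dir.param.allowed.paths", "/run/secrets");
        // Each file under the directory becomes a key whose value is the file's contents,
        // referenced as ${<alias>:<directory>:<file name>}.
        props.put("ssl.keystore.password", "${dir:/run/secrets:keystore-password}");
        props.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```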
+ diff --git a/static/41/generated/envvar_config_provider_configs.html b/static/41/generated/envvar_config_provider_configs.html new file mode 100644 index 000000000..56d2baf75 --- /dev/null +++ b/static/41/generated/envvar_config_provider_configs.html @@ -0,0 +1,13 @@ +
allowlist.pattern
  A pattern / regular expression that needs to match for environment variables to be used by this config provider.
  Type: string | Default: .* | Importance: medium
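A similar sketch for the EnvVarConfigProvider, showing allowlist.pattern in use. The alias env, the regex, and the KAFKA_TRUSTSTORE_PASSWORD variable are assumptions; the exact placeholder form for environment variables may differ slightly by version, so treat the reference below as illustrative.

```java
import java.util.Properties;

public class EnvVarProviderExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("config.providers", "env");
        props.put("config.providers.env.class",
                  "org.apache.kafka.common.config.provider.EnvVarConfigProvider");
        // allowlist.pattern (from the table above): only variables matching this regex are exposed.
        props.put("config.providers.env.param.allowlist.pattern", "^KAFKA_.*");
        // Illustrative placeholder; KAFKA_TRUSTSTORE_PASSWORD is an assumed environment variable.
        props.put("ssl.truststore.password", "${env:KAFKA_TRUSTSTORE_PASSWORD}");
        props.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```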
+ diff --git a/static/41/generated/file_config_provider_configs.html b/static/41/generated/file_config_provider_configs.html new file mode 100644 index 000000000..0e8f68c79 --- /dev/null +++ b/static/41/generated/file_config_provider_configs.html @@ -0,0 +1,13 @@ +
allowed.paths
  A comma separated list of paths that this config provider is allowed to access. If not set, all paths are allowed.
  Type: string | Default: null | Importance: medium
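The FileConfigProvider differs from the directory provider in that it looks up a named key inside a properties file rather than treating each file as a key. A minimal sketch, with the alias, path, and key names assumed for illustration:

```java
import java.util.Properties;

public class FileProviderExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("config.providers", "file");
        props.put("config.providers.file.class",
                  "org.apache.kafka.common.config.provider.FileConfigProvider");
        // Restrict the provider to a single directory tree via allowed.paths.
        props.put("config.providers.file.param.allowed.paths", "/etc/kafka/secrets");
        // Reference a key inside a properties file: ${<alias>:<path to file>:<key in that file>}.
        props.put("ssl.keystore.password",
                  "${file:/etc/kafka/secrets/ssl.properties:keystore.password}");
        props.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```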
+ diff --git a/static/41/generated/group_config.html b/static/41/generated/group_config.html new file mode 100644 index 000000000..51f062af1 --- /dev/null +++ b/static/41/generated/group_config.html @@ -0,0 +1,103 @@ +
consumer.heartbeat.interval.ms
  The heartbeat interval given to the members of a consumer group.
  Type: int | Default: 5000 (5 seconds) | Valid Values: [1,...] | Importance: medium

consumer.session.timeout.ms
  The timeout to detect client failures when using the consumer group protocol.
  Type: int | Default: 45000 (45 seconds) | Valid Values: [1,...] | Importance: medium

share.auto.offset.reset
  The strategy to initialize the share-partition start offset.
  • earliest: automatically reset the offset to the earliest offset
  • latest: automatically reset the offset to the latest offset
  • by_duration:<duration>: automatically reset the offset to a configured duration from the current timestamp. <duration> must be specified in ISO8601 format (PnDTnHnMn.nS). Negative duration is not allowed.
  • anything else: throw exception to the share consumer.
  Type: string | Default: latest | Valid Values: [latest, earliest, by_duration:PnDTnHnMn.nS] | Importance: medium

share.heartbeat.interval.ms
  The heartbeat interval given to the members of a share group.
  Type: int | Default: 5000 (5 seconds) | Valid Values: [1,...] | Importance: medium

share.isolation.level
  Controls how to read records written transactionally. If set to "read_committed", the share group will only deliver transactional records which have been committed. If set to "read_uncommitted", the share group will return all messages, even transactional messages which have been aborted. Non-transactional records will be returned unconditionally in either mode.
  Type: string | Default: read_uncommitted | Valid Values: [read_committed, read_uncommitted] | Importance: medium

share.record.lock.duration.ms
  The record acquisition lock duration in milliseconds for share groups.
  Type: int | Default: 30000 (30 seconds) | Valid Values: [1000,...] | Importance: medium

share.session.timeout.ms
  The timeout to detect client failures when using the share group protocol.
  Type: int | Default: 45000 (45 seconds) | Valid Values: [1,...] | Importance: medium

streams.heartbeat.interval.ms
  The heartbeat interval given to the members.
  Type: int | Default: 5000 (5 seconds) | Valid Values: [1,...] | Importance: medium

streams.num.standby.replicas
  The number of standby replicas for each task.
  Type: int | Default: 0 | Valid Values: [0,...] | Importance: medium

streams.session.timeout.ms
  The timeout to detect client failures when using the streams group protocol.
  Type: int | Default: 45000 (45 seconds) | Valid Values: [1,...] | Importance: medium
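The group-level settings above are applied per group at runtime rather than in a static properties file. A minimal sketch of altering one of them with the Admin client, assuming the client version in use exposes a GROUP config resource type (as in recent releases); the bootstrap address and the group id my-group are placeholders.

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class GroupConfigExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            // "my-group" is a placeholder group id; ConfigResource.Type.GROUP is assumed to be
            // available in the broker/client versions this documentation ships with.
            ConfigResource group = new ConfigResource(ConfigResource.Type.GROUP, "my-group");
            AlterConfigOp op = new AlterConfigOp(
                new ConfigEntry("consumer.session.timeout.ms", "60000"), AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(Map.of(group, List.of(op))).all().get();
        }
    }
}
```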
+ diff --git a/static/41/generated/kafka_config.html b/static/41/generated/kafka_config.html new file mode 100644 index 000000000..38c42094b --- /dev/null +++ b/static/41/generated/kafka_config.html @@ -0,0 +1,3479 @@ +
    +
  • +

    node.id

    +

    The node ID associated with the roles this process is playing when process.roles is non-empty. This is required configuration when running in KRaft mode.

    + + + + + + +
    Type:int
    Default:
    Valid Values:[0,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    process.roles

    +

    The roles that this process plays: 'broker', 'controller', or 'broker,controller' if it is both.

    + + + + + + +
    Type:list
    Default:
    Valid Values:[broker, controller]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    add.partitions.to.txn.retry.backoff.max.ms

    +

    The maximum allowed timeout for adding partitions to transactions on the server side. It only applies to the actual add partition operations, not the verification. It will not be effective if it is larger than request.timeout.ms

    + + + + + + +
    Type:int
    Default:100
    Valid Values:[0,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    add.partitions.to.txn.retry.backoff.ms

    +

    The server-side retry backoff when the server attempts to add the partition to the transaction.

    + + + + + + +
    Type:int
    Default:20
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    advertised.listeners

    +

    Specifies the listener addresses that the Kafka brokers will advertise to clients and other brokers. The config is useful where the actual listener configuration listeners does not represent the addresses that clients should use to connect, such as in cloud environments. The addresses are published to and managed by the controller, the brokers pull these data from the controller as needed. In IaaS environments, this may need to be different from the interface to which the broker binds. If this is not set, the value for listeners will be used. Unlike listeners, it is not valid to advertise the 0.0.0.0 meta-address.
    Also unlike listeners, there can be duplicated ports in this property, so that one listener can be configured to advertise another listener's address. This can be useful in some cases where external load balancers are used.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    auto.create.topics.enable

    +

    Enable auto creation of topic on the server.

    + + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    auto.leader.rebalance.enable

    +

    Enables auto leader balancing. A background thread checks the distribution of partition leaders at regular intervals, configurable by leader.imbalance.check.interval.seconds. If the leader is imbalanced, leader rebalance to the preferred leader for partitions is triggered.

    + + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    background.threads

    +

    The number of threads to use for various background processing tasks

    + + + + + + +
    Type:int
    Default:10
    Valid Values:[1,...]
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    broker.id

    +

    The broker id for this server.

    + + + + + + +
    Type:int
    Default:-1
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    compression.type

    +

    Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.

    + + + + + + +
    Type:string
    Default:producer
    Valid Values:[uncompressed, zstd, lz4, snappy, gzip, producer]
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    controller.listener.names

    +

    A comma-separated list of the names of the listeners used by the controller. This is required when communicating with the controller quorum; the broker will always use the first listener in this list.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    controller.quorum.bootstrap.servers

    +

    List of endpoints to use for bootstrapping the cluster metadata. The endpoints are specified in comma-separated list of {host}:{port} entries. For example: localhost:9092,localhost:9093,localhost:9094.

    + + + + + + +
    Type:list
    Default:""
    Valid Values:non-empty list
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    controller.quorum.election.backoff.max.ms

    +

    Maximum time in milliseconds before starting new elections. This is used in the binary exponential backoff mechanism that helps prevent gridlocked elections

    + + + + + + +
    Type:int
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    controller.quorum.election.timeout.ms

    +

    Maximum time in milliseconds to wait without being able to fetch from the leader before triggering a new election

    + + + + + + +
    Type:int
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    controller.quorum.fetch.timeout.ms

    +

    Maximum time without a successful fetch from the current leader before becoming a candidate and triggering an election for voters; Maximum time a leader can go without receiving valid fetch or fetchSnapshot request from a majority of the quorum before resigning.

    + + + + + + +
    Type:int
    Default:2000 (2 seconds)
    Valid Values:[0,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    controller.quorum.voters

    +

    Map of id/endpoint information for the set of voters in a comma-separated list of {id}@{host}:{port} entries. For example: 1@localhost:9092,2@localhost:9093,3@localhost:9094

    + + + + + + +
    Type:list
    Default:""
    Valid Values:non-empty list
    Importance:high
    Update Mode:read-only
    +
  • +
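The controller.quorum.* settings above describe the KRaft metadata quorum. To see what the quorum currently looks like from a client, the Admin API offers a metadata-quorum describe call; the sketch below is illustrative, with a placeholder bootstrap address, and the exact result accessors may vary slightly across client versions.

```java
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.QuorumInfo;

public class QuorumStateExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            // Describe the current KRaft quorum: leader id plus per-voter replication progress.
            QuorumInfo quorum = admin.describeMetadataQuorum().quorumInfo().get();
            System.out.println("leader id: " + quorum.leaderId());
            quorum.voters().forEach(v ->
                System.out.println("voter " + v.replicaId() + " logEndOffset=" + v.logEndOffset()));
        }
    }
}
```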
  • +

    delete.topic.enable

    +

    When set to true, topics can be deleted by the admin client. When set to false, deletion requests will be explicitly rejected by the broker.

    + + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    early.start.listeners

    +

    A comma-separated list of listener names which may be started before the authorizer has finished initialization. This is useful when the authorizer is dependent on the cluster itself for bootstrapping, as is the case for the StandardAuthorizer (which stores ACLs in the metadata log.) By default, all listeners included in controller.listener.names will also be early start listeners. A listener should not appear in this list if it accepts external traffic.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    group.coordinator.threads

    +

    The number of threads used by the group coordinator.

    + + + + + + +
    Type:int
    Default:4
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    leader.imbalance.check.interval.seconds

    +

    The frequency with which the partition rebalance check is triggered by the controller

    + + + + + + +
    Type:long
    Default:300
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    listeners

    +

    Listener List - Comma-separated list of URIs we will listen on and the listener names. If the listener name is not a security protocol, listener.security.protocol.map must also be set.
    Listener names and port numbers must be unique unless one listener is an IPv4 address and the other listener is an IPv6 address (for the same port).
    Specify hostname as 0.0.0.0 to bind to all interfaces.
    Leave hostname empty to bind to default interface.
    Examples of legal listener lists:
    PLAINTEXT://myhost:9092,SSL://:9091
    CLIENT://0.0.0.0:9092,REPLICATION://localhost:9093
    PLAINTEXT://127.0.0.1:9092,SSL://[::1]:9092

    + + + + + + +
    Type:string
    Default:PLAINTEXT://:9092
    Valid Values:
    Importance:high
    Update Mode:per-broker
    +
  • +
  • +

    log.dir

    +

    The directory in which the log data is kept (supplemental for log.dirs property)

    + + + + + + +
    Type:string
    Default:/tmp/kafka-logs
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    log.dirs

    +

    A comma-separated list of the directories where the log data is stored. If not set, the value in log.dir is used.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    log.flush.interval.messages

    +

    The number of messages accumulated on a log partition before messages are flushed to disk.

    + + + + + + +
    Type:long
    Default:9223372036854775807
    Valid Values:[1,...]
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    log.flush.interval.ms

    +

    The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used

    + + + + + + +
    Type:long
    Default:null
    Valid Values:
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    log.flush.offset.checkpoint.interval.ms

    +

    The frequency with which we update the persistent record of the last flush which acts as the log recovery point.

    + + + + + + +
    Type:int
    Default:60000 (1 minute)
    Valid Values:[0,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    log.flush.scheduler.interval.ms

    +

    The frequency in ms that the log flusher checks whether any log needs to be flushed to disk

    + + + + + + +
    Type:long
    Default:9223372036854775807
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    log.flush.start.offset.checkpoint.interval.ms

    +

    The frequency with which we update the persistent record of log start offset

    + + + + + + +
    Type:int
    Default:60000 (1 minute)
    Valid Values:[0,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    log.retention.bytes

    +

    The maximum size of the log before deleting it

    + + + + + + +
    Type:long
    Default:-1
    Valid Values:
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    log.retention.hours

    +

    The number of hours to keep a log file before deleting it (in hours), tertiary to log.retention.ms property

    + + + + + + +
    Type:int
    Default:168
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    log.retention.minutes

    +

    The number of minutes to keep a log file before deleting it (in minutes), secondary to log.retention.ms property. If not set, the value in log.retention.hours is used

    + + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    log.retention.ms

    +

    The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.

    + + + + + + +
    Type:long
    Default:null
    Valid Values:
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    log.roll.hours

    +

    The maximum time before a new log segment is rolled out (in hours), secondary to log.roll.ms property

    + + + + + + +
    Type:int
    Default:168
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    log.roll.jitter.hours

    +

    The maximum jitter to subtract from logRollTimeMillis (in hours), secondary to log.roll.jitter.ms property

    + + + + + + +
    Type:int
    Default:0
    Valid Values:[0,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    log.roll.jitter.ms

    +

    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used

    + + + + + + +
    Type:long
    Default:null
    Valid Values:
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    log.roll.ms

    +

    The maximum time before a new log segment is rolled out (in milliseconds). If not set, the value in log.roll.hours is used

    + + + + + + +
    Type:long
    Default:null
    Valid Values:
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    log.segment.bytes

    +

    The maximum size of a single log file

    + + + + + + +
    Type:int
    Default:1073741824 (1 gibibyte)
    Valid Values:[1048576,...]
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    log.segment.delete.delay.ms

    +

    The amount of time to wait before deleting a file from the filesystem. If the value is 0 and there is no file to delete, the system will wait 1 millisecond. Low value will cause busy waiting

    + + + + + + +
    Type:long
    Default:60000 (1 minute)
    Valid Values:[0,...]
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    message.max.bytes

    +

    The largest record batch size allowed by Kafka (after compression if compression is enabled). This can be set per topic with the topic level max.message.bytes config.

    + + + + + + +
    Type:int
    Default:1048588
    Valid Values:[0,...]
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    metadata.log.dir

    +

    This configuration determines where we put the metadata log. If it is not set, the metadata log is placed in the first log directory from log.dirs.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    metadata.log.max.record.bytes.between.snapshots

    +

    This is the maximum number of bytes in the log between the latest snapshot and the high-watermark needed before generating a new snapshot. The default value is 20971520. To generate snapshots based on the time elapsed, see the metadata.log.max.snapshot.interval.ms configuration. The Kafka node will generate a snapshot when either the maximum time interval is reached or the maximum bytes limit is reached.

    + + + + + + +
    Type:long
    Default:20971520
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    metadata.log.max.snapshot.interval.ms

    +

    This is the maximum number of milliseconds to wait to generate a snapshot if there are committed records in the log that are not included in the latest snapshot. A value of zero disables time based snapshot generation. The default value is 3600000. To generate snapshots based on the number of metadata bytes, see the metadata.log.max.record.bytes.between.snapshots configuration. The Kafka node will generate a snapshot when either the maximum time interval is reached or the maximum bytes limit is reached.

    + + + + + + +
    Type:long
    Default:3600000 (1 hour)
    Valid Values:[0,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    metadata.log.segment.bytes

    +

    The maximum size of a single metadata log file.

    + + + + + + +
    Type:int
    Default:1073741824 (1 gibibyte)
    Valid Values:[8388608,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    metadata.log.segment.ms

    +

    The maximum time before a new metadata log file is rolled out (in milliseconds).

    + + + + + + +
    Type:long
    Default:604800000 (7 days)
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    metadata.max.retention.bytes

    +

    The maximum combined size of the metadata log and snapshots before deleting old snapshots and log files. Since at least one snapshot must exist before any logs can be deleted, this is a soft limit.

    + + + + + + +
    Type:long
    Default:104857600 (100 mebibytes)
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    metadata.max.retention.ms

    +

    The number of milliseconds to keep a metadata log file or snapshot before deleting it. Since at least one snapshot must exist before any logs can be deleted, this is a soft limit.

    + + + + + + +
    Type:long
    Default:604800000 (7 days)
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    min.insync.replicas

    +

    Specifies the minimum number of in-sync replicas (including the leader) required for a write to succeed when a producer sets acks to "all" (or "-1"). In the acks=all case, every in-sync replica must acknowledge a write for it to be considered successful. E.g., if a topic has replication.factor of 3 and the ISR set includes all three replicas, then all three replicas must acknowledge an acks=all write for it to succeed, even if min.insync.replicas happens to be less than 3. If acks=all and the current ISR set contains fewer than min.insync.replicas members, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend).
    Regardless of the acks setting, the messages will not be visible to the consumers until they are replicated to all in-sync replicas and the min.insync.replicas condition is met.
    When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of "all". This ensures that a majority of replicas must persist a write before it's considered successful by the producer and it's visible to consumers.

    Note that when the Eligible Leader Replicas feature is enabled, the semantics of this config changes. Please refer to the ELR section for more info.

    + + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:high
    Update Mode:cluster-wide
    +
  • +
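The typical durability pairing described above (replication factor 3, min.insync.replicas set to 2, producer acks=all) could look like the following sketch; the topic name payments, the key/value contents, and the bootstrap address are placeholders.

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class DurableProduceExample {
    public static void main(String[] args) throws Exception {
        Properties adminProps = new Properties();
        adminProps.put("bootstrap.servers", "localhost:9092");
        try (Admin admin = Admin.create(adminProps)) {
            // Topic-level override of min.insync.replicas, paired with replication factor 3.
            NewTopic topic = new NewTopic("payments", 3, (short) 3)
                .configs(Map.of("min.insync.replicas", "2"));
            admin.createTopics(List.of(topic)).all().get();
        }

        Properties producerProps = new Properties();
        producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        producerProps.put(ProducerConfig.ACKS_CONFIG, "all"); // wait for all in-sync replicas
        producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(producerProps)) {
            producer.send(new ProducerRecord<>("payments", "order-1", "created")).get();
        }
    }
}
```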
  • +

    num.io.threads

    +

    The number of threads that the server uses for processing requests, which may include disk I/O

    + + + + + + +
    Type:int
    Default:8
    Valid Values:[1,...]
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    num.network.threads

    +

    The number of threads that the server uses for receiving requests from the network and sending responses to the network. Note: each listener (except for the controller listener) creates its own thread pool.

    + + + + + + +
    Type:int
    Default:3
    Valid Values:[1,...]
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    num.recovery.threads.per.data.dir

    +

    The number of threads per data directory to be used for log recovery at startup and flushing at shutdown

    + + + + + + +
    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    num.replica.alter.log.dirs.threads

    +

    The number of threads that can move replicas between log directories, which may include disk I/O. The default value is equal to the number of directories specified in the log.dir or log.dirs configuration property.

    + + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    num.replica.fetchers

    +

    Number of fetcher threads used to replicate records from each source broker. The total number of fetchers on each broker is bound by num.replica.fetchers multiplied by the number of brokers in the cluster. Increasing this value can increase the degree of I/O parallelism in the follower and leader broker at the cost of higher CPU and memory utilization.

    + + + + + + +
    Type:int
    Default:1
    Valid Values:
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    offset.metadata.max.bytes

    +

    The maximum size for a metadata entry associated with an offset commit.

    + + + + + + +
    Type:int
    Default:4096 (4 kibibytes)
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    offsets.commit.timeout.ms

    +

    Offset commit will be delayed until all replicas for the offsets topic receive the commit or this timeout is reached. This is similar to the producer request timeout. This is applied to all the writes made by the coordinator.

    + + + + + + +
    Type:int
    Default:5000 (5 seconds)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    offsets.load.buffer.size

    +

    Batch size for reading from the offsets segments when loading group metadata into the cache (soft-limit, overridden if records are too large).

    + + + + + + +
    Type:int
    Default:5242880
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    offsets.retention.check.interval.ms

    +

    Frequency at which to check for stale offsets

    + + + + + + +
    Type:long
    Default:600000 (10 minutes)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    offsets.retention.minutes

    +

    For subscribed consumers, committed offset of a specific partition will be expired and discarded when 1) this retention period has elapsed after the consumer group loses all its consumers (i.e. becomes empty); 2) this retention period has elapsed since the last time an offset is committed for the partition and the group is no longer subscribed to the corresponding topic. For standalone consumers (using manual assignment), offsets will be expired after this retention period has elapsed since the time of last commit. Note that when a group is deleted via the delete-group request, its committed offsets will also be deleted without extra retention period; also when a topic is deleted via the delete-topic request, upon propagated metadata update any group's committed offsets for that topic will also be deleted without extra retention period.

    + + + + + + +
    Type:int
    Default:10080
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    offsets.topic.compression.codec

    +

    Compression codec for the offsets topic - compression may be used to achieve "atomic" commits.

    + + + + + + +
    Type:int
    Default:0
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    offsets.topic.num.partitions

    +

    The number of partitions for the offset commit topic (should not change after deployment).

    + + + + + + +
    Type:int
    Default:50
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    offsets.topic.replication.factor

    +

    The replication factor for the offsets topic (set higher to ensure availability). Internal topic creation will fail until the cluster size meets this replication factor requirement.

    + + + + + + +
    Type:short
    Default:3
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    offsets.topic.segment.bytes

    +

    The offsets topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads.

    + + + + + + +
    Type:int
    Default:104857600 (100 mebibytes)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    queued.max.requests

    +

    The number of queued requests allowed for data-plane, before blocking the network threads

    + + + + + + +
    Type:int
    Default:500
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    replica.fetch.min.bytes

    +

    Minimum bytes expected for each fetch response. If not enough bytes, wait up to replica.fetch.wait.max.ms (broker config).

    + + + + + + +
    Type:int
    Default:1
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    replica.fetch.wait.max.ms

    +

    The maximum wait time for each fetcher request issued by follower replicas. This value should always be less than the replica.lag.time.max.ms at all times to prevent frequent shrinking of ISR for low throughput topics

    + + + + + + +
    Type:int
    Default:500
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    replica.high.watermark.checkpoint.interval.ms

    +

    The frequency with which the high watermark is saved out to disk

    + + + + + + +
    Type:long
    Default:5000 (5 seconds)
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    replica.lag.time.max.ms

    +

    If a follower hasn't sent any fetch requests or hasn't consumed up to the leader's log end offset for at least this time, the leader will remove the follower from ISR

    + + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    replica.socket.receive.buffer.bytes

    +

    The socket receive buffer for network requests to the leader for replicating data

    + + + + + + +
    Type:int
    Default:65536 (64 kibibytes)
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    replica.socket.timeout.ms

    +

    The socket timeout for network requests. Its value should be at least replica.fetch.wait.max.ms

    + + + + + + +
    Type:int
    Default:30000 (30 seconds)
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    request.timeout.ms

    +

    The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.

    + + + + + + +
    Type:int
    Default:30000 (30 seconds)
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    sasl.mechanism.controller.protocol

    +

    SASL mechanism used for communication with controllers. Default is GSSAPI.

    + + + + + + +
    Type:string
    Default:GSSAPI
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    share.coordinator.load.buffer.size

    +

    Batch size for reading from the share-group state topic when loading state information into the cache (soft-limit, overridden if records are too large).

    + + + + + + +
    Type:int
    Default:5242880
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    share.coordinator.state.topic.compression.codec

    +

    Compression codec for the share-group state topic.

    + + + + + + +
    Type:int
    Default:0
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    share.coordinator.state.topic.min.isr

    +

    Overridden min.insync.replicas for the share-group state topic.

    + + + + + + +
    Type:short
    Default:2
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    share.coordinator.state.topic.num.partitions

    +

    The number of partitions for the share-group state topic (should not change after deployment).

    + + + + + + +
    Type:int
    Default:50
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    share.coordinator.state.topic.replication.factor

    +

    Replication factor for the share-group state topic. Topic creation will fail until the cluster size meets this replication factor requirement.

    + + + + + + +
    Type:short
    Default:3
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    share.coordinator.state.topic.segment.bytes

    +

    The log segment size for the share-group state topic.

    + + + + + + +
    Type:int
    Default:104857600 (100 mebibytes)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    share.coordinator.write.timeout.ms

    +

    The duration in milliseconds that the share coordinator will wait for all replicas of the share-group state topic to receive a write.

    + + + + + + +
    Type:int
    Default:5000 (5 seconds)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    socket.receive.buffer.bytes

    +

    The SO_RCVBUF buffer of the socket server sockets. If the value is -1, the OS default will be used.

    + + + + + + +
    Type:int
    Default:102400 (100 kibibytes)
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    socket.request.max.bytes

    +

    The maximum number of bytes in a socket request

    + + + + + + +
    Type:int
    Default:104857600 (100 mebibytes)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    socket.send.buffer.bytes

    +

    The SO_SNDBUF buffer of the socket server sockets. If the value is -1, the OS default will be used.

    + + + + + + +
    Type:int
    Default:102400 (100 kibibytes)
    Valid Values:
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    transaction.max.timeout.ms

    +

    The maximum allowed timeout for transactions. If a client's requested transaction time exceeds this, the broker will return an error in InitProducerIdRequest. This prevents a client from using too large a timeout, which could stall consumers reading from topics included in the transaction.

    + + + + + + +
    Type:int
    Default:900000 (15 minutes)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    transaction.state.log.load.buffer.size

    +

    Batch size for reading from the transaction log segments when loading producer ids and transactions into the cache (soft-limit, overridden if records are too large).

    + + + + + + +
    Type:int
    Default:5242880
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    transaction.state.log.min.isr

    +

    The minimum number of replicas that must acknowledge a write to transaction topic in order to be considered successful.

    + + + + + + +
    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    transaction.state.log.num.partitions

    +

    The number of partitions for the transaction topic (should not change after deployment).

    + + + + + + +
    Type:int
    Default:50
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    transaction.state.log.replication.factor

    +

    The replication factor for the transaction topic (set higher to ensure availability). Internal topic creation will fail until the cluster size meets this replication factor requirement.

    + + + + + + +
    Type:short
    Default:3
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    transaction.state.log.segment.bytes

    +

    The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads

    + + + + + + +
    Type:int
    Default:104857600 (100 mebibytes)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    transactional.id.expiration.ms

    +

    The time in ms that the transaction coordinator will wait without receiving any transaction status updates for the current transaction before expiring its transactional id. Transactional IDs will not expire while the transaction is still ongoing.

    + + + + + + +
    Type:int
    Default:604800000 (7 days)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
    +
  • +
  • +

    unclean.leader.election.enable

    +

    Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss

    Note: In KRaft mode, when enabling this config dynamically, it needs to wait for the unclean leader election thread to trigger election periodically (default is 5 minutes). Please run `kafka-leader-election.sh` with `unclean` option to trigger the unclean leader election immediately if needed.

    + + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:high
    Update Mode:cluster-wide
    +
  • +
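Because unclean.leader.election.enable is marked Update Mode: cluster-wide, it can be changed without a broker restart. A minimal sketch of such a dynamic update via the Admin client follows; the bootstrap address is a placeholder, and the empty broker id targets the cluster-wide dynamic default rather than a single broker. As the note above explains, in KRaft mode the change may only take effect when the periodic unclean leader election check next runs.

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class DynamicBrokerConfigExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            // An empty broker id addresses the cluster-wide dynamic default
            // (applies to configs whose Update Mode is cluster-wide).
            ConfigResource clusterDefault = new ConfigResource(ConfigResource.Type.BROKER, "");
            AlterConfigOp enable = new AlterConfigOp(
                new ConfigEntry("unclean.leader.election.enable", "true"), AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(Map.of(clusterDefault, List.of(enable))).all().get();
        }
    }
}
```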
  • +

    broker.heartbeat.interval.ms

    +

    The length of time in milliseconds between broker heartbeats.

    + + + + + + +
    Type:int
    Default:2000 (2 seconds)
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    broker.rack

    +

    Rack of the broker. This will be used in rack aware replication assignment for fault tolerance. Examples: RACK1, us-east-1d

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    broker.session.timeout.ms

    +

    The length of time in milliseconds that a broker lease lasts if no heartbeats are made.

    + + + + + + +
    Type:int
    Default:9000 (9 seconds)
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    compression.gzip.level

    +

    The compression level to use if compression.type is set to 'gzip'.

    + + + + + + +
    Type:int
    Default:-1
    Valid Values:[1,...,9] or -1
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    compression.lz4.level

    +

    The compression level to use if compression.type is set to 'lz4'.

    + + + + + + +
    Type:int
    Default:9
    Valid Values:[1,...,17]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    compression.zstd.level

    +

    The compression level to use if compression.type is set to 'zstd'.

    + + + + + + +
    Type:int
    Default:3
    Valid Values:[-131072,...,22]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    connections.max.idle.ms

    +

    Idle connections timeout: the server socket processor threads close the connections that idle more than this

    + + + + + + +
    Type:long
    Default:600000 (10 minutes)
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    connections.max.reauth.ms

    +

    When explicitly set to a positive number (the default is 0, not a positive number), a session lifetime that will not exceed the configured value will be communicated to v2.2.0 or later clients when they authenticate. The broker will disconnect any such connection that is not re-authenticated within the session lifetime and that is then subsequently used for any purpose other than re-authentication. Configuration names can optionally be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.oauthbearer.connections.max.reauth.ms=3600000

    + + + + + + +
    Type:long
    Default:0
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    controlled.shutdown.enable

    +

    Enable controlled shutdown of the server.

    + + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    controller.quorum.append.linger.ms

    +

    The duration in milliseconds that the leader will wait for writes to accumulate before flushing them to disk.

    + + + + + + +
    Type:int
    Default:25
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    controller.quorum.request.timeout.ms

    +

    The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.

    + + + + + + +
    Type:int
    Default:2000 (2 seconds)
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    controller.socket.timeout.ms

    +

    The socket timeout for controller-to-broker channels.

    + + + + + + +
    Type:int
    Default:30000 (30 seconds)
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    default.replication.factor

    +

    The replication factor for automatically created topics, and for topics created with -1 as the replication factor

    + + + + + + +
    Type:int
    Default:1
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    delegation.token.expiry.time.ms

    +

    The token validity time in milliseconds before the token needs to be renewed. Default value 1 day.

    + + + + + + +
    Type:long
    Default:86400000 (1 day)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    delegation.token.max.lifetime.ms

    +

    The token has a maximum lifetime beyond which it cannot be renewed anymore. Default value 7 days.

    + + + + + + +
    Type:long
    Default:604800000 (7 days)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    delegation.token.secret.key

    +

    Secret key to generate and verify delegation tokens. The same key must be configured across all the brokers. If using Kafka with KRaft, the key must also be set across all controllers. If the key is not set or set to empty string, brokers will disable the delegation token support.

    + + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    delete.records.purgatory.purge.interval.requests

    +

    The purge interval (in number of requests) of the delete records request purgatory

    + + + + + + +
    Type:int
    Default:1
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    fetch.max.bytes

    +

    The maximum number of bytes we will return for a fetch request. Must be at least 1024.

    + + + + + + +
    Type:int
    Default:57671680 (55 mebibytes)
    Valid Values:[1024,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    fetch.purgatory.purge.interval.requests

    +

    The purge interval (in number of requests) of the fetch request purgatory

    + + + + + + +
    Type:int
    Default:1000
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.consumer.assignors

    +

    The server side assignors as a list of either names for builtin assignors or full class names for customer assignors. The first one in the list is considered as the default assignor to be used in the case where the consumer does not specify an assignor. The supported builtin assignors are: uniform, range.

    + + + + + + +
    Type:list
    Default:uniform,range
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.consumer.heartbeat.interval.ms

    +

    The heartbeat interval given to the members of a consumer group.

    + + + + + + +
    Type:int
    Default:5000 (5 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.consumer.max.heartbeat.interval.ms

    +

    The maximum heartbeat interval for registered consumers.

    + + + + + + +
    Type:int
    Default:15000 (15 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.consumer.max.session.timeout.ms

    +

    The maximum allowed session timeout for registered consumers.

    + + + + + + +
    Type:int
    Default:60000 (1 minute)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.consumer.max.size

    +

    The maximum number of consumers that a single consumer group can accommodate. This value will only impact groups under the CONSUMER group protocol. To configure the max group size when using the CLASSIC group protocol use group.max.size instead.

    + + + + + + +
    Type:int
    Default:2147483647
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.consumer.migration.policy

    +

    The config that enables converting the non-empty classic group using the consumer embedded protocol to the non-empty consumer group using the consumer group protocol and vice versa; conversions of empty groups in both directions are always enabled regardless of this policy.
    • bidirectional: both upgrade from classic group to consumer group and downgrade from consumer group to classic group are enabled
    • upgrade: only upgrade from classic group to consumer group is enabled
    • downgrade: only downgrade from consumer group to classic group is enabled
    • disabled: neither upgrade nor downgrade is enabled

    + + + + + + +
    Type:string
    Default:bidirectional
    Valid Values:(case insensitive) [DISABLED, DOWNGRADE, UPGRADE, BIDIRECTIONAL]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.consumer.min.heartbeat.interval.ms

    +

    The minimum heartbeat interval for registered consumers.

    + + + + + + +
    Type:int
    Default:5000 (5 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.consumer.min.session.timeout.ms

    +

    The minimum allowed session timeout for registered consumers.

    + + + + + + +
    Type:int
    Default:45000 (45 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.consumer.session.timeout.ms

    +

    The timeout to detect client failures when using the consumer group protocol.

    + + + + + + +
    Type:int
    Default:45000 (45 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.coordinator.append.linger.ms

    +

    The duration in milliseconds that the coordinator will wait for writes to accumulate before flushing them to disk. Increasing this value improves write efficiency and batch size, but also increases the response latency for requests, as the coordinator must wait for batches to be flushed to disk before completing request processing. Transactional writes are not accumulated.

    + + + + + + +
    Type:int
    Default:5
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.coordinator.rebalance.protocols

    +

    The list of enabled rebalance protocols. The streams rebalance protocol is in early access and therefore must not be used in production.

    + + + + + + +
    Type:list
    Default:classic,consumer,streams
    Valid Values:[consumer, classic, share, streams]
    Importance:medium
    Update Mode:read-only
    +
  • +
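On the client side, a consumer opts into one of the protocols enabled here through its own group.protocol setting. The following is a hedged sketch: the group id, topic, and bootstrap address are placeholders, and group.protocol=consumer only works when the broker list above includes consumer.

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class NewProtocolConsumerExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
        // Client-side opt-in to the next-generation "consumer" rebalance protocol;
        // the broker must list "consumer" in group.coordinator.rebalance.protocols.
        props.put("group.protocol", "consumer");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("orders"));
            consumer.poll(Duration.ofSeconds(1)).forEach(r -> System.out.println(r.value()));
        }
    }
}
```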
  • +

    group.initial.rebalance.delay.ms

    +

    The amount of time the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins.

    + + + + + + +
    Type:int
    Default:3000 (3 seconds)
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.max.session.timeout.ms

    +

    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

    + + + + + + +
    Type:int
    Default:1800000 (30 minutes)
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.max.size

    +

    The maximum number of consumers that a single consumer group can accommodate.

    + + + + + + +
    Type:int
    Default:2147483647
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.min.session.timeout.ms

    +

    The minimum allowed session timeout for registered consumers. Shorter timeouts result in quicker failure detection at the cost of more frequent consumer heartbeating, which can overwhelm broker resources.

    + + + + + + +
    Type:int
    Default:6000 (6 seconds)
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.share.assignors

    +

    The server-side assignors as a list of either names for built-in assignors or full class names for custom assignors. The list must contain only a single entry which is used by all groups. The supported built-in assignors are: simple.

    + + + + + + +
    Type:list
    Default:simple
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.share.delivery.count.limit

    +

    The maximum number of delivery attempts for a record delivered to a share group.

    + + + + + + +
    Type:int
    Default:5
    Valid Values:[2,...,10]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.share.heartbeat.interval.ms

    +

    The heartbeat interval given to the members of a share group.

    + + + + + + +
    Type:int
    Default:5000 (5 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.share.max.heartbeat.interval.ms

    +

    The maximum heartbeat interval for share group members.

    + + + + + + +
    Type:int
    Default:15000 (15 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.share.max.record.lock.duration.ms

    +

    The record acquisition lock maximum duration in milliseconds for share groups.

    + + + + + + +
    Type:int
    Default:60000 (1 minute)
    Valid Values:[30000,...,3600000]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.share.max.session.timeout.ms

    +

    The maximum allowed session timeout for share group members.

    + + + + + + +
    Type:int
    Default:60000 (1 minute)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.share.max.share.sessions

    +

    The maximum number of share sessions per broker.

    + + + + + + +
    Type:int
    Default:2000
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.share.max.size

    +

    The maximum number of members that a single share group can accommodate.

    + + + + + + +
    Type:int
    Default:200
    Valid Values:[1,...,1000]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.share.min.heartbeat.interval.ms

    +

    The minimum heartbeat interval for share group members.

    + + + + + + +
    Type:int
    Default:5000 (5 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.share.min.record.lock.duration.ms

    +

    The record acquisition lock minimum duration in milliseconds for share groups.

    + + + + + + +
    Type:int
    Default:15000 (15 seconds)
    Valid Values:[1000,...,30000]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.share.min.session.timeout.ms

    +

    The minimum allowed session timeout for share group members.

    + + + + + + +
    Type:int
    Default:45000 (45 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.share.partition.max.record.locks

    +

    Share-group record lock limit per share-partition.

    + + + + + + +
    Type:int
    Default:2000
    Valid Values:[100,...,10000]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.share.record.lock.duration.ms

    +

    The record acquisition lock duration in milliseconds for share groups.

    + + + + + + +
    Type:int
    Default:30000 (30 seconds)
    Valid Values:[1000,...,3600000]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.share.session.timeout.ms

    +

    The timeout to detect client failures when using the share group protocol.

    + + + + + + +
    Type:int
    Default:45000 (45 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.streams.heartbeat.interval.ms

    +

    The heartbeat interval given to the members.

    + + + + + + +
    Type:int
    Default:5000 (5 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.streams.max.heartbeat.interval.ms

    +

    The maximum allowed value for the group-level configuration of streams.heartbeat.interval.ms

    + + + + + + +
    Type:int
    Default:15000 (15 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.streams.max.session.timeout.ms

    +

    The maximum allowed value for the group-level configuration of streams.session.timeout.ms

    + + + + + + +
    Type:int
    Default:60000 (1 minute)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.streams.max.size

    +

    The maximum number of streams clients that a single streams group can accommodate.

    + + + + + + +
    Type:int
    Default:2147483647
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.streams.max.standby.replicas

    +

    The maximum allowed value for the group-level configuration of streams.num.standby.replicas

    + + + + + + +
    Type:int
    Default:2
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.streams.min.heartbeat.interval.ms

    +

    The minimum allowed value for the group-level configuration of streams.heartbeat.interval.ms

    + + + + + + +
    Type:int
    Default:5000 (5 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.streams.min.session.timeout.ms

    +

    The minimum allowed value for the group-level configuration of streams.session.timeout.ms

    + + + + + + +
    Type:int
    Default:45000 (45 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.streams.num.standby.replicas

    +

    The number of standby replicas for each task.

    + + + + + + +
    Type:int
    Default:0
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    group.streams.session.timeout.ms

    +

    The timeout to detect client failures when using the streams group protocol.

    + + + + + + +
    Type:int
    Default:45000 (45 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
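
    A minimal, illustrative broker snippet (example values only, not recommendations) showing how the streams group timing settings fit together: the group-level streams.session.timeout.ms and streams.heartbeat.interval.ms must fall within the broker-side min/max bounds described above.

    ```
    # Illustrative values only: the session timeout and heartbeat interval each
    # have to lie within their broker-enforced [min, max] range.
    group.streams.min.session.timeout.ms=45000
    group.streams.max.session.timeout.ms=60000
    group.streams.session.timeout.ms=45000
    group.streams.min.heartbeat.interval.ms=5000
    group.streams.max.heartbeat.interval.ms=15000
    group.streams.heartbeat.interval.ms=5000
    ```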

    initial.broker.registration.timeout.ms

    +

    When initially registering with the controller quorum, the number of milliseconds to wait before declaring failure and exiting the broker process.

    + + + + + + +
    Type:int
    Default:60000 (1 minute)
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    inter.broker.listener.name

    +

    Name of listener used for communication between brokers. If this is unset, the listener name is defined by security.inter.broker.protocol. It is an error to set this and security.inter.broker.protocol properties at the same time.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
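
    As a sketch of the mutual exclusivity noted above: set either inter.broker.listener.name or security.inter.broker.protocol, never both. The listener name INTERNAL is a placeholder.

    ```
    # Option A: select the inter-broker listener by name (INTERNAL is a placeholder).
    inter.broker.listener.name=INTERNAL
    # Option B: select it by security protocol instead; do not combine with Option A.
    #security.inter.broker.protocol=SSL
    ```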

    log.cleaner.backoff.ms

    +

    The amount of time to sleep when there are no logs to clean

    + + + + + + +
    Type:long
    Default:15000 (15 seconds)
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.cleaner.dedupe.buffer.size

    +

    The total memory used for log deduplication across all cleaner threads

    + + + + + + +
    Type:long
    Default:134217728
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.cleaner.delete.retention.ms

    +

    The amount of time to retain tombstone message markers for log compacted topics. This setting also gives a bound on the time in which a consumer must complete a read if it begins from offset 0, to ensure that it gets a valid snapshot of the final stage (otherwise tombstone messages may be collected before the consumer completes its scan).

    + + + + + + +
    Type:long
    Default:86400000 (1 day)
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.cleaner.enable

    +

    This configuration has been deprecated and will be removed in Kafka 5.0. To prepare for its removal, users should not set it to false. Enables the log cleaner process to run on the server. It should be enabled when using any topics with cleanup.policy=compact, including the internal offsets topic. If disabled, those topics will not be compacted and will continually grow in size.

    + + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    log.cleaner.io.buffer.load.factor

    +

    Log cleaner dedupe buffer load factor. The percentage full the dedupe buffer can become. A higher value will allow more log to be cleaned at once but will lead to more hash collisions

    + + + + + + +
    Type:double
    Default:0.9
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.cleaner.io.buffer.size

    +

    The total memory used for log cleaner I/O buffers across all cleaner threads

    + + + + + + +
    Type:int
    Default:524288
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.cleaner.io.max.bytes.per.second

    +

    The log cleaner will be throttled so that the sum of its read and write i/o will be less than this value on average

    + + + + + + +
    Type:double
    Default:1.7976931348623157E308
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.cleaner.max.compaction.lag.ms

    +

    The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.

    + + + + + + +
    Type:long
    Default:9223372036854775807
    Valid Values:[1,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.cleaner.min.cleanable.ratio

    +

    The minimum ratio of dirty log to total log for a log to be eligible for cleaning. If the log.cleaner.max.compaction.lag.ms or the log.cleaner.min.compaction.lag.ms configurations are also specified, then the log compactor considers the log eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the log.cleaner.min.compaction.lag.ms duration, or (ii) the log has had dirty (uncompacted) records for at most the log.cleaner.max.compaction.lag.ms period.

    + + + + + + +
    Type:double
    Default:0.5
    Valid Values:[0,...,1]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.cleaner.min.compaction.lag.ms

    +

    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.

    + + + + + + +
    Type:long
    Default:0
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.cleaner.threads

    +

    The number of background threads to use for log cleaning

    + + + + + + +
    Type:int
    Default:1
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.cleanup.policy

    +

    The default cleanup policy for segments beyond the retention window. A comma separated list of valid policies.

    + + + + + + +
    Type:list
    Default:delete
    Valid Values:[compact, delete]
    Importance:medium
    Update Mode:cluster-wide
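
    For example, a broker-wide default that both compacts and deletes old segments could look like the following sketch (illustrative only; individual topics may still set their own cleanup.policy).

    ```
    # Comma-separated list drawn from [compact, delete].
    log.cleanup.policy=compact,delete
    ```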

    log.index.interval.bytes

    +

    The interval with which we add an entry to the offset index.

    + + + + + + +
    Type:int
    Default:4096 (4 kibibytes)
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.index.size.max.bytes

    +

    The maximum size in bytes of the offset index

    + + + + + + +
    Type:int
    Default:10485760 (10 mebibytes)
    Valid Values:[4,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.local.retention.bytes

    +

    The maximum size that the local log segments of a partition can grow to before they become eligible for deletion. The default value is -2, which means the `log.retention.bytes` value is used. The effective value should always be less than or equal to the `log.retention.bytes` value.

    + + + + + + +
    Type:long
    Default:-2
    Valid Values:[-2,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.local.retention.ms

    +

    The number of milliseconds to keep the local log segments before they become eligible for deletion. The default value is -2, which means the `log.retention.ms` value is used. The effective value should always be less than or equal to the `log.retention.ms` value.

    + + + + + + +
    Type:long
    Default:-2
    Valid Values:[-2,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.message.timestamp.after.max.ms

    +

    This configuration sets the allowable timestamp difference between the message timestamp and the broker's timestamp. The message timestamp can be later than or equal to the broker's timestamp, with the maximum allowable difference determined by the value set in this configuration. If log.message.timestamp.type=CreateTime, the message will be rejected if the difference in timestamps exceeds this specified threshold. This configuration is ignored if log.message.timestamp.type=LogAppendTime.

    + + + + + + +
    Type:long
    Default:3600000 (1 hour)
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.message.timestamp.before.max.ms

    +

    This configuration sets the allowable timestamp difference between the broker's timestamp and the message timestamp. The message timestamp can be earlier than or equal to the broker's timestamp, with the maximum allowable difference determined by the value set in this configuration. If log.message.timestamp.type=CreateTime, the message will be rejected if the difference in timestamps exceeds this specified threshold. This configuration is ignored if log.message.timestamp.type=LogAppendTime.

    + + + + + + +
    Type:long
    Default:9223372036854775807
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.message.timestamp.type

    +

    Define whether the timestamp in the message is message create time or log append time. The value should be either CreateTime or LogAppendTime.

    + + + + + + +
    Type:string
    Default:CreateTime
    Valid Values:[CreateTime, LogAppendTime]
    Importance:medium
    Update Mode:cluster-wide
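
    A short sketch tying the three timestamp settings together (values are examples, not defaults): with CreateTime, records whose timestamps drift too far from the broker clock in either direction are rejected.

    ```
    log.message.timestamp.type=CreateTime
    # Reject records time-stamped more than 1 hour ahead of the broker clock...
    log.message.timestamp.after.max.ms=3600000
    # ...or more than 7 days behind it (example bound; the default is effectively unlimited).
    log.message.timestamp.before.max.ms=604800000
    ```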

    log.preallocate

    +

    Whether to preallocate the file when creating a new segment. If you are using Kafka on Windows, you probably need to set it to true.

    + + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    log.retention.check.interval.ms

    +

    The frequency in milliseconds that the log cleaner checks whether any log is eligible for deletion

    + + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    max.connection.creation.rate

    +

    The maximum connection creation rate we allow in the broker at any time. Listener-level limits may also be configured by prefixing the config name with the listener prefix, for example, listener.name.internal.max.connection.creation.rate. The broker-wide connection rate limit should be configured based on broker capacity, while listener limits should be configured based on application requirements. New connections will be throttled if either the listener or the broker limit is reached, with the exception of the inter-broker listener. Connections on the inter-broker listener will be throttled only when the listener-level rate limit is reached.

    + + + + + + +
    Type:int
    Default:2147483647
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    max.connections

    +

    The maximum number of connections we allow in the broker at any time. This limit is applied in addition to any per-IP limits configured using max.connections.per.ip. Listener-level limits may also be configured by prefixing the config name with the listener prefix, for example, listener.name.internal.max.connections. The broker-wide limit should be configured based on broker capacity, while listener limits should be configured based on application requirements. New connections are blocked if either the listener or broker limit is reached. Connections on the inter-broker listener are permitted even if the broker-wide limit is reached; in this case the least recently used connection on another listener will be closed.

    + + + + + + +
    Type:int
    Default:2147483647
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    max.connections.per.ip

    +

    The maximum number of connections we allow from each ip address. This can be set to 0 if there are overrides configured using max.connections.per.ip.overrides property. New connections from the ip address are dropped if the limit is reached.

    + + + + + + +
    Type:int
    Default:2147483647
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    max.connections.per.ip.overrides

    +

    A comma-separated list of per-ip or hostname overrides to the default maximum number of connections. An example value is "hostName:100,127.0.0.1:200"

    + + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide
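
    An illustrative combination of the connection limits above (hostnames and numbers are placeholders), using the documented "host:limit" comma-separated override form.

    ```
    # Broker-wide and per-IP caps (example values).
    max.connections=10000
    max.connections.per.ip=100
    # Per-host overrides in the documented "host:limit" comma-separated form.
    max.connections.per.ip.overrides=lb.example.com:500,127.0.0.1:200
    ```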

    max.incremental.fetch.session.cache.slots

    +

    The maximum number of total incremental fetch sessions that we will maintain. FetchSessionCache is sharded into 8 shards and the limit is equally divided among all shards. Sessions are allocated to each shard in round-robin. Only entries within a shard are considered eligible for eviction.

    + + + + + + +
    Type:int
    Default:1000
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    max.request.partition.size.limit

    +

    The maximum number of partitions that can be served in one request.

    + + + + + + +
    Type:int
    Default:2000
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    num.partitions

    +

    The default number of log partitions per topic

    + + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    principal.builder.class

    +

    The fully qualified name of a class that implements the KafkaPrincipalBuilder interface, which is used to build the KafkaPrincipal object used during authorization. If no principal builder is defined, the default behavior depends on the security protocol in use. For SSL authentication, the principal will be derived using the rules defined by ssl.principal.mapping.rules applied on the distinguished name from the client certificate if one is provided; otherwise, if client authentication is not required, the principal name will be ANONYMOUS. For SASL authentication, the principal will be derived using the rules defined by sasl.kerberos.principal.to.local.rules if GSSAPI is in use, and the SASL authentication ID for other mechanisms. For PLAINTEXT, the principal will be ANONYMOUS. Note that custom implementations of KafkaPrincipalBuilder are required to implement the KafkaPrincipalSerde interface, otherwise brokers will not be able to forward requests to the controller.

    + + + + + + +
    Type:class
    Default:org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    producer.purgatory.purge.interval.requests

    +

    The purge interval (in number of requests) of the producer request purgatory

    + + + + + + +
    Type:int
    Default:1000
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    queued.max.request.bytes

    +

    The number of queued bytes allowed before no more requests are read

    + + + + + + +
    Type:long
    Default:-1
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    remote.fetch.max.wait.ms

    +

    The maximum amount of time the server will wait before answering the remote fetch request. Note that the broker currently only fetches one partition per fetch request from the remote store. (KAFKA-14915)

    + + + + + + +
    Type:int
    Default:500
    Valid Values:[1,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    remote.list.offsets.request.timeout.ms

    +

    The maximum amount of time the server will wait for the remote list offsets request to complete.

    + + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    remote.log.manager.copier.thread.pool.size

    +

    Size of the thread pool used in scheduling tasks to copy segments.

    + + + + + + +
    Type:int
    Default:10
    Valid Values:[1,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    remote.log.manager.copy.max.bytes.per.second

    +

    The maximum number of bytes that can be copied from local storage to remote storage per second. This is a global limit for all the partitions that are being copied from local storage to remote storage. The default value is Long.MAX_VALUE, which means there is no limit on the number of bytes that can be copied per second.

    + + + + + + +
    Type:long
    Default:9223372036854775807
    Valid Values:[1,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    remote.log.manager.copy.quota.window.num

    +

    The number of samples to retain in memory for remote copy quota management. The default value is 11, which means there are 10 whole windows + 1 current window.

    + + + + + + +
    Type:int
    Default:11
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    remote.log.manager.copy.quota.window.size.seconds

    +

    The time span of each sample for remote copy quota management. The default value is 1 second.

    + + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    remote.log.manager.expiration.thread.pool.size

    +

    Size of the thread pool used in scheduling tasks to clean up the expired remote log segments.

    + + + + + + +
    Type:int
    Default:10
    Valid Values:[1,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    remote.log.manager.fetch.max.bytes.per.second

    +

    The maximum number of bytes that can be fetched from remote storage to local storage per second. This is a global limit for all the partitions that are being fetched from remote storage to local storage. The default value is Long.MAX_VALUE, which means there is no limit on the number of bytes that can be fetched per second.

    + + + + + + +
    Type:long
    Default:9223372036854775807
    Valid Values:[1,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    remote.log.manager.fetch.quota.window.num

    +

    The number of samples to retain in memory for remote fetch quota management. The default value is 11, which means there are 10 whole windows + 1 current window.

    + + + + + + +
    Type:int
    Default:11
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    remote.log.manager.fetch.quota.window.size.seconds

    +

    The time span of each sample for remote fetch quota management. The default value is 1 second.

    + + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    remote.log.manager.thread.pool.size

    +

    Size of the thread pool used in scheduling follower tasks to read the highest-uploaded remote-offset for follower partitions.

    + + + + + + +
    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    remote.log.metadata.manager.class.name

    +

    Fully qualified class name of `RemoteLogMetadataManager` implementation.

    + + + + + + +
    Type:string
    Default:org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager
    Valid Values:non-empty string
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    remote.log.metadata.manager.class.path

    +

    Class path of the `RemoteLogMetadataManager` implementation. If specified, the RemoteLogMetadataManager implementation and its dependent libraries will be loaded by a dedicated classloader which searches this class path before the Kafka broker class path. The syntax of this parameter is the same as the standard Java class path string.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    remote.log.metadata.manager.impl.prefix

    +

    Prefix used for properties to be passed to RemoteLogMetadataManager implementation. For example this value can be `rlmm.config.`.

    + + + + + + +
    Type:string
    Default:rlmm.config.
    Valid Values:non-empty string
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    remote.log.metadata.manager.listener.name

    +

    Listener name of the local broker that the RemoteLogMetadataManager implementation should connect to, if needed.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:non-empty string
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    remote.log.reader.max.pending.tasks

    +

    Maximum remote log reader thread pool task queue size. If the task queue is full, fetch requests are served with an error.

    + + + + + + +
    Type:int
    Default:100
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    remote.log.reader.threads

    +

    Size of the thread pool that is allocated for handling remote log reads.

    + + + + + + +
    Type:int
    Default:10
    Valid Values:[1,...]
    Importance:medium
    Update Mode:cluster-wide
    +
  • +
  • +

    remote.log.storage.manager.class.name

    +

    Fully qualified class name of `RemoteStorageManager` implementation.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:non-empty string
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    remote.log.storage.manager.class.path

    +

    Class path of the `RemoteStorageManager` implementation. If specified, the RemoteStorageManager implementation and its dependent libraries will be loaded by a dedicated classloader which searches this class path before the Kafka broker class path. The syntax of this parameter is the same as the standard Java class path string.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    remote.log.storage.manager.impl.prefix

    +

    Prefix used for properties to be passed to RemoteStorageManager implementation. For example this value can be `rsm.config.`.

    + + + + + + +
    Type:string
    Default:rsm.config.
    Valid Values:non-empty string
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    remote.log.storage.system.enable

    +

    Whether to enable tiered storage functionality in a broker or not. When it is true, the broker starts all the services required for the tiered storage functionality.

    + + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    Update Mode:read-only
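
    A minimal sketch for turning on tiered storage, assuming a hypothetical RemoteStorageManager plugin class (com.example.MyRemoteStorageManager) and a listener named PLAINTEXT; the class name, listener name, and retention value are placeholders, not shipped defaults.

    ```
    remote.log.storage.system.enable=true
    # Placeholder plugin class; a real RemoteStorageManager implementation is required.
    remote.log.storage.manager.class.name=com.example.MyRemoteStorageManager
    remote.log.storage.manager.impl.prefix=rsm.config.
    remote.log.metadata.manager.listener.name=PLAINTEXT
    # Keep roughly one hour of data on local disk; older segments are served from remote storage.
    log.local.retention.ms=3600000
    ```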

    replica.fetch.backoff.ms

    +

    The amount of time to sleep when a partition fetch error occurs.

    + + + + + + +
    Type:int
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    replica.fetch.max.bytes

    +

    The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. The maximum record batch size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic config).

    + + + + + + +
    Type:int
    Default:1048576 (1 mebibyte)
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    replica.fetch.response.max.bytes

    +

    Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. The maximum record batch size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic config).

    + + + + + + +
    Type:int
    Default:10485760 (10 mebibytes)
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    replica.selector.class

    +

    The fully qualified class name that implements ReplicaSelector. This is used by the broker to find the preferred read replica. By default, we use an implementation that returns the leader.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.client.callback.handler.class

    +

    The fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.

    + + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.enabled.mechanisms

    +

    The list of SASL mechanisms enabled in the Kafka server. The list may contain any mechanism for which a security provider is available. Only GSSAPI is enabled by default.

    + + + + + + +
    Type:list
    Default:GSSAPI
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    sasl.jaas.config

    +

    JAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described here. The format for the value is: loginModuleClass controlFlag (optionName=optionValue)*;. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;

    + + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker
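
    A hedged example of the listener-prefixed form described above, for a listener named SASL_SSL using SCRAM-SHA-256 (the credentials are placeholders).

    ```
    listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=\
        org.apache.kafka.common.security.scram.ScramLoginModule required \
        username="broker" \
        password="broker-secret";
    ```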

    sasl.kerberos.kinit.cmd

    +

    Kerberos kinit command path.

    + + + + + + +
    Type:string
    Default:/usr/bin/kinit
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    sasl.kerberos.min.time.before.relogin

    +

    Login thread sleep time between refresh attempts.

    + + + + + + +
    Type:long
    Default:60000
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    sasl.kerberos.principal.to.local.rules

    +

    A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form {username}/{hostname}@{REALM} are mapped to {username}. For more details on the format please see security authorization and acls. Note that this configuration is ignored if an extension of KafkaPrincipalBuilder is provided by the principal.builder.class configuration.

    + + + + + + +
    Type:list
    Default:DEFAULT
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    sasl.kerberos.service.name

    +

    The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    sasl.kerberos.ticket.renew.jitter

    +

    Percentage of random jitter added to the renewal time.

    + + + + + + +
    Type:double
    Default:0.05
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    sasl.kerberos.ticket.renew.window.factor

    +

    Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.

    + + + + + + +
    Type:double
    Default:0.8
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    sasl.login.callback.handler.class

    +

    The fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler

    + + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.login.class

    +

    The fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin

    + + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.login.refresh.buffer.seconds

    +

    The amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    + + + + + + +
    Type:short
    Default:300
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    sasl.login.refresh.min.period.seconds

    +

    The desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    + + + + + + +
    Type:short
    Default:60
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    sasl.login.refresh.window.factor

    +

    Login refresh thread will sleep until the specified window factor relative to the credential's lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    + + + + + + +
    Type:double
    Default:0.8
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    sasl.login.refresh.window.jitter

    +

    The maximum amount of random jitter relative to the credential's lifetime that is added to the login refresh thread's sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    + + + + + + +
    Type:double
    Default:0.05
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    sasl.mechanism.inter.broker.protocol

    +

    SASL mechanism used for inter-broker communication. Default is GSSAPI.

    + + + + + + +
    Type:string
    Default:GSSAPI
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    sasl.oauthbearer.assertion.algorithm

    +

    The algorithm the Apache Kafka client should use to sign the assertion sent to the identity provider. It is also used as the value of the OAuth alg (Algorithm) header in the JWT assertion.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + + +
    Type:string
    Default:RS256
    Valid Values:(case insensitive) [ES256, RS256]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.aud

    +

    The JWT aud (Audience) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.iss

    +

    The value to be used as the iss (Issuer) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.jti.include

    +

    Flag that determines if the JWT assertion should generate a unique ID for the JWT and include it in the jti (JWT ID) claim.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.sub

    +

    The value to be used as the sub (Subject) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.assertion.file

    +

    File that contains a pre-generated JWT assertion.

    The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when the file changes to allow for the file to be reloaded on modifications. This allows for "live" assertion rotation without restarting the Kafka client.

    The file contains the assertion in the serialized, three part JWT format:

    1. The header section is a base 64-encoded JWT header that contains values like alg (Algorithm), typ (Type, always the literal value JWT), etc.
    2. The payload section includes the base 64-encoded set of JWT claims, such as aud (Audience), iss (Issuer), sub (Subject), etc.
    3. The signature section is the signature computed over the concatenated header and payload sections using a private key

    See RFC 7519 and RFC 7515 for more details on the JWT and JWS formats.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, all other sasl.oauthbearer.assertion.* configurations are ignored.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.assertion.private.key.file

    +

    File that contains a private key in the standard PEM format which is used to sign the JWT assertion sent to the identity provider.

    The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when the file changes to allow for the file to be reloaded on modifications. This allows for "live" private key rotation without restarting the Kafka client.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.assertion.private.key.passphrase

    +

    The optional passphrase to decrypt the private key file specified by sasl.oauthbearer.assertion.private.key.file.

    Note: If the file referred to by sasl.oauthbearer.assertion.private.key.file is modified on the file system at runtime and it was created with a different passphrase than it was previously, the client will not be able to access the private key file because the passphrase is now out of date. For that reason, when using private key passphrases, either use the same passphrase each time, or—for improved security—restart the Kafka client using the new passphrase configuration.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.assertion.template.file

    +

    This optional configuration specifies the file containing the JWT headers and/or payload claims to be used when creating the JWT assertion.

    Not all identity providers require the same set of claims; some may require a given claim while others may prohibit it. In order to provide the most flexibility, this configuration allows the user to provide the static header values and claims that are to be included in the JWT.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.client.credentials.client.id

    +

    The ID (defined in/by the OAuth identity provider) to identify the client requesting the token.

    The client ID was previously stored as part of the sasl.jaas.config configuration with the key clientId. For backward compatibility, the clientId JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.client.credentials.client.id from configuration
    • clientId from JAAS

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.client.credentials.client.secret

    +

    The secret (defined by either the user or preassigned, depending on the identity provider) of the client requesting the token.

    The client secret was previously stored as part of the sasl.jaas.config configuration with the key clientSecret. For backward compatibility, the clientSecret JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.client.credentials.client.secret from configuration
    • clientSecret from JAAS

    + + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.url

    +

    The OAuth/OIDC provider URL from which the provider's JWKS (JSON Web Key Set) can be retrieved. The URL can be HTTP(S)-based or file-based. If the URL is HTTP(S)-based, the JWKS data will be retrieved from the OAuth/OIDC provider via the configured URL on broker startup. All then-current keys will be cached on the broker for incoming requests. If an authentication request is received for a JWT that includes a "kid" header claim value that isn't yet in the cache, the JWKS endpoint will be queried again on demand. However, the broker polls the URL every sasl.oauthbearer.jwks.endpoint.refresh.ms milliseconds to refresh the cache with any forthcoming keys before any JWT requests that include them are received. If the URL is file-based, the broker will load the JWKS file from a configured location on startup. In the event that the JWT includes a "kid" header value that isn't in the JWKS file, the broker will reject the JWT and authentication will fail.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.jwt.retriever.class

    +

    The fully-qualified class name of a JwtRetriever implementation used to request tokens from the identity provider.

    The default configuration value represents a class that maintains backward compatibility with previous versions of Apache Kafka. The default implementation uses the configuration to determine which concrete implementation to create.

    Other implementations that are provided include:

    • org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.FileJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever

    + + + + + + +
    Type:class
    Default:org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.jwt.validator.class

    +

    The fully-qualified class name of a JwtValidator implementation used to validate the JWT from the identity provider.

    The default validator (org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator) maintains backward compatibility with previous versions of Apache Kafka. The default validator uses configuration to determine which concrete implementation to create.

    The built-in JwtValidator implementations are:

    • org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator
    • org.apache.kafka.common.security.oauthbearer.ClientJwtValidator
    • org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator

    + + + + + + +
    Type:class
    Default:org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.scope

    +

    This is the level of access a client application is granted to a resource or API which is included in the token request. If provided, it should match one or more scopes configured in the identity provider.

    The scope was previously stored as part of the sasl.jaas.config configuration with the key scope. For backward compatibility, the scope JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.scope from configuration
    • scope from JAAS

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.token.endpoint.url

    +

    The URL for the OAuth/OIDC identity provider. If the URL is HTTP(S)-based, it is the issuer's token endpoint URL to which requests will be made to login based on the configuration in sasl.oauthbearer.jwt.retriever.class. If the URL is file-based, it specifies a file containing an access token (in JWT serialized form) issued by the OAuth/OIDC identity provider to use for authorization.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
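
    Putting the OAUTHBEARER settings above together, a sketch using the client-credentials flow against a hypothetical identity provider (all URLs, IDs, secrets, and scopes are placeholders).

    ```
    sasl.oauthbearer.token.endpoint.url=https://idp.example.com/oauth2/token
    sasl.oauthbearer.jwks.endpoint.url=https://idp.example.com/oauth2/jwks
    sasl.oauthbearer.client.credentials.client.id=kafka-broker
    sasl.oauthbearer.client.credentials.client.secret=change-me
    sasl.oauthbearer.scope=kafka
    ```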

    sasl.server.callback.handler.class

    +

    The fully qualified name of a SASL server callback handler class that implements the AuthenticateCallbackHandler interface. Server callback handlers must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.plain.sasl.server.callback.handler.class=com.example.CustomPlainCallbackHandler.

    + + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    sasl.server.max.receive.size

    +

    The maximum receive size allowed before and during initial SASL authentication. The default receive size is 512KB. GSSAPI limits requests to 64K, but we allow up to 512KB by default for custom SASL mechanisms. In practice, PLAIN, SCRAM and OAUTH mechanisms can use much smaller limits.

    + + + + + + +
    Type:int
    Default:524288
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    security.inter.broker.protocol

    +

    Security protocol used to communicate between brokers. It is an error to set this and inter.broker.listener.name properties at the same time.

    + + + + + + +
    Type:string
    Default:PLAINTEXT
    Valid Values:[PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    share.coordinator.append.linger.ms

    +

    The duration in milliseconds that the share coordinator will wait for writes to accumulate before flushing them to disk.

    + + + + + + +
    Type:int
    Default:5
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    share.coordinator.snapshot.update.records.per.snapshot

    +

    The number of update records the share coordinator writes between snapshot records.

    + + + + + + +
    Type:int
    Default:500
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    share.coordinator.threads

    +

    The number of threads used by the share coordinator.

    + + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    share.fetch.purgatory.purge.interval.requests

    +

    The purge interval (in number of requests) of the share fetch request purgatory

    + + + + + + +
    Type:int
    Default:1000
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    socket.connection.setup.timeout.max.ms

    +

    The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. To avoid connection storms, a randomization factor of 0.2 will be applied to the timeout resulting in a random range between 20% below and 20% above the computed value.

    + + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    socket.connection.setup.timeout.ms

    +

    The amount of time the client will wait for the socket connection to be established. If the connection is not built before the timeout elapses, clients will close the socket channel. This value is the initial backoff value and will increase exponentially for each consecutive connection failure, up to the socket.connection.setup.timeout.max.ms value.

    + + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    socket.listen.backlog.size

    +

    The maximum number of pending connections on the socket. On Linux, you may also need to configure the somaxconn and tcp_max_syn_backlog kernel parameters accordingly for the configuration to take effect.

    + + + + + + +
    Type:int
    Default:50
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only
    +
  • +
  • +

    ssl.cipher.suites

    +

    A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.

    + + + + + + +
    Type:list
    Default:""
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.client.auth

    +

    Configures the Kafka broker to request client authentication. The following settings are common:

    • ssl.client.auth=required If set to required, client authentication is required.
    • ssl.client.auth=requested This means client authentication is optional. Unlike required, with this setting a client can choose not to provide authentication information about itself.
    • ssl.client.auth=none This means client authentication is not needed.

    + + + + + + +
    Type:string
    Default:none
    Valid Values:[required, requested, none]
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.enabled.protocols

    +

    The list of protocols enabled for SSL connections. The default is 'TLSv1.2,TLSv1.3'. This means that clients and servers will prefer TLSv1.3 if both support it and fallback to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most use cases. Also see the config documentation for `ssl.protocol` to understand how it can impact the TLS version negotiation behavior.

    + + + + + + +
    Type:list
    Default:TLSv1.2,TLSv1.3
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.key.password

    +

    The password of the private key in the key store file or the PEM key specified in 'ssl.keystore.key'.

    + + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.keymanager.algorithm

    +

    The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.

    + + + + + + +
    Type:string
    Default:SunX509
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.keystore.certificate.chain

    +

    Certificate chain in the format specified by 'ssl.keystore.type'. Default SSL engine factory supports only PEM format with a list of X.509 certificates

    + + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.keystore.key

    +

    Private key in the format specified by 'ssl.keystore.type'. Default SSL engine factory supports only PEM format with PKCS#8 keys. If the key is encrypted, key password must be specified using 'ssl.key.password'

    + + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.keystore.location

    +

    The location of the key store file. This is optional for clients and can be used for two-way client authentication.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.keystore.password

    +

    The store password for the key store file. This is optional for client and only needed if 'ssl.keystore.location' is configured. Key store password is not supported for PEM format.

    + + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.keystore.type

    +

    The file format of the key store file. This is optional for client. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].

    + + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    Update Mode:per-broker
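
    An illustrative per-broker keystore block using the default JKS store type (paths and passwords are placeholders).

    ```
    ssl.keystore.type=JKS
    ssl.keystore.location=/etc/kafka/ssl/broker.keystore.jks
    ssl.keystore.password=keystore-secret
    ssl.key.password=key-secret
    ```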

    ssl.protocol

    +

    The SSL protocol used to generate the SSLContext. The default is 'TLSv1.3', which should be fine for most use cases. A typical alternative to the default is 'TLSv1.2'. Allowed values for this config are dependent on the JVM. Clients using the defaults for this config and 'ssl.enabled.protocols' will downgrade to 'TLSv1.2' if the server does not support 'TLSv1.3'. If this config is set to 'TLSv1.2', however, clients will not use 'TLSv1.3' even if it is one of the values in `ssl.enabled.protocols` and the server only supports 'TLSv1.3'.

    + + + + + + +
    Type:string
    Default:TLSv1.3
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.provider

    +

    The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.trustmanager.algorithm

    +

    The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.

    + + + + + + +
    Type:string
    Default:PKIX
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.truststore.certificates

    +

    Trusted certificates in the format specified by 'ssl.truststore.type'. Default SSL engine factory supports only PEM format with X.509 certificates.

    + + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.truststore.location

    +

    The location of the trust store file.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.truststore.password

    +

    The password for the trust store file. If a password is not set, trust store file configured will still be used, but integrity checking is disabled. Trust store password is not supported for PEM format.

    + + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    ssl.truststore.type

    +

    The file format of the trust store file. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].

    + + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    Update Mode:per-broker
    +
  • +
  • +

    alter.config.policy.class.name

    +

    The alter configs policy class that should be used for validation. The class should implement the org.apache.kafka.server.policy.AlterConfigPolicy interface.

    Note: This policy runs on the controller instead of the broker.

    + + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    alter.log.dirs.replication.quota.window.num

    +

    The number of samples to retain in memory for alter log dirs replication quotas

    + + + + + + +
    Type:int
    Default:11
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    alter.log.dirs.replication.quota.window.size.seconds

    +

    The time span of each sample for alter log dirs replication quotas

    + + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    authorizer.class.name

    +

    The fully qualified name of a class that implements org.apache.kafka.server.authorizer.Authorizer interface, which is used by the broker for authorization.

    + + + + + + +
    Type:string
    Default:""
    Valid Values:non-null string
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    client.quota.callback.class

    +

    The fully qualified name of a class that implements the ClientQuotaCallback interface, which is used to determine quota limits applied to client requests. By default, the stored <user> and <client-id> quotas are applied. For any given request, the most specific quota that matches the user principal of the session and the client-id of the request is applied.

    + + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    connection.failed.authentication.delay.ms

    +

    Connection close delay on failed authentication: this is the time (in milliseconds) by which connection close will be delayed on authentication failure. This must be configured to be less than connections.max.idle.ms to prevent connection timeout.

    + + + + + + +
    Type:int
    Default:100
    Valid Values:[0,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    controller.quorum.retry.backoff.ms

    +

    The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios. This value is the initial backoff value and will increase exponentially for each failed request, up to the retry.backoff.max.ms value.

    + + + + + + +
    Type:int
    Default:20
    Valid Values:[0,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    controller.quota.window.num

    +

    The number of samples to retain in memory for controller mutation quotas

    + + + + + + +
    Type:int
    Default:11
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    controller.quota.window.size.seconds

    +

    The time span of each sample for controller mutations quotas

    + + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    create.topic.policy.class.name

    +

    The create topic policy class that should be used for validation. The class should implement the org.apache.kafka.server.policy.CreateTopicPolicy interface.

    Note: This policy runs on the controller instead of the broker.

    + + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    delegation.token.expiry.check.interval.ms

    +

    Scan interval to remove expired delegation tokens.

    + + + + + + +
    Type:long
    Default:3600000 (1 hour)
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    kafka.metrics.polling.interval.secs

    +

    The metrics polling interval (in seconds) which can be used in kafka.metrics.reporters implementations.

    + + + + + + +
    Type:int
    Default:10
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    kafka.metrics.reporters

    +

    A list of classes to use as Yammer metrics custom reporters. The reporters should implement kafka.metrics.KafkaMetricsReporter trait. If a client wants to expose JMX operations on a custom reporter, the custom reporter needs to additionally implement an MBean trait that extends kafka.metrics.KafkaMetricsReporterMBean trait so that the registered MBean is compliant with the standard MBean convention.

    + + + + + + +
    Type:list
    Default:""
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    listener.security.protocol.map

    +

    Map between listener names and security protocols. This must be defined for the same security protocol to be usable in more than one port or IP. For example, internal and external traffic can be separated even if SSL is required for both. Concretely, the user could define listeners with names INTERNAL and EXTERNAL and this property as: INTERNAL:SSL,EXTERNAL:SSL. As shown, key and value are separated by a colon and map entries are separated by commas. Each listener name should only appear once in the map. Different security (SSL and SASL) settings can be configured for each listener by adding a normalised prefix (the listener name is lowercased) to the config name. For example, to set a different keystore for the INTERNAL listener, a config with name listener.name.internal.ssl.keystore.location would be set. If the config for the listener name is not set, the config will fallback to the generic config (i.e. ssl.keystore.location). Note that in KRaft a default mapping from the listener names defined by controller.listener.names to PLAINTEXT is assumed if no explicit mapping is provided and no other security protocol is in use.

    + + + + + + +
    Type:string
    Default:SASL_SSL:SASL_SSL,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT
    Valid Values:
    Importance:low
    Update Mode:per-broker
    +
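
    As a minimal sketch of the mapping and per-listener override described above (assuming hypothetical INTERNAL and EXTERNAL listener names and placeholder keystore paths, not a complete broker configuration):

    import java.util.Properties;

    // Sketch only: hypothetical INTERNAL/EXTERNAL listener names illustrating the
    // listener.security.protocol.map format and the lowercased per-listener prefix.
    public class ListenerMapExample {
        public static void main(String[] args) {
            Properties broker = new Properties();
            // key:value pairs separated by commas; each listener name appears once
            broker.setProperty("listener.security.protocol.map", "INTERNAL:SSL,EXTERNAL:SSL");
            broker.setProperty("listeners", "INTERNAL://0.0.0.0:9092,EXTERNAL://0.0.0.0:9093");
            // per-listener override: the listener name is lowercased in the config prefix
            broker.setProperty("listener.name.internal.ssl.keystore.location", "/etc/kafka/internal.keystore.jks");
            // EXTERNAL falls back to the generic ssl.keystore.location
            broker.setProperty("ssl.keystore.location", "/etc/kafka/default.keystore.jks");
            broker.forEach((k, v) -> System.out.println(k + "=" + v));
        }
    }
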
  • +
  • +

    log.dir.failure.timeout.ms

    +

    If the broker is unable to successfully communicate to the controller that some log directory has failed for longer than this time, the broker will fail and shut down.

    + + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    metadata.max.idle.interval.ms

    +

    This configuration controls how often the active controller should write no-op records to the metadata partition. If the value is 0, no-op records are not appended to the metadata partition. The default value is 500 milliseconds.

    + + + + + + +
    Type:int
    Default:500
    Valid Values:[0,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    metric.reporters

    +

    A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. When custom reporters are set and org.apache.kafka.common.metrics.JmxReporter is needed, it has to be explicitly added to the list.

    + + + + + + +
    Type:list
    Default:org.apache.kafka.common.metrics.JmxReporter
    Valid Values:
    Importance:low
    Update Mode:cluster-wide
    +
  • +
  • +

    metrics.num.samples

    +

    The number of samples maintained to compute metrics.

    + + + + + + +
    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    metrics.recording.level

    +

    The highest recording level for metrics. It has three levels for recording metrics - info, debug, and trace.

    INFO level records only essential metrics necessary for monitoring system performance and health. It collects vital data without gathering too much detail, making it suitable for production environments where minimal overhead is desired.

    DEBUG level records most metrics, providing more detailed information about the system's operation. It's useful for development and testing environments where you need deeper insights to debug and fine-tune the application.

    TRACE level records all possible metrics, capturing every detail about the system's performance and operation. It's best for controlled environments where in-depth analysis is required, though it can introduce significant overhead.

    + + + + + + +
    Type:string
    Default:INFO
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    metrics.sample.window.ms

    +

    The window of time a metrics sample is computed over.

    + + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    producer.id.expiration.ms

    +

    The time in ms that a topic partition leader will wait before expiring producer IDs. Producer IDs will not expire while a transaction associated to them is still ongoing. Note that producer IDs may expire sooner if the last write from the producer ID is deleted due to the topic's retention settings. Setting this value to be the same as or higher than delivery.timeout.ms can help prevent expiration during retries and protect against message duplication, but the default should be reasonable for most use cases.

    + + + + + + +
    Type:int
    Default:86400000 (1 day)
    Valid Values:[1,...]
    Importance:low
    Update Mode:cluster-wide
    +
  • +
  • +

    quota.window.num

    +

    The number of samples to retain in memory for client quotas

    + + + + + + +
    Type:int
    Default:11
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    quota.window.size.seconds

    +

    The time span of each sample for client quotas

    + + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    remote.log.index.file.cache.total.size.bytes

    +

    The total size of the space allocated to store index files fetched from remote storage in the local storage.

    + + + + + + +
    Type:long
    Default:1073741824 (1 gibibyte)
    Valid Values:[1,...]
    Importance:low
    Update Mode:cluster-wide
    +
  • +
  • +

    remote.log.manager.task.interval.ms

    +

    Interval at which the remote log manager runs scheduled tasks such as copying segments and cleaning up remote log segments.

    + + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    remote.log.metadata.custom.metadata.max.bytes

    +

    The maximum size of custom metadata in bytes that the broker should accept from a remote storage plugin. If custom metadata exceeds this limit, the updated segment metadata will not be stored, an attempt will be made to delete the copied data, and the remote copying task for this topic-partition will stop with an error.

    + + + + + + +
    Type:int
    Default:128
    Valid Values:[0,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    replication.quota.window.num

    +

    The number of samples to retain in memory for replication quotas

    + + + + + + +
    Type:int
    Default:11
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    replication.quota.window.size.seconds

    +

    The time span of each sample for replication quotas

    + + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    sasl.login.connect.timeout.ms

    +

    The (optional) value in milliseconds for the external authentication provider connection timeout. Currently applies only to OAUTHBEARER.

    + + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    sasl.login.read.timeout.ms

    +

    The (optional) value in milliseconds for the external authentication provider read timeout. Currently applies only to OAUTHBEARER.

    + + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    sasl.login.retry.backoff.max.ms

    +

    The (optional) value in milliseconds for the maximum wait between login attempts to the external authentication provider. Login uses an exponential backoff algorithm with an initial wait based on the sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.login.retry.backoff.max.ms setting. Currently applies only to OAUTHBEARER.

    + + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    sasl.login.retry.backoff.ms

    +

    The (optional) value in milliseconds for the initial wait between login attempts to the external authentication provider. Login uses an exponential backoff algorithm with an initial wait based on the sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.login.retry.backoff.max.ms setting. Currently applies only to OAUTHBEARER.

    + + + + + + +
    Type:long
    Default:100
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
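
    The exponential backoff described for the two settings above can be sketched as follows (the loop bound is illustrative only; the initial and maximum waits are the documented defaults):

    // Sketch of the capped exponential backoff described for sasl.login.retry.backoff.ms
    // and sasl.login.retry.backoff.max.ms (defaults 100 ms and 10000 ms). Illustrative only.
    public class LoginBackoffSketch {
        public static void main(String[] args) {
            long backoffMs = 100;        // sasl.login.retry.backoff.ms
            long maxBackoffMs = 10_000;  // sasl.login.retry.backoff.max.ms
            for (int attempt = 1; attempt <= 8; attempt++) {
                System.out.printf("attempt %d: wait %d ms before retrying login%n", attempt, backoffMs);
                backoffMs = Math.min(backoffMs * 2, maxBackoffMs);
            }
        }
    }
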
  • +
  • +

    sasl.oauthbearer.assertion.claim.exp.seconds

    +

    The number of seconds in the future for which the JWT is valid. The value is used to determine the JWT exp (Expiration) claim based on the current system time when the JWT is created.

    The formula to generate the exp claim is very simple:

    Let:

    x = the current timestamp in seconds, on client
    y = the value of this configuration

    Then:

    exp = x + y

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + + +
    Type:int
    Default:300
    Valid Values:[0,...,86400]
    Importance:low
    Update Mode:read-only
    +
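
    The exp formula above, together with the nbf formula described next, amounts to a simple offset from the current time; a small sketch using the documented defaults (300 seconds for exp, 60 seconds for nbf):

    import java.time.Instant;

    // Sketch of the exp/nbf computation described in these entries, using the documented
    // defaults (sasl.oauthbearer.assertion.claim.exp.seconds=300, ...nbf.seconds=60).
    public class AssertionClaimSketch {
        public static void main(String[] args) {
            long now = Instant.now().getEpochSecond(); // x: current timestamp in seconds
            long expSeconds = 300;                     // y for the exp claim
            long nbfSeconds = 60;                      // y for the nbf claim
            long exp = now + expSeconds;               // exp = x + y
            long nbf = now - nbfSeconds;               // nbf = x - y
            System.out.println("exp=" + exp + ", nbf=" + nbf);
        }
    }
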
  • +
  • +

    sasl.oauthbearer.assertion.claim.nbf.seconds

    +

    The number of seconds in the past from which the JWT is valid. The value is used to determine the JWT nbf (Not Before) claim based on the current system time when the JWT is created.

    The formula to generate the nbf claim is very simple:

    Let:

    x = the current timestamp in seconds, on client
    y = the value of this configuration

    Then:

    nbf = x - y

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + + +
    Type:int
    Default:60
    Valid Values:[0,...,3600]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.clock.skew.seconds

    +

    The (optional) value in seconds to allow for differences between the time of the OAuth/OIDC identity provider and the broker.

    + + + + + + +
    Type:int
    Default:30
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.expected.audience

    +

    The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. The JWT will be inspected for the standard OAuth "aud" claim and if this value is set, the broker will match the value from JWT's "aud" claim to see if there is an exact match. If there is no match, the broker will reject the JWT and authentication will fail.

    + + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.expected.issuer

    +

    The (optional) setting for the broker to use to verify that the JWT was created by the expected issuer. The JWT will be inspected for the standard OAuth "iss" claim and if this value is set, the broker will match it exactly against what is in the JWT's "iss" claim. If there is no match, the broker will reject the JWT and authentication will fail.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.refresh.ms

    +

    The (optional) value in milliseconds for the broker to wait between refreshing its JWKS (JSON Web Key Set) cache that contains the keys to verify the signature of the JWT.

    + + + + + + +
    Type:long
    Default:3600000 (1 hour)
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms

    +

    The (optional) value in milliseconds for the maximum wait between attempts to retrieve the JWKS (JSON Web Key Set) from the external authentication provider. JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting.

    + + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms

    +

    The (optional) value in milliseconds for the initial wait between JWKS (JSON Web Key Set) retrieval attempts from the external authentication provider. JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting.

    + + + + + + +
    Type:long
    Default:100
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.scope.claim.name

    +

    The OAuth claim for the scope is often named "scope", but this (optional) setting can provide a different name to use for the scope included in the JWT payload's claims if the OAuth/OIDC provider uses a different name for that claim.

    + + + + + + +
    Type:string
    Default:scope
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    sasl.oauthbearer.sub.claim.name

    +

    The OAuth claim for the subject is often named "sub", but this (optional) setting can provide a different name to use for the subject included in the JWT payload's claims if the OAuth/OIDC provider uses a different name for that claim.

    + + + + + + +
    Type:string
    Default:sub
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    security.providers

    +

    A list of configurable creator classes each returning a provider implementing security algorithms. These classes should implement the org.apache.kafka.common.security.auth.SecurityProviderCreator interface.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    ssl.allow.dn.changes

    +

    Indicates whether changes to the certificate distinguished name should be allowed during a dynamic reconfiguration of certificates or not.

    + + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    ssl.allow.san.changes

    +

    Indicates whether changes to the certificate subject alternative names should be allowed during a dynamic reconfiguration of certificates or not.

    + + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    ssl.endpoint.identification.algorithm

    +

    The endpoint identification algorithm to validate server hostname using server certificate.

    + + + + + + +
    Type:string
    Default:https
    Valid Values:
    Importance:low
    Update Mode:per-broker
    +
  • +
  • +

    ssl.engine.factory.class

    +

    The class of type org.apache.kafka.common.security.auth.SslEngineFactory to provide SSLEngine objects. Default value is org.apache.kafka.common.security.ssl.DefaultSslEngineFactory. Alternatively, setting this to org.apache.kafka.common.security.ssl.CommonNameLoggingSslEngineFactory will log the common name of expired SSL certificates used by clients to authenticate at any of the brokers with log level INFO. Note that this will cause a tiny delay during establishment of new connections from mTLS clients to brokers due to the extra code for examining the certificate chain provided by the client. Note further that the implementation uses a custom truststore based on the standard Java truststore and thus might be considered a security risk due to not being as mature as the standard one.

    + + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:low
    Update Mode:per-broker
    +
  • +
  • +

    ssl.principal.mapping.rules

    +

    A list of rules for mapping from the distinguished name of the client certificate to a short name. The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, the distinguished name of the X.500 certificate will be the principal. For more details on the format please see security authorization and acls. Note that this configuration is ignored if an extension of KafkaPrincipalBuilder is provided by the principal.builder.class configuration.

    + + + + + + +
    Type:string
    Default:DEFAULT
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    ssl.secure.random.implementation

    +

    The SecureRandom PRNG implementation to use for SSL cryptography operations.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    Update Mode:per-broker
    +
  • +
  • +

    telemetry.max.bytes

    +

    The maximum size (after compression if compression is used) of telemetry metrics pushed from a client to the broker. The default value is 1048576 (1 MB).

    + + + + + + +
    Type:int
    Default:1048576 (1 mebibyte)
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    transaction.abort.timed.out.transaction.cleanup.interval.ms

    +

    The interval at which to roll back transactions that have timed out.

    + + + + + + +
    Type:int
    Default:10000 (10 seconds)
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    transaction.partition.verification.enable

    +

    Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition

    + + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    Update Mode:cluster-wide
    +
  • +
  • +

    transaction.remove.expired.transaction.cleanup.interval.ms

    +

    The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing

    + + + + + + +
    Type:int
    Default:3600000 (1 hour)
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    transaction.two.phase.commit.enable

    +

    Allow participation in Two-Phase Commit (2PC) transactions with an external transaction coordinator

    + + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
+ diff --git a/static/41/generated/mirror_checkpoint_config.html b/static/41/generated/mirror_checkpoint_config.html new file mode 100644 index 000000000..9094bb49e --- /dev/null +++ b/static/41/generated/mirror_checkpoint_config.html @@ -0,0 +1,133 @@ +
    +
  • +

    groups

    +

    Consumer groups to replicate. Supports comma-separated group IDs and regexes.

    + + + + + +
    Type:list
    Default:.*
    Valid Values:
    Importance:high
    +
  • +
  • +

    groups.exclude

    +

    Exclude groups. Supports comma-separated group IDs and regexes. Excludes take precedence over includes.

    + + + + + +
    Type:list
    Default:console-consumer-.*,connect-.*,__.*
    Valid Values:
    Importance:high
    +
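
    A rough sketch of the include/exclude semantics described in the two entries above (comma-separated regexes, with excludes taking precedence over includes); this only illustrates the documented behaviour and is not the DefaultGroupFilter source:

    import java.util.List;
    import java.util.regex.Pattern;

    // Illustrative only: mirrors the documented semantics of `groups` and `groups.exclude`.
    public class GroupFilterSketch {
        static boolean matchesAny(List<Pattern> patterns, String group) {
            return patterns.stream().anyMatch(p -> p.matcher(group).matches());
        }

        public static void main(String[] args) {
            List<Pattern> include = List.of(Pattern.compile(".*"));
            List<Pattern> exclude = List.of(
                    Pattern.compile("console-consumer-.*"),
                    Pattern.compile("connect-.*"),
                    Pattern.compile("__.*"));
            for (String group : List.of("orders-app", "console-consumer-1234", "__consumer_offsets")) {
                boolean replicate = matchesAny(include, group) && !matchesAny(exclude, group);
                System.out.println(group + " -> replicate=" + replicate);
            }
        }
    }
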
  • +
  • +

    checkpoints.topic.replication.factor

    +

    Replication factor for checkpoints topic.

    + + + + + +
    Type:short
    Default:3
    Valid Values:
    Importance:low
    +
  • +
  • +

    consumer.poll.timeout.ms

    +

    Timeout when polling source cluster.

    + + + + + +
    Type:long
    Default:1000 (1 second)
    Valid Values:
    Importance:low
    +
  • +
  • +

    emit.checkpoints.enabled

    +

    Whether to replicate consumer offsets to target cluster.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    emit.checkpoints.interval.seconds

    +

    Frequency of checkpoints.

    + + + + + +
    Type:long
    Default:60
    Valid Values:
    Importance:low
    +
  • +
  • +

    group.filter.class

    +

    GroupFilter to use. Selects consumer groups to replicate.

    + + + + + +
    Type:class
    Default:org.apache.kafka.connect.mirror.DefaultGroupFilter
    Valid Values:
    Importance:low
    +
  • +
  • +

    offset-syncs.topic.location

    +

    The location (source/target) of the offset-syncs topic.

    + + + + + +
    Type:string
    Default:source
    Valid Values:[source, target]
    Importance:low
    +
  • +
  • +

    refresh.groups.enabled

    +

    Whether to periodically check for new consumer groups.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    refresh.groups.interval.seconds

    +

    Frequency of group refresh.

    + + + + + +
    Type:long
    Default:600
    Valid Values:
    Importance:low
    +
  • +
  • +

    sync.group.offsets.enabled

    +

    Whether to periodically write the translated offsets to __consumer_offsets topic in target cluster, as long as no active consumers in that group are connected to the target cluster

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:low
    +
  • +
  • +

    sync.group.offsets.interval.seconds

    +

    Frequency of consumer group offset sync.

    + + + + + +
    Type:long
    Default:60
    Valid Values:
    Importance:low
    +
  • +
  • +

    topic.filter.class

    +

    TopicFilter to use. Selects topics to replicate.

    + + + + + +
    Type:class
    Default:org.apache.kafka.connect.mirror.DefaultTopicFilter
    Valid Values:
    Importance:low
    +
  • +
+ diff --git a/static/41/generated/mirror_connector_config.html b/static/41/generated/mirror_connector_config.html new file mode 100644 index 000000000..25c8d171b --- /dev/null +++ b/static/41/generated/mirror_connector_config.html @@ -0,0 +1,933 @@ +
    +
  • +

    source.cluster.alias

    +

    Alias of source cluster

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.key.password

    +

    The password of the private key in the key store file or the PEM key specified in 'ssl.keystore.key'.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.certificate.chain

    +

    Certificate chain in the format specified by 'ssl.keystore.type'. Default SSL engine factory supports only PEM format with a list of X.509 certificates

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.key

    +

    Private key in the format specified by 'ssl.keystore.type'. Default SSL engine factory supports only PEM format with PKCS#8 keys. If the key is encrypted, key password must be specified using 'ssl.key.password'

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
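
    As the keystore-related entries above note, the default SSL engine factory accepts PEM material directly when ssl.keystore.type is set to PEM (see further below). A hedged sketch of such a configuration, with placeholder certificate and key contents:

    import java.util.Properties;

    // Sketch only: placeholder PEM material illustrating ssl.keystore.type=PEM together
    // with ssl.keystore.certificate.chain, ssl.keystore.key and ssl.key.password.
    public class PemKeystoreSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.setProperty("ssl.keystore.type", "PEM");
            props.setProperty("ssl.keystore.certificate.chain",
                    "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----");
            props.setProperty("ssl.keystore.key",
                    "-----BEGIN ENCRYPTED PRIVATE KEY-----\n...\n-----END ENCRYPTED PRIVATE KEY-----");
            props.setProperty("ssl.key.password", "changeit"); // only needed if the PEM key is encrypted
            props.forEach((k, v) -> System.out.println(k + "=" + v));
        }
    }
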
  • +
  • +

    ssl.keystore.location

    +

    The location of the key store file. This is optional for clients and can be used for two-way authentication for the client.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.password

    +

    The store password for the key store file. This is optional for client and only needed if 'ssl.keystore.location' is configured. Key store password is not supported for PEM format.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.certificates

    +

    Trusted certificates in the format specified by 'ssl.truststore.type'. Default SSL engine factory supports only PEM format with X.509 certificates.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.location

    +

    The location of the trust store file.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.password

    +

    The password for the trust store file. If a password is not set, trust store file configured will still be used, but integrity checking is disabled. Trust store password is not supported for PEM format.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    target.cluster.alias

    +

    Alias of target cluster. Used in metrics reporting.

    + + + + + +
    Type:string
    Default:target
    Valid Values:
    Importance:high
    +
  • +
  • +

    sasl.client.callback.handler.class

    +

    The fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.jaas.config

    +

    JAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described here. The format for the value is: loginModuleClass controlFlag (optionName=optionValue)*;. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
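
    For a client-side connection, the value format described above might look like the following sketch; the SASL/PLAIN mechanism and the credentials are illustrative placeholders, and brokers would additionally need the listener/mechanism prefix shown in the description:

    import java.util.Properties;

    // Sketch only: illustrative client-side SASL/PLAIN settings showing the
    // "loginModuleClass controlFlag (optionName=optionValue)*;" format.
    public class SaslJaasConfigSketch {
        public static void main(String[] args) {
            Properties client = new Properties();
            client.setProperty("security.protocol", "SASL_SSL");
            client.setProperty("sasl.mechanism", "PLAIN");
            client.setProperty("sasl.jaas.config",
                    "org.apache.kafka.common.security.plain.PlainLoginModule required "
                            + "username=\"mm2\" password=\"mm2-secret\";");
            client.forEach((k, v) -> System.out.println(k + "=" + v));
        }
    }
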
  • +
  • +

    sasl.kerberos.service.name

    +

    The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.login.callback.handler.class

    +

    The fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.login.class

    +

    The fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.mechanism

    +

    SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.

    + + + + + +
    Type:string
    Default:GSSAPI
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.algorithm

    +

    The algorithm the Apache Kafka client should use to sign the assertion sent to the identity provider. It is also used as the value of the OAuth alg (Algorithm) header in the JWT assertion.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:RS256
    Valid Values:(case insensitive) [ES256, RS256]
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.aud

    +

    The JWT aud (Audience) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.iss

    +

    The value to be used as the iss (Issuer) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.jti.include

    +

    Flag that determines if the JWT assertion should generate a unique ID for the JWT and include it in the jti (JWT ID) claim.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.sub

    +

    The value to be used as the sub (Subject) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.file

    +

    File that contains a pre-generated JWT assertion.

    The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when the file changes to allow for the file to be reloaded on modifications. This allows for "live" assertion rotation without restarting the Kafka client.

    The file contains the assertion in the serialized, three-part JWT format:

    1. The header section is a base64-encoded JWT header that contains values like alg (Algorithm), typ (Type, always the literal value JWT), etc.
    2. The payload section includes the base64-encoded set of JWT claims, such as aud (Audience), iss (Issuer), sub (Subject), etc.
    3. The signature section is the signature computed over the concatenated header and payload sections using a private key.

    See RFC 7519 and RFC 7515 for more details on the JWT and JWS formats.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, all other sasl.oauthbearer.assertion.* configurations are ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
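
    A small sketch of the three-part serialized layout described above (header.payload.signature, each part base64url-encoded); the claims and the signature are placeholders, not a real, signed assertion:

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    // Illustrative only: builds a placeholder three-part JWT to show the serialized
    // layout, then decodes the header and payload sections again.
    public class JwtAssertionLayoutSketch {
        public static void main(String[] args) {
            Base64.Encoder enc = Base64.getUrlEncoder().withoutPadding();
            String header = enc.encodeToString("{\"alg\":\"RS256\",\"typ\":\"JWT\"}".getBytes(StandardCharsets.UTF_8));
            String payload = enc.encodeToString("{\"iss\":\"example\",\"sub\":\"mm2\"}".getBytes(StandardCharsets.UTF_8));
            String signature = "signature-placeholder"; // a real assertion carries a cryptographic signature here
            String assertion = header + "." + payload + "." + signature;

            String[] parts = assertion.split("\\.");
            Base64.Decoder dec = Base64.getUrlDecoder();
            System.out.println("header : " + new String(dec.decode(parts[0]), StandardCharsets.UTF_8));
            System.out.println("payload: " + new String(dec.decode(parts[1]), StandardCharsets.UTF_8));
        }
    }
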
  • +
  • +

    sasl.oauthbearer.assertion.private.key.file

    +

    File that contains a private key in the standard PEM format which is used to sign the JWT assertion sent to the identity provider.

    The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when the file changes to allow for the file to be reloaded on modifications. This allows for "live" private key rotation without restarting the Kafka client.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.private.key.passphrase

    +

    The optional passphrase to decrypt the private key file specified by sasl.oauthbearer.assertion.private.key.file.

    Note: If the file referred to by sasl.oauthbearer.assertion.private.key.file is modified on the file system at runtime and was re-created with a different passphrase than before, the client will not be able to access the private key file because the configured passphrase is now out of date. For that reason, when using private key passphrases, either use the same passphrase each time, or, for improved security, restart the Kafka client using the new passphrase configuration.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.template.file

    +

    This optional configuration specifies the file containing the JWT headers and/or payload claims to be used when creating the JWT assertion.

    Not all identity providers require the same set of claims; some may require a given claim while others may prohibit it. In order to provide the most flexibility, this configuration allows the user to provide the static header values and claims that are to be included in the JWT.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.client.credentials.client.id

    +

    The ID (defined in/by the OAuth identity provider) to identify the client requesting the token.

    The client ID was previously stored as part of the sasl.jaas.config configuration with the key clientId. For backward compatibility, the clientId JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.client.credentials.client.id from configuration
    • clientId from JAAS

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.client.credentials.client.secret

    +

    The secret (defined by either the user or preassigned, depending on the identity provider) of the client requesting the token.

    The client secret was previously stored as part of the sasl.jaas.config configuration with the key clientSecret. For backward compatibility, the clientSecret JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.client.credentials.client.secret from configuration
    • clientSecret from JAAS

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.url

    +

    The OAuth/OIDC provider URL from which the provider's JWKS (JSON Web Key Set) can be retrieved. The URL can be HTTP(S)-based or file-based. If the URL is HTTP(S)-based, the JWKS data will be retrieved from the OAuth/OIDC provider via the configured URL on broker startup. All then-current keys will be cached on the broker for incoming requests. If an authentication request is received for a JWT that includes a "kid" header claim value that isn't yet in the cache, the JWKS endpoint will be queried again on demand. However, the broker polls the URL every sasl.oauthbearer.jwks.endpoint.refresh.ms milliseconds to refresh the cache with any forthcoming keys before any JWT requests that include them are received. If the URL is file-based, the broker will load the JWKS file from a configured location on startup. In the event that the JWT includes a "kid" header value that isn't in the JWKS file, the broker will reject the JWT and authentication will fail.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwt.retriever.class

    +

    The fully-qualified class name of a JwtRetriever implementation used to request tokens from the identity provider.

    The default configuration value represents a class that maintains backward compatibility with previous versions of Apache Kafka. The default implementation uses the configuration to determine which concrete implementation to create.

    Other implementations that are provided include:

    • org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.FileJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever

    + + + + + +
    Type:class
    Default:org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwt.validator.class

    +

    The fully-qualified class name of a JwtValidator implementation used to validate the JWT from the identity provider.

    The default validator (org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator) maintains backward compatibility with previous versions of Apache Kafka. The default validator uses configuration to determine which concrete implementation to create.

    The built-in JwtValidator implementations are:

    • org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator
    • org.apache.kafka.common.security.oauthbearer.ClientJwtValidator
    • org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator

    + + + + + +
    Type:class
    Default:org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.scope

    +

    This is the level of access a client application is granted to a resource or API which is included in the token request. If provided, it should match one or more scopes configured in the identity provider.

    The scope was previously stored as part of the sasl.jaas.config configuration with the key scope. For backward compatibility, the scope JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.scope from configuration
    • scope from JAAS

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.token.endpoint.url

    +

    The URL for the OAuth/OIDC identity provider. If the URL is HTTP(S)-based, it is the issuer's token endpoint URL to which requests will be made to login based on the configuration in sasl.oauthbearer.jwt.retriever.class. If the URL is file-based, it specifies a file containing an access token (in JWT serialized form) issued by the OAuth/OIDC identity provider to use for authorization.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
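
    Pulling together several of the OAUTHBEARER settings above, a hedged sketch of a client-credentials style configuration; the endpoint URL, client ID, secret, and scope are placeholders, and this is not a complete, validated setup:

    import java.util.Properties;

    // Sketch only: placeholder endpoint and credentials showing how the OAUTHBEARER
    // settings described in this section fit together on a client.
    public class OauthBearerClientSketch {
        public static void main(String[] args) {
            Properties client = new Properties();
            client.setProperty("security.protocol", "SASL_SSL");
            client.setProperty("sasl.mechanism", "OAUTHBEARER");
            client.setProperty("sasl.oauthbearer.token.endpoint.url", "https://idp.example.com/oauth2/token");
            client.setProperty("sasl.oauthbearer.client.credentials.client.id", "mm2-client");
            client.setProperty("sasl.oauthbearer.client.credentials.client.secret", "mm2-secret");
            client.setProperty("sasl.oauthbearer.scope", "kafka");
            client.forEach((k, v) -> System.out.println(k + "=" + v));
        }
    }
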
  • +
  • +

    security.protocol

    +

    Protocol used to communicate with brokers.

    + + + + + +
    Type:string
    Default:PLAINTEXT
    Valid Values:(case insensitive) [SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT]
    Importance:medium
    +
  • +
  • +

    ssl.enabled.protocols

    +

    The list of protocols enabled for SSL connections. The default is 'TLSv1.2,TLSv1.3'. This means that clients and servers will prefer TLSv1.3 if both support it and fallback to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most use cases. Also see the config documentation for `ssl.protocol` to understand how it can impact the TLS version negotiation behavior.

    + + + + + +
    Type:list
    Default:TLSv1.2,TLSv1.3
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.keystore.type

    +

    The file format of the key store file. This is optional for client. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].

    + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.protocol

    +

    The SSL protocol used to generate the SSLContext. The default is 'TLSv1.3', which should be fine for most use cases. A typical alternative to the default is 'TLSv1.2'. Allowed values for this config are dependent on the JVM. Clients using the defaults for this config and 'ssl.enabled.protocols' will downgrade to 'TLSv1.2' if the server does not support 'TLSv1.3'. If this config is set to 'TLSv1.2', however, clients will not use 'TLSv1.3' even if it is one of the values in `ssl.enabled.protocols` and the server only supports 'TLSv1.3'.

    + + + + + +
    Type:string
    Default:TLSv1.3
    Valid Values:
    Importance:medium
    +
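
    If an environment had to stay on TLSv1.2, the two settings above would typically be changed together, per the negotiation behaviour described; a minimal, illustrative sketch:

    import java.util.Properties;

    // Sketch only: pins the TLS version to 1.2 by adjusting both ssl.protocol and
    // ssl.enabled.protocols.
    public class TlsPinningSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.setProperty("ssl.protocol", "TLSv1.2");
            props.setProperty("ssl.enabled.protocols", "TLSv1.2");
            props.forEach((k, v) -> System.out.println(k + "=" + v));
        }
    }
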
  • +
  • +

    ssl.provider

    +

    The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.truststore.type

    +

    The file format of the trust store file. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].

    + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    +
  • +
  • +

    admin.timeout.ms

    +

    Timeout for administrative tasks, e.g. detecting new topics.

    + + + + + +
    Type:long
    Default:60000 (1 minute)
    Valid Values:
    Importance:low
    +
  • +
  • +

    enabled

    +

    Whether to replicate source->target.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    forwarding.admin.class

    +

    Class which extends ForwardingAdmin to define custom cluster resource management (topics, configs, etc.). The class must have a constructor with signature (Map<String, Object> config) that is used to configure a KafkaAdminClient and may also be used to configure clients for external systems if necessary.

    + + + + + +
    Type:class
    Default:org.apache.kafka.clients.admin.ForwardingAdmin
    Valid Values:
    Importance:low
    +
  • +
  • +

    metric.reporters

    +

    A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. When custom reporters are set and org.apache.kafka.common.metrics.JmxReporter is needed, it has to be explicitly added to the list.

    + + + + + +
    Type:list
    Default:org.apache.kafka.common.metrics.JmxReporter
    Valid Values:
    Importance:low
    +
  • +
  • +

    replication.policy.class

    +

    Class which defines the remote topic naming convention.

    + + + + + +
    Type:class
    Default:org.apache.kafka.connect.mirror.DefaultReplicationPolicy
    Valid Values:
    Importance:low
    +
  • +
  • +

    replication.policy.internal.topic.separator.enabled

    +

    Whether to use replication.policy.separator to control the names of topics used for checkpoints and offset syncs. By default, custom separators are used in these topic names; however, if upgrading MirrorMaker 2 from older versions that did not allow for these topic names to be customized, it may be necessary to set this property to 'false' in order to continue using the same names for those topics.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    replication.policy.separator

    +

    Separator used in remote topic naming convention.

    + + + + + +
    Type:string
    Default:.
    Valid Values:
    Importance:low
    +
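
    With the default replication policy, the separator above determines remote topic names: in effect, the source cluster alias plus the separator plus the original topic name. A hedged sketch of that naming convention (an illustration of the documented behaviour, not the DefaultReplicationPolicy source):

    // Illustrative only: the remote-topic naming convention implied by
    // replication.policy.separator and the default replication policy.
    public class RemoteTopicNameSketch {
        static String remoteTopic(String sourceClusterAlias, String separator, String topic) {
            return sourceClusterAlias + separator + topic;
        }

        public static void main(String[] args) {
            // e.g. a topic "orders" replicated from a cluster aliased "source"
            System.out.println(remoteTopic("source", ".", "orders")); // prints "source.orders"
        }
    }
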
  • +
  • +

    sasl.kerberos.kinit.cmd

    +

    Kerberos kinit command path.

    + + + + + +
    Type:string
    Default:/usr/bin/kinit
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.min.time.before.relogin

    +

    Login thread sleep time between refresh attempts.

    + + + + + +
    Type:long
    Default:60000
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.ticket.renew.jitter

    +

    Percentage of random jitter added to the renewal time.

    + + + + + +
    Type:double
    Default:0.05
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.ticket.renew.window.factor

    +

    Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.

    + + + + + +
    Type:double
    Default:0.8
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.connect.timeout.ms

    +

    The (optional) value in milliseconds for the external authentication provider connection timeout. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.read.timeout.ms

    +

    The (optional) value in milliseconds for the external authentication provider read timeout. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.buffer.seconds

    +

    The amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:short
    Default:300
    Valid Values:[0,...,3600]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.min.period.seconds

    +

    The desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:short
    Default:60
    Valid Values:[0,...,900]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.window.factor

    +

    Login refresh thread will sleep until the specified window factor relative to the credential's lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:double
    Default:0.8
    Valid Values:[0.5,...,1.0]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.window.jitter

    +

    The maximum amount of random jitter relative to the credential's lifetime that is added to the login refresh thread's sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:double
    Default:0.05
    Valid Values:[0.0,...,0.25]
    Importance:low
    +
  • +
  • +

    sasl.login.retry.backoff.max.ms

    +

    The (optional) value in milliseconds for the maximum wait between login attempts to the external authentication provider. Login uses an exponential backoff algorithm with an initial wait based on the sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.login.retry.backoff.max.ms setting. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.retry.backoff.ms

    +

    The (optional) value in milliseconds for the initial wait between login attempts to the external authentication provider. Login uses an exponential backoff algorithm with an initial wait based on the sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.login.retry.backoff.max.ms setting. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:long
    Default:100
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.exp.seconds

    +

    The number of seconds in the future for which the JWT is valid. The value is used to determine the JWT exp (Expiration) claim based on the current system time when the JWT is created.

    The formula to generate the exp claim is very simple:

    Let:

    x = the current timestamp in seconds, on client
    y = the value of this configuration

    Then:

    exp = x + y

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:int
    Default:300
    Valid Values:[0,...,86400]
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.nbf.seconds

    +

    The number of seconds in the past from which the JWT is valid. The value is used to determine the JWT nbf (Not Before) claim based on the current system time when the JWT is created.

    The formula to generate the nbf claim is very simple:

    Let:

    x = the current timestamp in seconds, on client
    y = the value of this configuration

    Then:

    nbf = x - y

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:int
    Default:60
    Valid Values:[0,...,3600]
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.clock.skew.seconds

    +

    The (optional) value in seconds to allow for differences between the time of the OAuth/OIDC identity provider and the broker.

    + + + + + +
    Type:int
    Default:30
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.expected.audience

    +

    The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. The JWT will be inspected for the standard OAuth "aud" claim and if this value is set, the broker will match the value from JWT's "aud" claim to see if there is an exact match. If there is no match, the broker will reject the JWT and authentication will fail.

    + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.expected.issuer

    +

    The (optional) setting for the broker to use to verify that the JWT was created by the expected issuer. The JWT will be inspected for the standard OAuth "iss" claim and if this value is set, the broker will match it exactly against what is in the JWT's "iss" claim. If there is no match, the broker will reject the JWT and authentication will fail.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.header.urlencode

    +

    The (optional) setting to enable the OAuth client to URL-encode the client_id and client_secret in the authorization header in accordance with RFC6749, see here for more details. The default value is set to 'false' for backward compatibility

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.refresh.ms

    +

    The (optional) value in milliseconds for the broker to wait between refreshing its JWKS (JSON Web Key Set) cache that contains the keys to verify the signature of the JWT.

    + + + + + +
    Type:long
    Default:3600000 (1 hour)
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms

    +

    The (optional) value in milliseconds for the maximum wait between attempts to retrieve the JWKS (JSON Web Key Set) from the external authentication provider. JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting.

    + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms

    +

    The (optional) value in milliseconds for the initial wait between JWKS (JSON Web Key Set) retrieval attempts from the external authentication provider. JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting.

    + + + + + +
    Type:long
    Default:100
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.scope.claim.name

    +

    The OAuth claim for the scope is often named "scope", but this (optional) setting can provide a different name to use for the scope included in the JWT payload's claims if the OAuth/OIDC provider uses a different name for that claim.

    + + + + + +
    Type:string
    Default:scope
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.oauthbearer.sub.claim.name

    +

    The OAuth claim for the subject is often named "sub", but this (optional) setting can provide a different name to use for the subject included in the JWT payload's claims if the OAuth/OIDC provider uses a different name for that claim.

    + + + + + +
    Type:string
    Default:sub
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.cipher.suites

    +

    A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.

    + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.endpoint.identification.algorithm

    +

    The endpoint identification algorithm to validate server hostname using server certificate.

    + + + + + +
    Type:string
    Default:https
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.engine.factory.class

    +

    The class of type org.apache.kafka.common.security.auth.SslEngineFactory to provide SSLEngine objects. Default value is org.apache.kafka.common.security.ssl.DefaultSslEngineFactory. Alternatively, setting this to org.apache.kafka.common.security.ssl.CommonNameLoggingSslEngineFactory will log the common name of expired SSL certificates used by clients to authenticate at any of the brokers with log level INFO. Note that this will cause a tiny delay during establishment of new connections from mTLS clients to brokers due to the extra code for examining the certificate chain provided by the client. Note further that the implementation uses a custom truststore based on the standard Java truststore and thus might be considered a security risk due to not being as mature as the standard one.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.keymanager.algorithm

    +

    The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.

    + + + + + +
    Type:string
    Default:SunX509
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.secure.random.implementation

    +

    The SecureRandom PRNG implementation to use for SSL cryptography operations.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.trustmanager.algorithm

    +

    The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.

    + + + + + +
    Type:string
    Default:PKIX
    Valid Values:
    Importance:low
    +
  • +
  • +

    name

    +

    Globally unique name to use for this connector.

    + + + + + +
    Type:string
    Default:
    Valid Values:non-empty string without ISO control characters
    Importance:high
    +
  • +
  • +

    connector.class

    +

    Name or alias of the class for this connector. Must be a subclass of org.apache.kafka.connect.connector.Connector. If the connector is org.apache.kafka.connect.file.FileStreamSinkConnector, you can either specify this full name, or use "FileStreamSink" or "FileStreamSinkConnector" to make the configuration a bit shorter

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    connector.plugin.version

    +

    Version of the connector.

    + + + + + +
    Type:string
    Default:null
    Valid Values:org.apache.kafka.connect.runtime.ConnectorConfig$PluginVersionValidator
    Importance:medium
    +
  • +
  • +

    tasks.max

    +

    Maximum number of tasks to use for this connector.

    + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:high
    +
  • +
  • +
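
    To tie the core settings above together, here is a minimal sketch of a connector configuration that could be handed to a Connect worker, using the FileStreamSink alias mentioned under connector.class. The connector name, topic, and file path are placeholders, and the topics/file keys are connector-specific settings rather than part of the common configuration listed here.

    ```java
    import java.util.HashMap;
    import java.util.Map;

    public class FileSinkConnectorConfig {
        // Minimal connector configuration using the core properties described above.
        public static Map<String, String> build() {
            Map<String, String> config = new HashMap<>();
            config.put("name", "local-file-sink");            // globally unique connector name
            config.put("connector.class", "FileStreamSink");  // alias for FileStreamSinkConnector
            config.put("tasks.max", "1");                     // run at most one task
            // Connector-specific settings (illustrative, not part of the common list above)
            config.put("topics", "demo-topic");
            config.put("file", "/tmp/sink.txt");
            return config;
        }
    }
    ```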

    tasks.max.enforce

    +

    (Deprecated) Whether to enforce that the tasks.max property is respected by the connector. By default, connectors that generate too many tasks will fail, and existing sets of tasks that exceed the tasks.max property will also be failed. If this property is set to false, then connectors will be allowed to generate more than the maximum number of tasks, and existing sets of tasks that exceed the tasks.max property will be allowed to run. This property is deprecated and will be removed in an upcoming major release.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    key.converter

    +

    Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the keys in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.

    + + + + + +
    Type:class
    Default:null
    Valid Values:A concrete subclass of org.apache.kafka.connect.storage.Converter, A class with a public, no-argument constructor
    Importance:low
    +
  • +
  • +

    key.converter.plugin.version

    +

    Version of the key converter.

    + + + + + +
    Type:string
    Default:null
    Valid Values:org.apache.kafka.connect.runtime.ConnectorConfig$PluginVersionValidator
    Importance:low
    +
  • +
  • +

    value.converter

    +

    Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.

    + + + + + +
    Type:class
    Default:null
    Valid Values:A concrete subclass of org.apache.kafka.connect.storage.Converter, A class with a public, no-argument constructor
    Importance:low
    +
  • +
  • +

    value.converter.plugin.version

    +

    Version of the value converter.

    + + + + + +
    Type:string
    Default:null
    Valid Values:org.apache.kafka.connect.runtime.ConnectorConfig$PluginVersionValidator
    Importance:low
    +
  • +
  • +

    header.converter

    +

    HeaderConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the header values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro. By default, the SimpleHeaderConverter is used to serialize header values to strings and deserialize them by inferring the schemas.

    + + + + + +
    Type:class
    Default:null
    Valid Values:A concrete subclass of org.apache.kafka.connect.storage.HeaderConverter, A class with a public, no-argument constructor
    Importance:low
    +
  • +
  • +
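
    As a sketch of how a single connector can override the worker-level serialization settings above, the snippet below switches the key and value converters to Kafka's bundled JSON converter. The schemas.enable option is a converter-specific setting assumed here for illustration.

    ```java
    import java.util.HashMap;
    import java.util.Map;

    public class JsonConverterOverride {
        // Override the worker-level converters for a single connector.
        public static Map<String, String> apply(Map<String, String> connectorConfig) {
            Map<String, String> config = new HashMap<>(connectorConfig);
            config.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
            config.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
            // schemas.enable is a JsonConverter-specific option, shown here as an assumption
            config.put("value.converter.schemas.enable", "false");
            return config;
        }
    }
    ```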

    header.converter.plugin.version

    +

    Version of the header converter.

    + + + + + +
    Type:string
    Default:null
    Valid Values:org.apache.kafka.connect.runtime.ConnectorConfig$PluginVersionValidator
    Importance:low
    +
  • +
  • +

    config.action.reload

    +

    The action that Connect should take on the connector when changes in external configuration providers result in a change in the connector's configuration properties. A value of 'none' indicates that Connect will do nothing. A value of 'restart' indicates that Connect should restart/reload the connector with the updated configuration properties. The restart may actually be scheduled in the future if the external configuration provider indicates that a configuration value will expire in the future.

    + + + + + +
    Type:string
    Default:restart
    Valid Values:[none, restart]
    Importance:low
    +
  • +
  • +

    transforms

    +

    Aliases for the transformations to be applied to records.

    + + + + + +
    Type:list
    Default:""
    Valid Values:non-null string, unique transformation aliases
    Importance:low
    +
  • +
  • +

    predicates

    +

    Aliases for the predicates used by transformations.

    + + + + + +
    Type:list
    Default:""
    Valid Values:non-null string, unique predicate aliases
    Importance:low
    +
  • +
  • +
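
    The transforms and predicates lists only declare aliases; the per-alias settings live under the transforms.<alias>. and predicates.<alias>. prefixes. The sketch below is illustrative and assumes the bundled InsertField and TopicNameMatches plugins; the alias, field, and pattern values are arbitrary.

    ```java
    import java.util.HashMap;
    import java.util.Map;

    public class TransformAliasExample {
        // Declare a transformation alias, its settings, and a predicate that gates it.
        public static Map<String, String> apply(Map<String, String> connectorConfig) {
            Map<String, String> config = new HashMap<>(connectorConfig);
            config.put("transforms", "addTimestamp");
            config.put("transforms.addTimestamp.type",
                    "org.apache.kafka.connect.transforms.InsertField$Value");
            config.put("transforms.addTimestamp.timestamp.field", "ingest_ts");
            config.put("predicates", "isOrders");
            config.put("predicates.isOrders.type",
                    "org.apache.kafka.connect.transforms.predicates.TopicNameMatches");
            config.put("predicates.isOrders.pattern", "orders\\..*");
            config.put("transforms.addTimestamp.predicate", "isOrders");
            return config;
        }
    }
    ```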

    errors.retry.timeout

    +

    The maximum duration in milliseconds that a failed operation will be reattempted. The default is 0, which means no retries will be attempted. Use -1 for infinite retries.

    + + + + + +
    Type:long
    Default:0
    Valid Values:
    Importance:medium
    +
  • +
  • +

    errors.retry.delay.max.ms

    +

    The maximum duration in milliseconds between consecutive retry attempts. Jitter will be added to the delay once this limit is reached to prevent thundering herd issues.

    + + + + + +
    Type:long
    Default:60000 (1 minute)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    errors.tolerance

    +

    Behavior for tolerating errors during connector operation. 'none' is the default value and signals that any error will result in an immediate connector task failure; 'all' changes the behavior to skip over problematic records.

    + + + + + +
    Type:string
    Default:none
    Valid Values:[none, all]
    Importance:medium
    +
  • +
  • +

    errors.log.enable

    +

    If true, write each error and the details of the failed operation and problematic record to the Connect application log. This is 'false' by default, so that only errors that are not tolerated are reported.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    +
  • +
  • +
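
    Putting the errors.* settings above together, a tolerant connector configuration might look like the following sketch; the specific timeout and delay values are arbitrary examples.

    ```java
    import java.util.HashMap;
    import java.util.Map;

    public class ErrorToleranceExample {
        // Skip bad records, retry transient failures, and log every error.
        public static Map<String, String> apply(Map<String, String> connectorConfig) {
            Map<String, String> config = new HashMap<>(connectorConfig);
            config.put("errors.tolerance", "all");            // do not fail the task on bad records
            config.put("errors.retry.timeout", "300000");     // retry failed operations for up to 5 minutes
            config.put("errors.retry.delay.max.ms", "30000"); // cap the delay between retries at 30 seconds
            config.put("errors.log.enable", "true");          // log tolerated errors as well
            return config;
        }
    }
    ```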

    errors.log.include.messages

    +

    Whether to include in the log the Connect record that resulted in a failure. For sink records, the topic, partition, offset, and timestamp will be logged. For source records, the key and value (and their schemas), all headers, and the timestamp, Kafka topic, Kafka partition, source partition, and source offset will be logged. This is 'false' by default, which will prevent record keys, values, and headers from being written to log files.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    +
  • +
diff --git a/static/41/generated/mirror_heartbeat_config.html new file mode 100644 index 000000000..f74447fee --- /dev/null +++ b/static/41/generated/mirror_heartbeat_config.html @@ -0,0 +1,33 @@

diff --git a/static/41/generated/mirror_source_config.html new file mode 100644 index 000000000..99301a033 --- /dev/null +++ b/static/41/generated/mirror_source_config.html @@ -0,0 +1,183 @@
    +
  • +

    config.properties.exclude

    +

    Topic config properties that should not be replicated. Supports comma-separated property names and regexes.

    + + + + + +
    Type:list
    Default:follower\.replication\.throttled\.replicas,leader\.replication\.throttled\.replicas,message\.timestamp\.difference\.max\.ms,message\.timestamp\.type,unclean\.leader\.election\.enable,min\.insync\.replicas
    Valid Values:
    Importance:high
    +
  • +
  • +

    topics

    +

    Topics to replicate. Supports comma-separated topic names and regexes.

    + + + + + +
    Type:list
    Default:.*
    Valid Values:
    Importance:high
    +
  • +
  • +

    topics.exclude

    +

    Excluded topics. Supports comma-separated topic names and regexes. Excludes take precedence over includes.

    + + + + + +
    Type:list
    Default:mm2.*\.internal,.*\.replica,__.*
    Valid Values:
    Importance:high
    +
  • +
  • +
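
    As an illustration of the topic filters above, the sketch below replicates only topics matching orders.* while keeping default-style exclusions. The pattern values are placeholders, and replication.factor is described later in this list.

    ```java
    import java.util.HashMap;
    import java.util.Map;

    public class MirrorTopicFilterExample {
        // Replicate "orders" topics only, keeping default-style exclusions.
        public static Map<String, String> build() {
            Map<String, String> config = new HashMap<>();
            config.put("topics", "orders\\..*");
            config.put("topics.exclude", "mm2.*\\.internal,.*\\.replica,__.*");
            config.put("replication.factor", "3"); // replication factor for newly created remote topics
            return config;
        }
    }
    ```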

    config.property.filter.class

    +

    ConfigPropertyFilter to use. Selects topic config properties to replicate.

    + + + + + +
    Type:class
    Default:org.apache.kafka.connect.mirror.DefaultConfigPropertyFilter
    Valid Values:
    Importance:low
    +
  • +
  • +

    consumer.poll.timeout.ms

    +

    Timeout when polling source cluster.

    + + + + + +
    Type:long
    Default:1000 (1 second)
    Valid Values:
    Importance:low
    +
  • +
  • +

    emit.offset-syncs.enabled

    +

    Whether to store the new offset of the replicated records in offset-syncs topic or not. MirrorCheckpointConnector will not be able to sync group offsets or emit checkpoints if emit.checkpoints.enabled and/or sync.group.offsets.enabled are enabled while emit.offset-syncs.enabled is disabled.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    heartbeats.replication.enabled

    +

    Whether to replicate the heartbeats topics even when the topic filter does not include them. If set to true, heartbeats topics identified by the replication policy will always be replicated, regardless of the topic filter configuration. If set to false, heartbeats topics will only be replicated if the topic filter allows.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    offset-syncs.topic.location

    +

    The location (source/target) of the offset-syncs topic.

    + + + + + +
    Type:string
    Default:source
    Valid Values:[source, target]
    Importance:low
    +
  • +
  • +

    offset-syncs.topic.replication.factor

    +

    Replication factor for offset-syncs topic.

    + + + + + +
    Type:short
    Default:3
    Valid Values:
    Importance:low
    +
  • +
  • +

    offset.lag.max

    +

    How out-of-sync a remote partition can be before it is resynced.

    + + + + + +
    Type:long
    Default:100
    Valid Values:
    Importance:low
    +
  • +
  • +

    refresh.topics.enabled

    +

    Whether to periodically check for new topics and partitions.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    refresh.topics.interval.seconds

    +

    Frequency of topic refresh.

    + + + + + +
    Type:long
    Default:600
    Valid Values:
    Importance:low
    +
  • +
  • +

    replication.factor

    +

    Replication factor for newly created remote topics.

    + + + + + +
    Type:int
    Default:2
    Valid Values:
    Importance:low
    +
  • +
  • +

    sync.topic.acls.enabled

    +

    Whether to periodically configure remote topic ACLs to match their corresponding upstream topics.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    sync.topic.acls.interval.seconds

    +

    Frequency of topic ACL sync.

    + + + + + +
    Type:long
    Default:600
    Valid Values:
    Importance:low
    +
  • +
  • +

    sync.topic.configs.enabled

    +

    Whether to periodically configure remote topics to match their corresponding upstream topics.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    sync.topic.configs.interval.seconds

    +

    Frequency of topic config sync.

    + + + + + +
    Type:long
    Default:600
    Valid Values:
    Importance:low
    +
  • +
  • +

    topic.filter.class

    +

    TopicFilter to use. Selects topics to replicate.

    + + + + + +
    Type:class
    Default:org.apache.kafka.connect.mirror.DefaultTopicFilter
    Valid Values:
    Importance:low
    +
  • +
diff --git a/static/41/generated/producer_config.html new file mode 100644 index 000000000..1f763e0a2 --- /dev/null +++ b/static/41/generated/producer_config.html @@ -0,0 +1,1123 @@
    +
  • +

    key.serializer

    +

    Serializer class for key that implements the org.apache.kafka.common.serialization.Serializer interface.

    + + + + + +
    Type:class
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    value.serializer

    +

    Serializer class for value that implements the org.apache.kafka.common.serialization.Serializer interface.

    + + + + + +
    Type:class
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    bootstrap.servers

    +

    A list of host/port pairs used to establish the initial connection to the Kafka cluster. Clients use this list to bootstrap and discover the full set of Kafka brokers. While the order of servers in the list does not matter, we recommend including more than one server to ensure resilience if any servers are down. This list does not need to contain the entire set of brokers, as Kafka clients automatically manage and update connections to the cluster efficiently. This list must be in the form host1:port1,host2:port2,....

    + + + + + +
    Type:list
    Default:""
    Valid Values:non-null string
    Importance:high
    +
  • +
  • +
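
    The three properties above (key.serializer, value.serializer, and bootstrap.servers) are enough to construct a working producer. A minimal sketch, assuming string keys and values and placeholder broker addresses and topic name:

    ```java
    import java.util.Properties;

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;

    public class MinimalProducer {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "host1:9092,host2:9092");        // placeholder brokers
            props.put("key.serializer", StringSerializer.class.getName());
            props.put("value.serializer", StringSerializer.class.getName());

            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
            }
        }
    }
    ```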

    buffer.memory

    +

    The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the server the producer will block for max.block.ms after which it will fail with an exception.

    This setting should correspond roughly to the total memory the producer will use, but is not a hard bound since not all memory the producer uses is used for buffering. Some additional memory will be used for compression (if compression is enabled) as well as for maintaining in-flight requests.

    + + + + + +
    Type:long
    Default:33554432
    Valid Values:[0,...]
    Importance:high
    +
  • +
  • +

    compression.type

    +

    The compression type for all data generated by the producer. The default is none (i.e. no compression). Valid values are none, gzip, snappy, lz4, or zstd. Compression is of full batches of data, so the efficacy of batching will also impact the compression ratio (more batching means better compression).

    + + + + + +
    Type:string
    Default:none
    Valid Values:[none, gzip, snappy, lz4, zstd]
    Importance:high
    +
  • +
  • +

    retries

    +

    Number of times to retry a request that fails with a transient error. Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. Requests will be retried this many times until they succeed, fail with a non-transient error, or the delivery.timeout.ms expires. Note that this automatic retry will simply resend the same record upon receiving the error. Setting a value of zero will disable this automatic retry behaviour, so that the transient errors will be propagated to the application to be handled. Users should generally prefer to leave this config unset and instead use delivery.timeout.ms to control retry behavior.

    Enabling idempotence requires this config value to be greater than 0. If conflicting configurations are set and idempotence is not explicitly enabled, idempotence is disabled.

    Allowing retries while setting enable.idempotence to false and max.in.flight.requests.per.connection to greater than 1 will potentially change the ordering of records because if two batches are sent to a single partition, and the first fails and is retried but the second succeeds, then the records in the second batch may appear first.

    + + + + + +
    Type:int
    Default:2147483647
    Valid Values:[0,...,2147483647]
    Importance:high
    +
  • +
  • +
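
    As the description above suggests, it is usually better to leave retries unset and bound the overall send with delivery.timeout.ms (described further down in this list). A sketch of that style of tuning, with arbitrary example values:

    ```java
    import java.util.Properties;

    public class DeliveryTimeoutTuning {
        // Bound the overall send instead of tuning retries directly.
        public static Properties build() {
            Properties props = new Properties();
            props.put("delivery.timeout.ms", "120000"); // total time to report success or failure
            props.put("request.timeout.ms", "30000");   // per-request wait for a broker response
            props.put("linger.ms", "5");                // delivery.timeout.ms >= request.timeout.ms + linger.ms
            return props;
        }
    }
    ```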

    ssl.key.password

    +

    The password of the private key in the key store file or the PEM key specified in 'ssl.keystore.key'.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.certificate.chain

    +

    Certificate chain in the format specified by 'ssl.keystore.type'. Default SSL engine factory supports only PEM format with a list of X.509 certificates

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.key

    +

    Private key in the format specified by 'ssl.keystore.type'. Default SSL engine factory supports only PEM format with PKCS#8 keys. If the key is encrypted, key password must be specified using 'ssl.key.password'

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.location

    +

    The location of the key store file. This is optional for clients and can be used for two-way authentication of the client.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.password

    +

    The store password for the key store file. This is optional for clients and only needed if 'ssl.keystore.location' is configured. Key store password is not supported for PEM format.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.certificates

    +

    Trusted certificates in the format specified by 'ssl.truststore.type'. Default SSL engine factory supports only PEM format with X.509 certificates.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.location

    +

    The location of the trust store file.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.password

    +

    The password for the trust store file. If a password is not set, trust store file configured will still be used, but integrity checking is disabled. Trust store password is not supported for PEM format.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +
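
    Combining the ssl.* settings above, a TLS-enabled client configuration might look like the following sketch. The file paths and passwords are placeholders, and the keystore entries are only needed for mutual TLS; security.protocol is described later in this list.

    ```java
    import java.util.Properties;

    public class TlsClientConfig {
        public static Properties build() {
            Properties props = new Properties();
            props.put("security.protocol", "SSL");                                    // TLS-encrypted connections
            props.put("ssl.truststore.location", "/etc/kafka/client.truststore.jks"); // placeholder path
            props.put("ssl.truststore.password", "changeit");                         // placeholder password
            // Keystore settings are only needed for mutual TLS (client authentication)
            props.put("ssl.keystore.location", "/etc/kafka/client.keystore.jks");
            props.put("ssl.keystore.password", "changeit");
            props.put("ssl.key.password", "changeit");
            return props;
        }
    }
    ```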

    batch.size

    +

    The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes.

    No attempt will be made to batch records larger than this size.

    Requests sent to brokers will contain multiple batches, one for each partition with data available to be sent.

    A small batch size will make batching less common and may reduce throughput (a batch size of zero will disable batching entirely). A very large batch size may use memory a bit more wastefully as we will always allocate a buffer of the specified batch size in anticipation of additional records.

    Note: This setting gives the upper bound of the batch size to be sent. If we have fewer than this many bytes accumulated for this partition, we will 'linger' for the linger.ms time waiting for more records to show up. This linger.ms setting defaults to 5, which means the producer will wait for 5ms or until the record batch is of batch.size (whichever happens first) before sending the record batch. Note that broker backpressure can result in a higher effective linger time than this setting. The default changed from 0 to 5 in Apache Kafka 4.0 as the efficiency gains from larger batches typically result in similar or lower producer latency despite the increased linger.

    + + + + + +
    Type:int
    Default:16384
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    client.dns.lookup

    +

    Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again (both the JVM and the OS cache DNS name lookups, however). If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips.

    + + + + + +
    Type:string
    Default:use_all_dns_ips
    Valid Values:[use_all_dns_ips, resolve_canonical_bootstrap_servers_only]
    Importance:medium
    +
  • +
  • +

    client.id

    +

    An id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.

    + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:medium
    +
  • +
  • +

    compression.gzip.level

    +

    The compression level to use if compression.type is set to gzip.

    + + + + + +
    Type:int
    Default:-1
    Valid Values:[1,...,9] or -1
    Importance:medium
    +
  • +
  • +

    compression.lz4.level

    +

    The compression level to use if compression.type is set to lz4.

    + + + + + +
    Type:int
    Default:9
    Valid Values:[1,...,17]
    Importance:medium
    +
  • +
  • +

    compression.zstd.level

    +

    The compression level to use if compression.type is set to zstd.

    + + + + + +
    Type:int
    Default:3
    Valid Values:[-131072,...,22]
    Importance:medium
    +
  • +
  • +

    connections.max.idle.ms

    +

    Close idle connections after the number of milliseconds specified by this config.

    + + + + + +
    Type:long
    Default:540000 (9 minutes)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    delivery.timeout.ms

    +

    An upper bound on the time to report success or failure after a call to send() returns. This limits the total time that a record will be delayed prior to sending, the time to await acknowledgement from the broker (if expected), and the time allowed for retriable send failures. The producer may report failure to send a record earlier than this config if either an unrecoverable error is encountered, the retries have been exhausted, or the record is added to a batch which reached an earlier delivery expiration deadline. The value of this config should be greater than or equal to the sum of request.timeout.ms and linger.ms.

    + + + + + +
    Type:int
    Default:120000 (2 minutes)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    linger.ms

    +

    The producer groups together any records that arrive in between request transmissions into a single batched request. Normally this occurs only under load when records arrive faster than they can be sent out. However in some circumstances the client may want to reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount of artificial delay—that is, rather than immediately sending out a record, the producer will wait for up to the given delay to allow other records to be sent so that the sends can be batched together. This can be thought of as analogous to Nagle's algorithm in TCP. This setting gives the upper bound on the delay for batching: once we get batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if we have fewer than this many bytes accumulated for this partition we will 'linger' for the specified time waiting for more records to show up. This setting defaults to 5 (i.e. 5ms delay). Setting linger.ms=50, for example, would have the effect of reducing the number of requests sent but would add up to 50ms of latency to records sent in the absence of load. The default changed from 0 to 5 in Apache Kafka 4.0 as the efficiency gains from larger batches typically result in similar or lower producer latency despite the increased linger.

    + + + + + +
    Type:long
    Default:5
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +
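
    batch.size, linger.ms, compression.type, and buffer.memory interact: larger batches and a longer linger generally improve compression and throughput at the cost of latency. A throughput-oriented sketch with arbitrary example values:

    ```java
    import java.util.Properties;

    public class ThroughputTuning {
        public static Properties build() {
            Properties props = new Properties();
            props.put("batch.size", "65536");       // 64 KiB batches per partition
            props.put("linger.ms", "20");           // wait up to 20 ms for more records
            props.put("compression.type", "lz4");   // whole batches are compressed together
            props.put("buffer.memory", "67108864"); // 64 MiB of total buffering
            return props;
        }
    }
    ```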

    max.block.ms

    +

    The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. For send() this timeout bounds the total time waiting for both metadata fetch and buffer allocation (blocking in the user-supplied serializers or partitioner is not counted against this timeout). For partitionsFor() this timeout bounds the time spent waiting for metadata if it is unavailable. The transaction-related methods always block, but may timeout if the transaction coordinator could not be discovered or did not respond within the timeout.

    + + + + + +
    Type:long
    Default:60000 (1 minute)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    max.request.size

    +

    The maximum size of a request in bytes. This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. This is also effectively a cap on the maximum uncompressed record batch size. Note that the server has its own cap on the record batch size (after compression if compression is enabled) which may be different from this.

    + + + + + +
    Type:int
    Default:1048576
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    partitioner.class

    +

    Determines which partition to send a record to when records are produced. Available options are:

    • If not set, the default partitioning logic is used. This strategy sends records to a partition until at least batch.size bytes are produced to the partition. It works as follows:
      1. If no partition is specified but a key is present, choose a partition based on a hash of the key.
      2. If no partition or key is present, choose the sticky partition that changes when at least batch.size bytes are produced to the partition.
    • org.apache.kafka.clients.producer.RoundRobinPartitioner: A partitioning strategy where each record in a series of consecutive records is sent to a different partition, regardless of whether the 'key' is provided or not, until partitions run out and the process starts over again. Note: There's a known issue that will cause uneven distribution when a new batch is created. See KAFKA-9965 for more detail.

    Implementing the org.apache.kafka.clients.producer.Partitioner interface allows you to plug in a custom partitioner.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +
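
    A custom partitioner is just a class implementing org.apache.kafka.clients.producer.Partitioner that is registered via partitioner.class (using its fully qualified class name). The sketch below is a hypothetical example, not one of the built-in strategies described above: it pins keyless records to partition 0 and hashes everything else.

    ```java
    import java.util.Map;

    import org.apache.kafka.clients.producer.Partitioner;
    import org.apache.kafka.common.Cluster;
    import org.apache.kafka.common.utils.Utils;

    /** Routes records with a null key to partition 0 and hashes everything else. */
    public class KeyOrZeroPartitioner implements Partitioner {

        @Override
        public void configure(Map<String, ?> configs) {
            // No configuration needed for this sketch.
        }

        @Override
        public int partition(String topic, Object key, byte[] keyBytes,
                             Object value, byte[] valueBytes, Cluster cluster) {
            int numPartitions = cluster.partitionsForTopic(topic).size();
            if (keyBytes == null) {
                return 0; // all keyless records land on partition 0
            }
            return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
        }

        @Override
        public void close() {
            // Nothing to clean up.
        }
    }
    ```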

    partitioner.ignore.keys

    +

    When set to 'true' the producer won't use record keys to choose a partition. If 'false', producer would choose a partition based on a hash of the key when a key is present. Note: this setting has no effect if a custom partitioner is used.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    +
  • +
  • +

    receive.buffer.bytes

    +

    The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.

    + + + + + +
    Type:int
    Default:32768 (32 kibibytes)
    Valid Values:[-1,...]
    Importance:medium
    +
  • +
  • +

    request.timeout.ms

    +

    The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. This should be larger than replica.lag.time.max.ms (a broker configuration) to reduce the possibility of message duplication due to unnecessary producer retries.

    + + + + + +
    Type:int
    Default:30000 (30 seconds)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    sasl.client.callback.handler.class

    +

    The fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.jaas.config

    +

    JAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described here. The format for the value is: loginModuleClass controlFlag (optionName=optionValue)*;. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +
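
    For a client, sasl.jaas.config is typically combined with security.protocol and sasl.mechanism (both described later in this list). A sketch using the PLAIN login module with placeholder credentials:

    ```java
    import java.util.Properties;

    public class SaslPlainClientConfig {
        public static Properties build() {
            Properties props = new Properties();
            props.put("security.protocol", "SASL_SSL"); // SASL authentication over TLS
            props.put("sasl.mechanism", "PLAIN");
            props.put("sasl.jaas.config",
                    "org.apache.kafka.common.security.plain.PlainLoginModule required "
                            + "username=\"alice\" password=\"alice-secret\";"); // placeholder credentials
            return props;
        }
    }
    ```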

    sasl.kerberos.service.name

    +

    The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.login.callback.handler.class

    +

    The fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.login.class

    +

    The fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.mechanism

    +

    SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.

    + + + + + +
    Type:string
    Default:GSSAPI
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.algorithm

    +

    The algorithm the Apache Kafka client should use to sign the assertion sent to the identity provider. It is also used as the value of the OAuth alg (Algorithm) header in the JWT assertion.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:RS256
    Valid Values:(case insensitive) [ES256, RS256]
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.aud

    +

    The JWT aud (Audience) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.iss

    +

    The value to be used as the iss (Issuer) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.jti.include

    +

    Flag that determines if the JWT assertion should generate a unique ID for the JWT and include it in the jti (JWT ID) claim.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.claim.sub

    +

    The value to be used as the sub (Subject) claim which will be included in the client JWT assertion created locally.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.file

    +

    File that contains a pre-generated JWT assertion.

    The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when the file changes to allow for the file to be reloaded on modifications. This allows for "live" assertion rotation without restarting the Kafka client.

    The file contains the assertion in the serialized, three part JWT format:

    1. The header section is a base 64-encoded JWT header that contains values like alg (Algorithm), typ (Type, always the literal value JWT), etc.
    2. The payload section includes the base 64-encoded set of JWT claims, such as aud (Audience), iss (Issuer), sub (Subject), etc.
    3. The signature section is the concatenated header and payload sections that was signed using a private key

    See RFC 7519 and RFC 7515 for more details on the JWT and JWS formats.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, all other sasl.oauthbearer.assertion.* configurations are ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.private.key.file

    +

    File that contains a private key in the standard PEM format which is used to sign the JWT assertion sent to the identity provider.

    The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when the file changes to allow for the file to be reloaded on modifications. This allows for "live" private key rotation without restarting the Kafka client.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.private.key.passphrase

    +

    The optional passphrase to decrypt the private key file specified by sasl.oauthbearer.assertion.private.key.file.

    Note: If the file referred to by sasl.oauthbearer.assertion.private.key.file is replaced at runtime with a key encrypted using a different passphrase, the client will no longer be able to access the private key because the configured passphrase is out of date. For that reason, when using private key passphrases, either reuse the same passphrase each time, or (for improved security) restart the Kafka client with the new passphrase configuration.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.assertion.template.file

    +

    This optional configuration specifies the file containing the JWT headers and/or payload claims to be used when creating the JWT assertion.

    Not all identity providers require the same set of claims; some may require a given claim while others may prohibit it. In order to provide the most flexibility, this configuration allows the user to provide the static header values and claims that are to be included in the JWT.

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.client.credentials.client.id

    +

    The ID (defined in/by the OAuth identity provider) to identify the client requesting the token.

    The client ID was previously stored as part of the sasl.jaas.config configuration with the key clientId. For backward compatibility, the clientId JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.client.credentials.client.id from configuration
    • clientId from JAAS

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.client.credentials.client.secret

    +

    The secret (defined by either the user or preassigned, depending on the identity provider) of the client requesting the token.

    The client secret was previously stored as part of the sasl.jaas.config configuration with the key clientSecret. For backward compatibility, the clientSecret JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.client.credentials.client.secret from configuration
    • clientSecret from JAAS

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwks.endpoint.url

    +

    The OAuth/OIDC provider URL from which the provider's JWKS (JSON Web Key Set) can be retrieved. The URL can be HTTP(S)-based or file-based. If the URL is HTTP(S)-based, the JWKS data will be retrieved from the OAuth/OIDC provider via the configured URL on broker startup. All then-current keys will be cached on the broker for incoming requests. If an authentication request is received for a JWT that includes a "kid" header claim value that isn't yet in the cache, the JWKS endpoint will be queried again on demand. However, the broker polls the URL every sasl.oauthbearer.jwks.endpoint.refresh.ms milliseconds to refresh the cache with any forthcoming keys before any JWT requests that include them are received. If the URL is file-based, the broker will load the JWKS file from a configured location on startup. In the event that the JWT includes a "kid" header value that isn't in the JWKS file, the broker will reject the JWT and authentication will fail.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwt.retriever.class

    +

    The fully-qualified class name of a JwtRetriever implementation used to request tokens from the identity provider.

    The default configuration value represents a class that maintains backward compatibility with previous versions of Apache Kafka. The default implementation uses the configuration to determine which concrete implementation to create.

    Other implementations that are provided include:

    • org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.FileJwtRetriever
    • org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever

    + + + + + +
    Type:class
    Default:org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.jwt.validator.class

    +

    The fully-qualified class name of a JwtValidator implementation used to validate the JWT from the identity provider.

    The default validator (org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator) maintains backward compatibility with previous versions of Apache Kafka. The default validator uses configuration to determine which concrete implementation to create.

    The built-in JwtValidator implementations are:

    • org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator
    • org.apache.kafka.common.security.oauthbearer.ClientJwtValidator
    • org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator

    + + + + + +
    Type:class
    Default:org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.scope

    +

    This is the level of access a client application is granted to a resource or API which is included in the token request. If provided, it should match one or more scopes configured in the identity provider.

    The scope was previously stored as part of the sasl.jaas.config configuration with the key scope. For backward compatibility, the scope JAAS option can still be used, but it is deprecated and will be removed in a future version.

    Order of precedence:

    • sasl.oauthbearer.scope from configuration
    • scope from JAAS

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.oauthbearer.token.endpoint.url

    +

    The URL for the OAuth/OIDC identity provider. If the URL is HTTP(S)-based, it is the issuer's token endpoint URL to which requests will be made to login based on the configuration in sasl.oauthbearer.jwt.retriever.class. If the URL is file-based, it specifies a file containing an access token (in JWT serialized form) issued by the OAuth/OIDC identity provider to use for authorization.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +
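
    Combining the OAuth settings above, a client using the client-credentials grant might be configured as in the sketch below. The URL, client ID, secret, and scope are placeholders, and depending on the Kafka version additional login callback or JWT retriever settings may also be required.

    ```java
    import java.util.Properties;

    public class OAuthClientCredentialsConfig {
        public static Properties build() {
            Properties props = new Properties();
            props.put("security.protocol", "SASL_SSL");
            props.put("sasl.mechanism", "OAUTHBEARER");
            props.put("sasl.oauthbearer.token.endpoint.url",
                    "https://idp.example.com/oauth2/token");                          // placeholder endpoint
            props.put("sasl.oauthbearer.client.credentials.client.id", "my-client");     // placeholder
            props.put("sasl.oauthbearer.client.credentials.client.secret", "my-secret"); // placeholder
            props.put("sasl.oauthbearer.scope", "kafka");                                 // optional scope
            return props;
        }
    }
    ```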

    security.protocol

    +

    Protocol used to communicate with brokers.

    + + + + + +
    Type:string
    Default:PLAINTEXT
    Valid Values:(case insensitive) [SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT]
    Importance:medium
    +
  • +
  • +

    send.buffer.bytes

    +

    The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.

    + + + + + +
    Type:int
    Default:131072 (128 kibibytes)
    Valid Values:[-1,...]
    Importance:medium
    +
  • +
  • +

    socket.connection.setup.timeout.max.ms

    +

    The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. To avoid connection storms, a randomization factor of 0.2 will be applied to the timeout resulting in a random range between 20% below and 20% above the computed value.

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    socket.connection.setup.timeout.ms

    +

    The amount of time the client will wait for the socket connection to be established. If the connection is not built before the timeout elapses, clients will close the socket channel. This value is the initial backoff value and will increase exponentially for each consecutive connection failure, up to the socket.connection.setup.timeout.max.ms value.

    + + + + + +
    Type:long
    Default:10000 (10 seconds)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.enabled.protocols

    +

    The list of protocols enabled for SSL connections. The default is 'TLSv1.2,TLSv1.3'. This means that clients and servers will prefer TLSv1.3 if both support it and fallback to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most use cases. Also see the config documentation for `ssl.protocol` to understand how it can impact the TLS version negotiation behavior.

    + + + + + +
    Type:list
    Default:TLSv1.2,TLSv1.3
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.keystore.type

    +

    The file format of the key store file. This is optional for clients. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].

    + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.protocol

    +

    The SSL protocol used to generate the SSLContext. The default is 'TLSv1.3', which should be fine for most use cases. A typical alternative to the default is 'TLSv1.2'. Allowed values for this config are dependent on the JVM. Clients using the defaults for this config and 'ssl.enabled.protocols' will downgrade to 'TLSv1.2' if the server does not support 'TLSv1.3'. If this config is set to 'TLSv1.2', however, clients will not use 'TLSv1.3' even if it is one of the values in `ssl.enabled.protocols` and the server only supports 'TLSv1.3'.

    + + + + + +
    Type:string
    Default:TLSv1.3
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.provider

    +

    The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.truststore.type

    +

    The file format of the trust store file. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].

    + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    +
  • +
  • +

    acks

    +

    The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. The following settings are allowed:

    • acks=0 If set to zero then the producer will not wait for any acknowledgment from the server at all. The record will be immediately added to the socket buffer and considered sent. No guarantee can be made that the server has received the record in this case, and the retries configuration will not take effect (as the client won't generally know of any failures). The offset given back for each record will always be set to -1.
    • acks=1 This will mean the leader will write the record to its local log but will respond without awaiting full acknowledgement from all followers. In this case should the leader fail immediately after acknowledging the record but before the followers have replicated it then the record will be lost.
    • acks=all This means the leader will wait for the full set of in-sync replicas to acknowledge the record. This guarantees that the record will not be lost as long as at least one in-sync replica remains alive. This is the strongest available guarantee. This is equivalent to the acks=-1 setting.

    Note that enabling idempotence requires this config value to be 'all'. If conflicting configurations are set and idempotence is not explicitly enabled, idempotence is disabled.

    + + + + + +
    Type:string
    Default:all
    Valid Values:[all, -1, 0, 1]
    Importance:low
    +
  • +
  • +

    enable.idempotence

    +

    When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. Note that enabling idempotence requires max.in.flight.requests.per.connection to be less than or equal to 5 (with message ordering preserved for any allowable value), retries to be greater than 0, and acks must be 'all'.

    Idempotence is enabled by default if no conflicting configurations are set. If conflicting configurations are set and idempotence is not explicitly enabled, idempotence is disabled. If idempotence is explicitly enabled and conflicting configurations are set, a ConfigException is thrown.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +
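
    For the strongest delivery guarantees, acks and enable.idempotence are usually set together; idempotence also requires max.in.flight.requests.per.connection (described below) to stay at 5 or less. A minimal sketch:

    ```java
    import java.util.Properties;

    public class DurableProducerConfig {
        public static Properties build() {
            Properties props = new Properties();
            props.put("acks", "all");                                // wait for the full set of in-sync replicas
            props.put("enable.idempotence", "true");                 // exactly one copy of each message in the log
            props.put("max.in.flight.requests.per.connection", "5"); // must be <= 5 when idempotence is enabled
            return props;
        }
    }
    ```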

    enable.metrics.push

    +

    Whether to enable pushing of client metrics to the cluster, if the cluster has a client metrics subscription which matches this client.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    interceptor.classes

    +

    A list of classes to use as interceptors. Implementing the org.apache.kafka.clients.producer.ProducerInterceptor interface allows you to intercept (and possibly mutate) the records received by the producer before they are published to the Kafka cluster. By default, there are no interceptors.

    + + + + + +
    Type:list
    Default:""
    Valid Values:non-null string
    Importance:low
    +
  • +
  • +
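
    An interceptor is a class implementing org.apache.kafka.clients.producer.ProducerInterceptor, registered by adding its fully qualified class name to interceptor.classes. The sketch below is a hypothetical interceptor that counts acknowledgements without modifying records.

    ```java
    import java.util.Map;
    import java.util.concurrent.atomic.AtomicLong;

    import org.apache.kafka.clients.producer.ProducerInterceptor;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.clients.producer.RecordMetadata;

    /** Counts successful and failed sends without mutating the records. */
    public class CountingInterceptor implements ProducerInterceptor<String, String> {

        private final AtomicLong successes = new AtomicLong();
        private final AtomicLong failures = new AtomicLong();

        @Override
        public void configure(Map<String, ?> configs) {
            // No configuration needed for this sketch.
        }

        @Override
        public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
            return record; // pass the record through unchanged
        }

        @Override
        public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
            if (exception == null) {
                successes.incrementAndGet();
            } else {
                failures.incrementAndGet();
            }
        }

        @Override
        public void close() {
            System.out.printf("sent=%d failed=%d%n", successes.get(), failures.get());
        }
    }
    ```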

    max.in.flight.requests.per.connection

    +

    The maximum number of unacknowledged requests the client will send on a single connection before blocking. Note that if this configuration is set to be greater than 1 and enable.idempotence is set to false, there is a risk of message reordering after a failed send due to retries (i.e., if retries are enabled); if retries are disabled or if enable.idempotence is set to true, ordering will be preserved. Additionally, enabling idempotence requires the value of this configuration to be less than or equal to 5, because broker only retains at most 5 batches for each producer. If the value is more than 5, previous batches may be removed on broker side.

    + + + + + +
    Type:int
    Default:5
    Valid Values:[1,...]
    Importance:low
    +
  • +
  • +

    metadata.max.age.ms

    +

    The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    metadata.max.idle.ms

    +

    Controls how long the producer will cache metadata for a topic that's idle. If the elapsed time since a topic was last produced to exceeds the metadata idle duration, then the topic's metadata is forgotten and the next access to it will force a metadata fetch request.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[5000,...]
    Importance:low
    +
  • +
  • +

    metadata.recovery.rebootstrap.trigger.ms

    +

    If a client configured to rebootstrap using metadata.recovery.strategy=rebootstrap is unable to obtain metadata from any of the brokers in the last known metadata for this interval, the client repeats the bootstrap process using the bootstrap.servers configuration.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    metadata.recovery.strategy

    +

    Controls how the client recovers when none of the brokers known to it is available. If set to none, the client fails. If set to rebootstrap, the client repeats the bootstrap process using bootstrap.servers. Rebootstrapping is useful when a client communicates with brokers so infrequently that the set of brokers may change entirely before the client refreshes metadata. Metadata recovery is triggered when all last-known brokers appear unavailable simultaneously. Brokers appear unavailable when disconnected and no current retry attempt is in-progress. Consider increasing reconnect.backoff.ms and reconnect.backoff.max.ms and decreasing socket.connection.setup.timeout.ms and socket.connection.setup.timeout.max.ms for the client. Rebootstrap is also triggered if connection cannot be established to any of the brokers for metadata.recovery.rebootstrap.trigger.ms milliseconds or if server requests rebootstrap.

    + + + + + +
    Type:string
    Default:rebootstrap
    Valid Values:(case insensitive) [REBOOTSTRAP, NONE]
    Importance:low
    +
  • +
  • +

    metric.reporters

    +

    A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. When custom reporters are set and org.apache.kafka.common.metrics.JmxReporter is needed, it has to be explicitly added to the list.

    + + + + + +
    Type:list
    Default:org.apache.kafka.common.metrics.JmxReporter
    Valid Values:non-null string
    Importance:low
    +
  • +
  • +

    metrics.num.samples

    +

    The number of samples maintained to compute metrics.

    + + + + + +
    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:low
    +
  • +
  • +

    metrics.recording.level

    +

    The highest recording level for metrics. It has three levels for recording metrics - info, debug, and trace.

    INFO level records only essential metrics necessary for monitoring system performance and health. It collects vital data without gathering too much detail, making it suitable for production environments where minimal overhead is desired.

    DEBUG level records most metrics, providing more detailed information about the system's operation. It's useful for development and testing environments where you need deeper insights to debug and fine-tune the application.

    TRACE level records all possible metrics, capturing every detail about the system's performance and operation. It's best for controlled environments where in-depth analysis is required, though it can introduce significant overhead.

    + + + + + +
    Type:string
    Default:INFO
    Valid Values:[INFO, DEBUG, TRACE]
    Importance:low
    +
  • +
  • +

    metrics.sample.window.ms

    +

    The window of time a metrics sample is computed over.

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    partitioner.adaptive.partitioning.enable

    +

    When set to 'true', the producer will try to adapt to broker performance and produce more messages to partitions hosted on faster brokers. If 'false', the producer will try to distribute messages uniformly. Note: this setting has no effect if a custom partitioner is used.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    partitioner.availability.timeout.ms

    +

    If a broker cannot process produce requests from a partition for partitioner.availability.timeout.ms time, the partitioner treats that partition as not available. If the value is 0, this logic is disabled. Note: this setting has no effect if a custom partitioner is used or partitioner.adaptive.partitioning.enable is set to 'false'.

    + + + + + +
    Type:long
    Default:0
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    reconnect.backoff.max.ms

    +

    The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.

    + + + + + +
    Type:long
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    reconnect.backoff.ms

    +

    The base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker. This value is the initial backoff value and will increase exponentially for each consecutive connection failure, up to the reconnect.backoff.max.ms value.

    + + + + + +
    Type:long
    Default:50
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    retry.backoff.max.ms

    +

    The maximum amount of time in milliseconds to wait when retrying a request to the broker that has repeatedly failed. If provided, the backoff per client will increase exponentially for each failed request, up to this maximum. To prevent all clients from being synchronized upon retry, a randomized jitter with a factor of 0.2 will be applied to the backoff, resulting in the backoff falling within a range between 20% below and 20% above the computed value. If retry.backoff.ms is set to be higher than retry.backoff.max.ms, then retry.backoff.max.ms will be used as a constant backoff from the beginning without any exponential increase.

    + + + + + +
    Type:long
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    retry.backoff.ms

    +

    The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios. This value is the initial backoff value and will increase exponentially for each failed request, up to the retry.backoff.max.ms value.

    + + + + + +
    Type:long
    Default:100
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

  • sasl.kerberos.kinit.cmd

    Kerberos kinit command path.

    Type: string | Default: /usr/bin/kinit | Valid Values: | Importance: low

  • sasl.kerberos.min.time.before.relogin

    Login thread sleep time between refresh attempts.

    Type: long | Default: 60000 | Valid Values: | Importance: low

  • sasl.kerberos.ticket.renew.jitter

    Percentage of random jitter added to the renewal time.

    Type: double | Default: 0.05 | Valid Values: | Importance: low

  • sasl.kerberos.ticket.renew.window.factor

    Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.

    Type: double | Default: 0.8 | Valid Values: | Importance: low

  • sasl.login.connect.timeout.ms

    The (optional) value in milliseconds for the external authentication provider connection timeout. Currently applies only to OAUTHBEARER.

    Type: int | Default: null | Valid Values: | Importance: low

  • sasl.login.read.timeout.ms

    The (optional) value in milliseconds for the external authentication provider read timeout. Currently applies only to OAUTHBEARER.

    Type: int | Default: null | Valid Values: | Importance: low

  • sasl.login.refresh.buffer.seconds

    The amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    Type: short | Default: 300 | Valid Values: [0,...,3600] | Importance: low

  • sasl.login.refresh.min.period.seconds

    The desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    Type: short | Default: 60 | Valid Values: [0,...,900] | Importance: low

  • sasl.login.refresh.window.factor

    Login refresh thread will sleep until the specified window factor relative to the credential's lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    Type: double | Default: 0.8 | Valid Values: [0.5,...,1.0] | Importance: low

  • sasl.login.refresh.window.jitter

    The maximum amount of random jitter relative to the credential's lifetime that is added to the login refresh thread's sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    Type: double | Default: 0.05 | Valid Values: [0.0,...,0.25] | Importance: low

  • sasl.login.retry.backoff.max.ms

    The (optional) value in milliseconds for the maximum wait between login attempts to the external authentication provider. Login uses an exponential backoff algorithm with an initial wait based on the sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.login.retry.backoff.max.ms setting. Currently applies only to OAUTHBEARER.

    Type: long | Default: 10000 (10 seconds) | Valid Values: | Importance: low

  • sasl.login.retry.backoff.ms

    The (optional) value in milliseconds for the initial wait between login attempts to the external authentication provider. Login uses an exponential backoff algorithm with an initial wait based on the sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.login.retry.backoff.max.ms setting. Currently applies only to OAUTHBEARER.

    Type: long | Default: 100 | Valid Values: | Importance: low
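    A hedged sketch of how the SASL login timeout, refresh and retry settings above might be combined for an OAUTHBEARER client. It is a fragment only: a working setup also needs sasl.jaas.config and the OAuth callback/token-endpoint configuration documented elsewhere, and the values shown are simply the documented defaults (with explicit 10-second timeouts added for illustration).

    ```java
    import java.util.Properties;

    public class OauthLoginSettings {
        // Client properties for the OAUTHBEARER login thread described above.
        public static Properties oauthLoginProps() {
            Properties props = new Properties();
            props.put("security.protocol", "SASL_SSL");
            props.put("sasl.mechanism", "OAUTHBEARER");

            // Fail fast if the external authentication provider is slow to answer.
            props.put("sasl.login.connect.timeout.ms", "10000");
            props.put("sasl.login.read.timeout.ms", "10000");

            // Refresh at ~80% of the token lifetime (+/- 5% jitter), keeping a 300s
            // buffer before expiry and at least 60s between refreshes.
            props.put("sasl.login.refresh.window.factor", "0.8");
            props.put("sasl.login.refresh.window.jitter", "0.05");
            props.put("sasl.login.refresh.buffer.seconds", "300");
            props.put("sasl.login.refresh.min.period.seconds", "60");

            // Exponential backoff for failed logins: 100 ms doubling up to 10 s.
            props.put("sasl.login.retry.backoff.ms", "100");
            props.put("sasl.login.retry.backoff.max.ms", "10000");
            return props;
        }
    }
    ```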

  • sasl.oauthbearer.assertion.claim.exp.seconds

    The number of seconds in the future for which the JWT is valid. The value is used to determine the JWT exp (Expiration) claim based on the current system time when the JWT is created.

    The formula to generate the exp claim is very simple:

    Let:

      x = the current timestamp in seconds, on client
      y = the value of this configuration

    Then:

      exp = x + y

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    Type: int | Default: 300 | Valid Values: [0,...,86400] | Importance: low

  • sasl.oauthbearer.assertion.claim.nbf.seconds

    The number of seconds in the past from which the JWT is valid. The value is used to determine the JWT nbf (Not Before) claim based on the current system time when the JWT is created.

    The formula to generate the nbf claim is very simple:

    Let:

      x = the current timestamp in seconds, on client
      y = the value of this configuration

    Then:

      nbf = x - y

    Note: If a value for sasl.oauthbearer.assertion.file is provided, this configuration will be ignored.

    Type: int | Default: 60 | Valid Values: [0,...,3600] | Importance: low
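    As a concrete illustration of the two formulas above, using the default values and an assumed client clock reading:

    ```java
    public class AssertionClaimExample {
        public static void main(String[] args) {
            long x = 1_700_000_000L; // assumed current client timestamp, in seconds
            long expSeconds = 300;   // sasl.oauthbearer.assertion.claim.exp.seconds (default)
            long nbfSeconds = 60;    // sasl.oauthbearer.assertion.claim.nbf.seconds (default)

            long exp = x + expSeconds; // 1_700_000_300: the JWT expires 5 minutes from now
            long nbf = x - nbfSeconds; // 1_699_999_940: the JWT is valid from 1 minute ago

            System.out.println("exp=" + exp + " nbf=" + nbf);
        }
    }
    ```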

  • sasl.oauthbearer.clock.skew.seconds

    The (optional) value in seconds to allow for differences between the time of the OAuth/OIDC identity provider and the broker.

    Type: int | Default: 30 | Valid Values: | Importance: low

  • sasl.oauthbearer.expected.audience

    The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. The JWT will be inspected for the standard OAuth "aud" claim and if this value is set, the broker will match the value from JWT's "aud" claim to see if there is an exact match. If there is no match, the broker will reject the JWT and authentication will fail.

    Type: list | Default: null | Valid Values: | Importance: low

  • sasl.oauthbearer.expected.issuer

    The (optional) setting for the broker to use to verify that the JWT was created by the expected issuer. The JWT will be inspected for the standard OAuth "iss" claim and if this value is set, the broker will match it exactly against what is in the JWT's "iss" claim. If there is no match, the broker will reject the JWT and authentication will fail.

    Type: string | Default: null | Valid Values: | Importance: low

  • sasl.oauthbearer.header.urlencode

    The (optional) setting to enable the OAuth client to URL-encode the client_id and client_secret in the authorization header in accordance with RFC 6749. The default value is set to 'false' for backward compatibility.

    Type: boolean | Default: false | Valid Values: | Importance: low

  • sasl.oauthbearer.jwks.endpoint.refresh.ms

    The (optional) value in milliseconds for the broker to wait between refreshing its JWKS (JSON Web Key Set) cache that contains the keys to verify the signature of the JWT.

    Type: long | Default: 3600000 (1 hour) | Valid Values: | Importance: low

  • sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms

    The (optional) value in milliseconds for the maximum wait between attempts to retrieve the JWKS (JSON Web Key Set) from the external authentication provider. JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting.

    Type: long | Default: 10000 (10 seconds) | Valid Values: | Importance: low

  • sasl.oauthbearer.jwks.endpoint.retry.backoff.ms

    The (optional) value in milliseconds for the initial wait between JWKS (JSON Web Key Set) retrieval attempts from the external authentication provider. JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting.

    Type: long | Default: 100 | Valid Values: | Importance: low
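    A small sketch combining the JWT validation settings above. The issuer and audience strings are placeholders; the remaining values are the documented defaults.

    ```java
    import java.util.Properties;

    public class JwtValidationSettings {
        // Validation-side OAUTHBEARER settings described above.
        public static Properties jwtValidationProps() {
            Properties props = new Properties();
            // Reject tokens whose "iss"/"aud" claims do not match exactly.
            props.put("sasl.oauthbearer.expected.issuer", "https://issuer.example.com");
            props.put("sasl.oauthbearer.expected.audience", "kafka-broker,kafka-admin");
            // Tolerate up to 30 s of clock drift between the identity provider and the broker.
            props.put("sasl.oauthbearer.clock.skew.seconds", "30");
            // Refresh the JWKS cache hourly; retry failed fetches with 100 ms -> 10 s backoff.
            props.put("sasl.oauthbearer.jwks.endpoint.refresh.ms", "3600000");
            props.put("sasl.oauthbearer.jwks.endpoint.retry.backoff.ms", "100");
            props.put("sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms", "10000");
            return props;
        }
    }
    ```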

  • sasl.oauthbearer.scope.claim.name

    The OAuth claim for the scope is often named "scope", but this (optional) setting can provide a different name to use for the scope included in the JWT payload's claims if the OAuth/OIDC provider uses a different name for that claim.

    Type: string | Default: scope | Valid Values: | Importance: low

  • sasl.oauthbearer.sub.claim.name

    The OAuth claim for the subject is often named "sub", but this (optional) setting can provide a different name to use for the subject included in the JWT payload's claims if the OAuth/OIDC provider uses a different name for that claim.

    Type: string | Default: sub | Valid Values: | Importance: low

  • security.providers

    A list of configurable creator classes each returning a provider implementing security algorithms. These classes should implement the org.apache.kafka.common.security.auth.SecurityProviderCreator interface.

    Type: string | Default: null | Valid Values: | Importance: low

  • ssl.cipher.suites

    A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.

    Type: list | Default: null | Valid Values: | Importance: low

  • ssl.endpoint.identification.algorithm

    The endpoint identification algorithm to validate server hostname using server certificate.

    Type: string | Default: https | Valid Values: | Importance: low

  • ssl.engine.factory.class

    The class of type org.apache.kafka.common.security.auth.SslEngineFactory to provide SSLEngine objects. Default value is org.apache.kafka.common.security.ssl.DefaultSslEngineFactory. Alternatively, setting this to org.apache.kafka.common.security.ssl.CommonNameLoggingSslEngineFactory will log the common name of expired SSL certificates used by clients to authenticate at any of the brokers with log level INFO. Note that this will cause a tiny delay during establishment of new connections from mTLS clients to brokers due to the extra code for examining the certificate chain provided by the client. Note further that the implementation uses a custom truststore based on the standard Java truststore and thus might be considered a security risk due to not being as mature as the standard one.

    Type: class | Default: null | Valid Values: | Importance: low

  • ssl.keymanager.algorithm

    The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.

    Type: string | Default: SunX509 | Valid Values: | Importance: low

  • ssl.secure.random.implementation

    The SecureRandom PRNG implementation to use for SSL cryptography operations.

    Type: string | Default: null | Valid Values: | Importance: low

  • ssl.trustmanager.algorithm

    The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.

    Type: string | Default: PKIX | Valid Values: | Importance: low
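    For illustration, the SSL-related client options above might be set as follows; the cipher suite list is an arbitrary example, and keystore/truststore locations (documented elsewhere) are omitted.

    ```java
    import java.util.Properties;

    public class SslClientSettings {
        // TLS-related client settings covered above.
        public static Properties sslProps() {
            Properties props = new Properties();
            props.put("security.protocol", "SSL");
            // Keep hostname verification on (the default); set to "" only to disable it.
            props.put("ssl.endpoint.identification.algorithm", "https");
            // Restrict the negotiated cipher suites (by default all available suites are allowed).
            props.put("ssl.cipher.suites",
                    "TLS_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384");
            // JVM-default factory algorithms, spelled out explicitly.
            props.put("ssl.keymanager.algorithm", "SunX509");
            props.put("ssl.trustmanager.algorithm", "PKIX");
            return props;
        }
    }
    ```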

  • transaction.timeout.ms

    The maximum amount of time in milliseconds that a transaction will remain open before the coordinator proactively aborts it. The start of the transaction is set at the time that the first partition is added to it. If this value is larger than the transaction.max.timeout.ms setting in the broker, the request will fail with an InvalidTxnTimeoutException error.

    Type: int | Default: 60000 (1 minute) | Valid Values: | Importance: low

  • transaction.two.phase.commit.enable

    If set to true, then the broker is informed that the client is participating in the two-phase commit protocol, and transactions that this client starts never expire.

    Type: boolean | Default: false | Valid Values: | Importance: low

  • transactional.id

    The TransactionalId to use for transactional delivery. This enables reliability semantics which span multiple producer sessions since it allows the client to guarantee that transactions using the same TransactionalId have been completed prior to starting any new transactions. If no TransactionalId is provided, then the producer is limited to idempotent delivery. If a TransactionalId is configured, enable.idempotence is implied. By default the TransactionalId is not configured, which means transactions cannot be used. Note that, by default, transactions require a cluster of at least three brokers, which is the recommended setting for production; for development you can change this by adjusting the broker setting transaction.state.log.replication.factor.

    Type: string | Default: null | Valid Values: non-empty string | Importance: low
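    The three transaction settings above come together in the standard transactional producer flow. A minimal sketch follows; the bootstrap address, transactional.id and topic are placeholders, and production code would additionally treat fatal exceptions such as ProducerFencedException by closing the producer rather than aborting.

    ```java
    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;

    public class TransactionalProducerExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");    // placeholder
            props.put("key.serializer", StringSerializer.class.getName());
            props.put("value.serializer", StringSerializer.class.getName());
            props.put("transactional.id", "orders-processor-1"); // placeholder id; implies idempotence
            props.put("transaction.timeout.ms", "60000");         // coordinator aborts after 1 minute

            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                producer.initTransactions();          // fences older sessions with the same id
                producer.beginTransaction();
                try {
                    producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
                    producer.commitTransaction();
                } catch (Exception e) {
                    producer.abortTransaction();
                    throw e;
                }
            }
        }
    }
    ```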
diff --git a/static/41/generated/producer_metrics.html b/static/41/generated/producer_metrics.html new file mode 100644 index 000000000..b18f52e3f --- /dev/null +++ b/static/41/generated/producer_metrics.html @@ -0,0 +1,163 @@
Metric/Attribute name | Description | Mbean name
batch-size-avg | The average number of bytes sent per partition per-request. | kafka.producer:type=producer-metrics,client-id="{client-id}"
batch-size-max | The max number of bytes sent per partition per-request. | kafka.producer:type=producer-metrics,client-id="{client-id}"
batch-split-rate | The average number of batch splits per second | kafka.producer:type=producer-metrics,client-id="{client-id}"
batch-split-total | The total number of batch splits | kafka.producer:type=producer-metrics,client-id="{client-id}"
compression-rate-avg | The average compression rate of record batches, defined as the average ratio of the compressed batch size over the uncompressed size. | kafka.producer:type=producer-metrics,client-id="{client-id}"
metadata-age | The age in seconds of the current producer metadata being used. | kafka.producer:type=producer-metrics,client-id="{client-id}"
produce-throttle-time-avg | The average time in ms a request was throttled by a broker | kafka.producer:type=producer-metrics,client-id="{client-id}"
produce-throttle-time-max | The maximum time in ms a request was throttled by a broker | kafka.producer:type=producer-metrics,client-id="{client-id}"
record-error-rate | The average per-second number of record sends that resulted in errors | kafka.producer:type=producer-metrics,client-id="{client-id}"
record-error-total | The total number of record sends that resulted in errors | kafka.producer:type=producer-metrics,client-id="{client-id}"
record-queue-time-avg | The average time in ms record batches spent in the send buffer. | kafka.producer:type=producer-metrics,client-id="{client-id}"
record-queue-time-max | The maximum time in ms record batches spent in the send buffer. | kafka.producer:type=producer-metrics,client-id="{client-id}"
record-retry-rate | The average per-second number of retried record sends | kafka.producer:type=producer-metrics,client-id="{client-id}"
record-retry-total | The total number of retried record sends | kafka.producer:type=producer-metrics,client-id="{client-id}"
record-send-rate | The average number of records sent per second. | kafka.producer:type=producer-metrics,client-id="{client-id}"
record-send-total | The total number of records sent. | kafka.producer:type=producer-metrics,client-id="{client-id}"
record-size-avg | The average record size | kafka.producer:type=producer-metrics,client-id="{client-id}"
record-size-max | The maximum record size | kafka.producer:type=producer-metrics,client-id="{client-id}"
records-per-request-avg | The average number of records per request. | kafka.producer:type=producer-metrics,client-id="{client-id}"
request-latency-avg | The average request latency in ms | kafka.producer:type=producer-metrics,client-id="{client-id}"
request-latency-max | The maximum request latency in ms | kafka.producer:type=producer-metrics,client-id="{client-id}"
requests-in-flight | The current number of in-flight requests awaiting a response. | kafka.producer:type=producer-metrics,client-id="{client-id}"
byte-rate | The average number of bytes sent per second for a topic. | kafka.producer:type=producer-topic-metrics,client-id="{client-id}",topic="{topic}"
byte-total | The total number of bytes sent for a topic. | kafka.producer:type=producer-topic-metrics,client-id="{client-id}",topic="{topic}"
compression-rate | The average compression rate of record batches for a topic, defined as the average ratio of the compressed batch size over the uncompressed size. | kafka.producer:type=producer-topic-metrics,client-id="{client-id}",topic="{topic}"
record-error-rate | The average per-second number of record sends that resulted in errors for a topic | kafka.producer:type=producer-topic-metrics,client-id="{client-id}",topic="{topic}"
record-error-total | The total number of record sends that resulted in errors for a topic | kafka.producer:type=producer-topic-metrics,client-id="{client-id}",topic="{topic}"
record-retry-rate | The average per-second number of retried record sends for a topic | kafka.producer:type=producer-topic-metrics,client-id="{client-id}",topic="{topic}"
record-retry-total | The total number of retried record sends for a topic | kafka.producer:type=producer-topic-metrics,client-id="{client-id}",topic="{topic}"
record-send-rate | The average number of records sent per second for a topic. | kafka.producer:type=producer-topic-metrics,client-id="{client-id}",topic="{topic}"
record-send-total | The total number of records sent for a topic. | kafka.producer:type=producer-topic-metrics,client-id="{client-id}",topic="{topic}"
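
These metrics are also available programmatically from the client, not just over JMX. A small sketch, assuming an already-configured KafkaProducer instance is passed in:

```java
import java.util.Map;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;

public class ProducerMetricsDump {
    // Prints two of the producer-level metrics from the table above. The same
    // values are exposed over JMX under the MBean names in the third column.
    public static void dump(Producer<String, String> producer) {
        for (Map.Entry<MetricName, ? extends Metric> entry : producer.metrics().entrySet()) {
            MetricName name = entry.getKey();
            boolean wanted = "record-send-rate".equals(name.name())
                    || "request-latency-avg".equals(name.name());
            if ("producer-metrics".equals(name.group()) && wanted) {
                System.out.println(name.name() + " = " + entry.getValue().metricValue());
            }
        }
    }
}
```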
diff --git a/static/41/generated/protocol_api_keys.html b/static/41/generated/protocol_api_keys.html new file mode 100644 index 000000000..91d42a6d8 --- /dev/null +++ b/static/41/generated/protocol_api_keys.html @@ -0,0 +1,155 @@
Name | Key
Produce | 0
Fetch | 1
ListOffsets | 2
Metadata | 3
OffsetCommit | 8
OffsetFetch | 9
FindCoordinator | 10
JoinGroup | 11
Heartbeat | 12
LeaveGroup | 13
SyncGroup | 14
DescribeGroups | 15
ListGroups | 16
SaslHandshake | 17
ApiVersions | 18
CreateTopics | 19
DeleteTopics | 20
DeleteRecords | 21
InitProducerId | 22
OffsetForLeaderEpoch | 23
AddPartitionsToTxn | 24
AddOffsetsToTxn | 25
EndTxn | 26
WriteTxnMarkers | 27
TxnOffsetCommit | 28
DescribeAcls | 29
CreateAcls | 30
DeleteAcls | 31
DescribeConfigs | 32
AlterConfigs | 33
AlterReplicaLogDirs | 34
DescribeLogDirs | 35
SaslAuthenticate | 36
CreatePartitions | 37
CreateDelegationToken | 38
RenewDelegationToken | 39
ExpireDelegationToken | 40
DescribeDelegationToken | 41
DeleteGroups | 42
ElectLeaders | 43
IncrementalAlterConfigs | 44
AlterPartitionReassignments | 45
ListPartitionReassignments | 46
OffsetDelete | 47
DescribeClientQuotas | 48
AlterClientQuotas | 49
DescribeUserScramCredentials | 50
AlterUserScramCredentials | 51
DescribeQuorum | 55
UpdateFeatures | 57
DescribeCluster | 60
DescribeProducers | 61
UnregisterBroker | 64
DescribeTransactions | 65
ListTransactions | 66
ConsumerGroupHeartbeat | 68
ConsumerGroupDescribe | 69
GetTelemetrySubscriptions | 71
PushTelemetry | 72
ListConfigResources | 74
DescribeTopicPartitions | 75
ShareGroupHeartbeat | 76
ShareGroupDescribe | 77
ShareFetch | 78
ShareAcknowledge | 79
AddRaftVoter | 80
RemoveRaftVoter | 81
InitializeShareGroupState | 83
ReadShareGroupState | 84
WriteShareGroupState | 85
DeleteShareGroupState | 86
ReadShareGroupStateSummary | 87
DescribeShareGroupOffsets | 90
AlterShareGroupOffsets | 91
DeleteShareGroupOffsets | 92
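
For illustration only: kafka-clients ships an ApiKeys enum that mirrors this table. It lives in an internal package (org.apache.kafka.common.protocol) and is not a supported public API, so treat the sketch below as a convenience for exploring the name-to-key mapping rather than something to depend on.

```java
import org.apache.kafka.common.protocol.ApiKeys;

public class ApiKeyLookup {
    public static void main(String[] args) {
        // A few entries from the table above, resolved through the client's enum.
        System.out.println("Produce  -> " + ApiKeys.PRODUCE.id);   // 0
        System.out.println("Fetch    -> " + ApiKeys.FETCH.id);     // 1
        System.out.println("Metadata -> " + ApiKeys.METADATA.id);  // 3

        // List every API key known to this client version.
        for (ApiKeys key : ApiKeys.values()) {
            System.out.println(key.name() + " -> " + key.id);
        }
    }
}
```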
diff --git a/static/41/generated/protocol_errors.html b/static/41/generated/protocol_errors.html new file mode 100644 index 000000000..8b32b0f84 --- /dev/null +++ b/static/41/generated/protocol_errors.html @@ -0,0 +1,143 @@
Error | Code | Retriable | Description
UNKNOWN_SERVER_ERROR | -1 | False | The server experienced an unexpected error when processing the request.
NONE | 0 | False |
OFFSET_OUT_OF_RANGE | 1 | False | The requested offset is not within the range of offsets maintained by the server.
CORRUPT_MESSAGE | 2 | True | This message has failed its CRC checksum, exceeds the valid size, has a null key for a compacted topic, or is otherwise corrupt.
UNKNOWN_TOPIC_OR_PARTITION | 3 | True | This server does not host this topic-partition.
INVALID_FETCH_SIZE | 4 | False | The requested fetch size is invalid.
LEADER_NOT_AVAILABLE | 5 | True | There is no leader for this topic-partition as we are in the middle of a leadership election.
NOT_LEADER_OR_FOLLOWER | 6 | True | For requests intended only for the leader, this error indicates that the broker is not the current leader. For requests intended for any replica, this error indicates that the broker is not a replica of the topic partition.
REQUEST_TIMED_OUT | 7 | True | The request timed out.
BROKER_NOT_AVAILABLE | 8 | False | The broker is not available.
REPLICA_NOT_AVAILABLE | 9 | True | The replica is not available for the requested topic-partition. Produce/Fetch requests and other requests intended only for the leader or follower return NOT_LEADER_OR_FOLLOWER if the broker is not a replica of the topic-partition.
MESSAGE_TOO_LARGE | 10 | False | The request included a message larger than the max message size the server will accept.
STALE_CONTROLLER_EPOCH | 11 | False | The controller moved to another broker.
OFFSET_METADATA_TOO_LARGE | 12 | False | The metadata field of the offset request was too large.
NETWORK_EXCEPTION | 13 | True | The server disconnected before a response was received.
COORDINATOR_LOAD_IN_PROGRESS | 14 | True | The coordinator is loading and hence can't process requests.
COORDINATOR_NOT_AVAILABLE | 15 | True | The coordinator is not available.
NOT_COORDINATOR | 16 | True | This is not the correct coordinator.
INVALID_TOPIC_EXCEPTION | 17 | False | The request attempted to perform an operation on an invalid topic.
RECORD_LIST_TOO_LARGE | 18 | False | The request included message batch larger than the configured segment size on the server.
NOT_ENOUGH_REPLICAS | 19 | True | Messages are rejected since there are fewer in-sync replicas than required.
NOT_ENOUGH_REPLICAS_AFTER_APPEND | 20 | True | Messages are written to the log, but to fewer in-sync replicas than required.
INVALID_REQUIRED_ACKS | 21 | False | Produce request specified an invalid value for required acks.
ILLEGAL_GENERATION | 22 | False | Specified group generation id is not valid.
INCONSISTENT_GROUP_PROTOCOL | 23 | False | The group member's supported protocols are incompatible with those of existing members or first group member tried to join with empty protocol type or empty protocol list.
INVALID_GROUP_ID | 24 | False | The group id is invalid.
UNKNOWN_MEMBER_ID | 25 | False | The coordinator is not aware of this member.
INVALID_SESSION_TIMEOUT | 26 | False | The session timeout is not within the range allowed by the broker (as configured by group.min.session.timeout.ms and group.max.session.timeout.ms).
REBALANCE_IN_PROGRESS | 27 | False | The group is rebalancing, so a rejoin is needed.
INVALID_COMMIT_OFFSET_SIZE | 28 | False | The committing offset data size is not valid.
TOPIC_AUTHORIZATION_FAILED | 29 | False | Topic authorization failed.
GROUP_AUTHORIZATION_FAILED | 30 | False | Group authorization failed.
CLUSTER_AUTHORIZATION_FAILED | 31 | False | Cluster authorization failed.
INVALID_TIMESTAMP | 32 | False | The timestamp of the message is out of acceptable range.
UNSUPPORTED_SASL_MECHANISM | 33 | False | The broker does not support the requested SASL mechanism.
ILLEGAL_SASL_STATE | 34 | False | Request is not valid given the current SASL state.
UNSUPPORTED_VERSION | 35 | False | The version of API is not supported.
TOPIC_ALREADY_EXISTS | 36 | False | Topic with this name already exists.
INVALID_PARTITIONS | 37 | False | Number of partitions is below 1.
INVALID_REPLICATION_FACTOR | 38 | False | Replication factor is below 1 or larger than the number of available brokers.
INVALID_REPLICA_ASSIGNMENT | 39 | False | Replica assignment is invalid.
INVALID_CONFIG | 40 | False | Configuration is invalid.
NOT_CONTROLLER | 41 | True | This is not the correct controller for this cluster.
INVALID_REQUEST | 42 | False | This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details.
UNSUPPORTED_FOR_MESSAGE_FORMAT | 43 | False | The message format version on the broker does not support the request.
POLICY_VIOLATION | 44 | False | Request parameters do not satisfy the configured policy.
OUT_OF_ORDER_SEQUENCE_NUMBER | 45 | False | The broker received an out of order sequence number.
DUPLICATE_SEQUENCE_NUMBER | 46 | False | The broker received a duplicate sequence number.
INVALID_PRODUCER_EPOCH | 47 | False | Producer attempted to produce with an old epoch.
INVALID_TXN_STATE | 48 | False | The producer attempted a transactional operation in an invalid state.
INVALID_PRODUCER_ID_MAPPING | 49 | False | The producer attempted to use a producer id which is not currently assigned to its transactional id.
INVALID_TRANSACTION_TIMEOUT | 50 | False | The transaction timeout is larger than the maximum value allowed by the broker (as configured by transaction.max.timeout.ms).
CONCURRENT_TRANSACTIONS | 51 | True | The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing.
TRANSACTION_COORDINATOR_FENCED | 52 | False | Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer.
TRANSACTIONAL_ID_AUTHORIZATION_FAILED | 53 | False | Transactional Id authorization failed.
SECURITY_DISABLED | 54 | False | Security features are disabled.
OPERATION_NOT_ATTEMPTED | 55 | False | The broker did not attempt to execute this operation. This may happen for batched RPCs where some operations in the batch failed, causing the broker to respond without trying the rest.
KAFKA_STORAGE_ERROR | 56 | True | Disk error when trying to access log file on the disk.
LOG_DIR_NOT_FOUND | 57 | False | The user-specified log directory is not found in the broker config.
SASL_AUTHENTICATION_FAILED | 58 | False | SASL Authentication failed.
UNKNOWN_PRODUCER_ID | 59 | False | This exception is raised by the broker if it could not locate the producer metadata associated with the producerId in question. This could happen if, for instance, the producer's records were deleted because their retention time had elapsed. Once the last records of the producerId are removed, the producer's metadata is removed from the broker, and future appends by the producer will return this exception.
REASSIGNMENT_IN_PROGRESS | 60 | False | A partition reassignment is in progress.
DELEGATION_TOKEN_AUTH_DISABLED | 61 | False | Delegation Token feature is not enabled.
DELEGATION_TOKEN_NOT_FOUND | 62 | False | Delegation Token is not found on server.
DELEGATION_TOKEN_OWNER_MISMATCH | 63 | False | Specified Principal is not valid Owner/Renewer.
DELEGATION_TOKEN_REQUEST_NOT_ALLOWED | 64 | False | Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels.
DELEGATION_TOKEN_AUTHORIZATION_FAILED | 65 | False | Delegation Token authorization failed.
DELEGATION_TOKEN_EXPIRED | 66 | False | Delegation Token is expired.
INVALID_PRINCIPAL_TYPE | 67 | False | Supplied principalType is not supported.
NON_EMPTY_GROUP | 68 | False | The group is not empty.
GROUP_ID_NOT_FOUND | 69 | False | The group id does not exist.
FETCH_SESSION_ID_NOT_FOUND | 70 | True | The fetch session ID was not found.
INVALID_FETCH_SESSION_EPOCH | 71 | True | The fetch session epoch is invalid.
LISTENER_NOT_FOUND | 72 | True | There is no listener on the leader broker that matches the listener on which metadata request was processed.
TOPIC_DELETION_DISABLED | 73 | False | Topic deletion is disabled.
FENCED_LEADER_EPOCH | 74 | True | The leader epoch in the request is older than the epoch on the broker.
UNKNOWN_LEADER_EPOCH | 75 | True | The leader epoch in the request is newer than the epoch on the broker.
UNSUPPORTED_COMPRESSION_TYPE | 76 | False | The requesting client does not support the compression type of given partition.
STALE_BROKER_EPOCH | 77 | False | Broker epoch has changed.
OFFSET_NOT_AVAILABLE | 78 | True | The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing.
MEMBER_ID_REQUIRED | 79 | False | The group member needs to have a valid member id before actually entering a consumer group.
PREFERRED_LEADER_NOT_AVAILABLE | 80 | True | The preferred leader was not available.
GROUP_MAX_SIZE_REACHED | 81 | False | The group has reached its maximum size.
FENCED_INSTANCE_ID | 82 | False | The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id.
ELIGIBLE_LEADERS_NOT_AVAILABLE | 83 | True | Eligible topic partition leaders are not available.
ELECTION_NOT_NEEDED | 84 | True | Leader election not needed for topic partition.
NO_REASSIGNMENT_IN_PROGRESS | 85 | False | No partition reassignment is in progress.
GROUP_SUBSCRIBED_TO_TOPIC | 86 | False | Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it.
INVALID_RECORD | 87 | False | This record has failed the validation on broker and hence will be rejected.
UNSTABLE_OFFSET_COMMIT | 88 | True | There are unstable offsets that need to be cleared.
THROTTLING_QUOTA_EXCEEDED | 89 | True | The throttling quota has been exceeded.
PRODUCER_FENCED | 90 | False | There is a newer producer with the same transactionalId which fences the current one.
RESOURCE_NOT_FOUND | 91 | False | A request illegally referred to a resource that does not exist.
DUPLICATE_RESOURCE | 92 | False | A request illegally referred to the same resource twice.
UNACCEPTABLE_CREDENTIAL | 93 | False | Requested credential would not meet criteria for acceptability.
INCONSISTENT_VOTER_SET | 94 | False | Indicates that either the sender or recipient of a voter-only request is not one of the expected voters.
INVALID_UPDATE_VERSION | 95 | False | The given update version was invalid.
FEATURE_UPDATE_FAILED | 96 | False | Unable to update finalized features due to an unexpected server error.
PRINCIPAL_DESERIALIZATION_FAILURE | 97 | False | Request principal deserialization failed during forwarding. This indicates an internal error on the broker cluster security setup.
SNAPSHOT_NOT_FOUND | 98 | False | Requested snapshot was not found.
POSITION_OUT_OF_RANGE | 99 | False | Requested position is not greater than or equal to zero, and less than the size of the snapshot.
UNKNOWN_TOPIC_ID | 100 | True | This server does not host this topic ID.
DUPLICATE_BROKER_REGISTRATION | 101 | False | This broker ID is already in use.
BROKER_ID_NOT_REGISTERED | 102 | False | The given broker ID was not registered.
INCONSISTENT_TOPIC_ID | 103 | True | The log's topic ID did not match the topic ID in the request.
INCONSISTENT_CLUSTER_ID | 104 | False | The clusterId in the request does not match that found on the server.
TRANSACTIONAL_ID_NOT_FOUND | 105 | False | The transactionalId could not be found.
FETCH_SESSION_TOPIC_ID_ERROR | 106 | True | The fetch session encountered inconsistent topic ID usage.
INELIGIBLE_REPLICA | 107 | False | The new ISR contains at least one ineligible replica.
NEW_LEADER_ELECTED | 108 | False | The AlterPartition request successfully updated the partition state but the leader has changed.
OFFSET_MOVED_TO_TIERED_STORAGE | 109 | False | The requested offset is moved to tiered storage.
FENCED_MEMBER_EPOCH | 110 | False | The member epoch is fenced by the group coordinator. The member must abandon all its partitions and rejoin.
UNRELEASED_INSTANCE_ID | 111 | False | The instance ID is still used by another member in the consumer group. That member must leave first.
UNSUPPORTED_ASSIGNOR | 112 | False | The assignor or its version range is not supported by the consumer group.
STALE_MEMBER_EPOCH | 113 | False | The member epoch is stale. The member must retry after receiving its updated member epoch via the ConsumerGroupHeartbeat API.
MISMATCHED_ENDPOINT_TYPE | 114 | False | The request was sent to an endpoint of the wrong type.
UNSUPPORTED_ENDPOINT_TYPE | 115 | False | This endpoint type is not supported yet.
UNKNOWN_CONTROLLER_ID | 116 | False | This controller ID is not known.
UNKNOWN_SUBSCRIPTION_ID | 117 | False | Client sent a push telemetry request with an invalid or outdated subscription ID.
TELEMETRY_TOO_LARGE | 118 | False | Client sent a push telemetry request larger than the maximum size the broker will accept.
INVALID_REGISTRATION | 119 | False | The controller has considered the broker registration to be invalid.
TRANSACTION_ABORTABLE | 120 | False | The server encountered an error with the transaction. The client can abort the transaction to continue using this transactional ID.
INVALID_RECORD_STATE | 121 | False | The record state is invalid. The acknowledgement of delivery could not be completed.
SHARE_SESSION_NOT_FOUND | 122 | True | The share session was not found.
INVALID_SHARE_SESSION_EPOCH | 123 | True | The share session epoch is invalid.
FENCED_STATE_EPOCH | 124 | False | The share coordinator rejected the request because the share-group state epoch did not match.
INVALID_VOTER_KEY | 125 | False | The voter key doesn't match the receiving replica's key.
DUPLICATE_VOTER | 126 | False | The voter is already part of the set of voters.
VOTER_NOT_FOUND | 127 | False | The voter is not part of the set of voters.
INVALID_REGULAR_EXPRESSION | 128 | False | The regular expression is not valid.
REBOOTSTRAP_REQUIRED | 129 | False | Client metadata is stale. The client should rebootstrap to obtain new metadata.
STREAMS_INVALID_TOPOLOGY | 130 | False | The supplied topology is invalid.
STREAMS_INVALID_TOPOLOGY_EPOCH | 131 | False | The supplied topology epoch is invalid.
STREAMS_TOPOLOGY_FENCED | 132 | False | The supplied topology epoch is outdated.
SHARE_SESSION_LIMIT_REACHED | 133 | True | The limit of share sessions has been reached.
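
The Retriable column is reflected in the Java client's exception hierarchy: retriable codes surface as subclasses of RetriableException. A producer send callback can use that distinction, for example:

```java
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.errors.RetriableException;

public class ErrorHandlingCallback implements Callback {
    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception == null) {
            return; // success
        }
        if (exception instanceof RetriableException) {
            // e.g. NOT_LEADER_OR_FOLLOWER, NETWORK_EXCEPTION: the producer will
            // usually retry these itself, subject to retries/delivery.timeout.ms.
            System.err.println("Transient send failure: " + exception.getMessage());
        } else {
            // e.g. MESSAGE_TOO_LARGE, TOPIC_AUTHORIZATION_FAILED: retrying won't help.
            System.err.println("Fatal send failure: " + exception.getMessage());
        }
    }
}
```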
diff --git a/static/41/generated/protocol_messages.html b/static/41/generated/protocol_messages.html new file mode 100644 index 000000000..fe19b7663 --- /dev/null +++ b/static/41/generated/protocol_messages.html @@ -0,0 +1,18692 @@
Headers:

Request Header v1 => request_api_key request_api_version correlation_id client_id
+  request_api_key => INT16
+  request_api_version => INT16
+  correlation_id => INT32
+  client_id => NULLABLE_STRING

Field | Description
request_api_key | The API key of this request.
request_api_version | The API version of this request.
correlation_id | The correlation ID of this request.
client_id | The client ID string.

Request Header v2 => request_api_key request_api_version correlation_id client_id _tagged_fields
+  request_api_key => INT16
+  request_api_version => INT16
+  correlation_id => INT32
+  client_id => NULLABLE_STRING

Field | Description
request_api_key | The API key of this request.
request_api_version | The API version of this request.
correlation_id | The correlation ID of this request.
client_id | The client ID string.
_tagged_fields | The tagged fields

Response Header v0 => correlation_id
+  correlation_id => INT32

Field | Description
correlation_id | The correlation ID of this response.

Response Header v1 => correlation_id _tagged_fields
+  correlation_id => INT32

Field | Description
correlation_id | The correlation ID of this response.
_tagged_fields | The tagged fields
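
As an illustration of the header schemas above, the sketch below hand-serializes a v1 request header (INT16 and INT32 are big-endian; NULLABLE_STRING is an INT16 length prefix, -1 for null, followed by UTF-8 bytes). The API key and version are taken from the tables in this document; real clients use the generated message classes in kafka-clients rather than hand-rolled buffers.

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class RequestHeaderV1Sketch {
    public static void main(String[] args) {
        short apiKey = 0;            // Produce, per the API keys table
        short apiVersion = 8;        // a pre-flexible Produce version, so header v1 applies
        int correlationId = 42;      // arbitrary example value
        String clientId = "example-producer";

        byte[] clientIdBytes = clientId.getBytes(StandardCharsets.UTF_8);
        ByteBuffer buf = ByteBuffer.allocate(2 + 2 + 4 + 2 + clientIdBytes.length);
        buf.putShort(apiKey);
        buf.putShort(apiVersion);
        buf.putInt(correlationId);
        buf.putShort((short) clientIdBytes.length); // -1 here would mean a null client_id
        buf.put(clientIdBytes);
        buf.flip();

        System.out.println("Serialized header length: " + buf.remaining() + " bytes");
    }
}
```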
Produce API (Key: 0):
Requests:
Produce Request (Version: 3) => transactional_id acks timeout_ms [topic_data] 
+  transactional_id => NULLABLE_STRING
+  acks => INT16
+  timeout_ms => INT32
+  topic_data => name [partition_data] 
+    name => STRING
+    partition_data => index records 
+      index => INT32
+      records => RECORDS
+

Request header version: 1

+ + + + + + + + + + + + + + + + + + + +
FieldDescription
transactional_idThe transactional ID, or null if the producer is not transactional.
acksThe number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout_msThe timeout to await a response in milliseconds.
topic_dataEach topic to produce to.
nameThe topic name.
partition_dataEach partition to produce to.
indexThe partition index.
recordsThe record data to be produced.
+
+
Produce Request (Version: 4) => transactional_id acks timeout_ms [topic_data] 
+  transactional_id => NULLABLE_STRING
+  acks => INT16
+  timeout_ms => INT32
+  topic_data => name [partition_data] 
+    name => STRING
+    partition_data => index records 
+      index => INT32
+      records => RECORDS
+

Request header version: 1

+ + + + + + + + + + + + + + + + + + + +
FieldDescription
transactional_idThe transactional ID, or null if the producer is not transactional.
acksThe number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout_msThe timeout to await a response in milliseconds.
topic_dataEach topic to produce to.
nameThe topic name.
partition_dataEach partition to produce to.
indexThe partition index.
recordsThe record data to be produced.
+
+
Produce Request (Version: 5) => transactional_id acks timeout_ms [topic_data] 
+  transactional_id => NULLABLE_STRING
+  acks => INT16
+  timeout_ms => INT32
+  topic_data => name [partition_data] 
+    name => STRING
+    partition_data => index records 
+      index => INT32
+      records => RECORDS
+

Request header version: 1

+ + + + + + + + + + + + + + + + + + + +
FieldDescription
transactional_idThe transactional ID, or null if the producer is not transactional.
acksThe number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout_msThe timeout to await a response in milliseconds.
topic_dataEach topic to produce to.
nameThe topic name.
partition_dataEach partition to produce to.
indexThe partition index.
recordsThe record data to be produced.
+
+
Produce Request (Version: 6) => transactional_id acks timeout_ms [topic_data] 
+  transactional_id => NULLABLE_STRING
+  acks => INT16
+  timeout_ms => INT32
+  topic_data => name [partition_data] 
+    name => STRING
+    partition_data => index records 
+      index => INT32
+      records => RECORDS
+

Request header version: 1

+ + + + + + + + + + + + + + + + + + + +
FieldDescription
transactional_idThe transactional ID, or null if the producer is not transactional.
acksThe number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout_msThe timeout to await a response in milliseconds.
topic_dataEach topic to produce to.
nameThe topic name.
partition_dataEach partition to produce to.
indexThe partition index.
recordsThe record data to be produced.
+
+
Produce Request (Version: 7) => transactional_id acks timeout_ms [topic_data] 
+  transactional_id => NULLABLE_STRING
+  acks => INT16
+  timeout_ms => INT32
+  topic_data => name [partition_data] 
+    name => STRING
+    partition_data => index records 
+      index => INT32
+      records => RECORDS
+

Request header version: 1

+ + + + + + + + + + + + + + + + + + + +
FieldDescription
transactional_idThe transactional ID, or null if the producer is not transactional.
acksThe number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout_msThe timeout to await a response in milliseconds.
topic_dataEach topic to produce to.
nameThe topic name.
partition_dataEach partition to produce to.
indexThe partition index.
recordsThe record data to be produced.
+
+
Produce Request (Version: 8) => transactional_id acks timeout_ms [topic_data] 
+  transactional_id => NULLABLE_STRING
+  acks => INT16
+  timeout_ms => INT32
+  topic_data => name [partition_data] 
+    name => STRING
+    partition_data => index records 
+      index => INT32
+      records => RECORDS
+

Request header version: 1

+ + + + + + + + + + + + + + + + + + + +
FieldDescription
transactional_idThe transactional ID, or null if the producer is not transactional.
acksThe number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout_msThe timeout to await a response in milliseconds.
topic_dataEach topic to produce to.
nameThe topic name.
partition_dataEach partition to produce to.
indexThe partition index.
recordsThe record data to be produced.
+
+
Produce Request (Version: 9) => transactional_id acks timeout_ms [topic_data] _tagged_fields 
+  transactional_id => COMPACT_NULLABLE_STRING
+  acks => INT16
+  timeout_ms => INT32
+  topic_data => name [partition_data] _tagged_fields 
+    name => COMPACT_STRING
+    partition_data => index records _tagged_fields 
+      index => INT32
+      records => COMPACT_RECORDS
+

Request header version: 2

+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
transactional_idThe transactional ID, or null if the producer is not transactional.
acksThe number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout_msThe timeout to await a response in milliseconds.
topic_dataEach topic to produce to.
nameThe topic name.
partition_dataEach partition to produce to.
indexThe partition index.
recordsThe record data to be produced.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
+
+
Produce Request (Version: 10) => transactional_id acks timeout_ms [topic_data] _tagged_fields 
+  transactional_id => COMPACT_NULLABLE_STRING
+  acks => INT16
+  timeout_ms => INT32
+  topic_data => name [partition_data] _tagged_fields 
+    name => COMPACT_STRING
+    partition_data => index records _tagged_fields 
+      index => INT32
+      records => COMPACT_RECORDS
+

Request header version: 2

+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
transactional_idThe transactional ID, or null if the producer is not transactional.
acksThe number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout_msThe timeout to await a response in milliseconds.
topic_dataEach topic to produce to.
nameThe topic name.
partition_dataEach partition to produce to.
indexThe partition index.
recordsThe record data to be produced.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
+
+
Produce Request (Version: 11) => transactional_id acks timeout_ms [topic_data] _tagged_fields 
+  transactional_id => COMPACT_NULLABLE_STRING
+  acks => INT16
+  timeout_ms => INT32
+  topic_data => name [partition_data] _tagged_fields 
+    name => COMPACT_STRING
+    partition_data => index records _tagged_fields 
+      index => INT32
+      records => COMPACT_RECORDS
+

Request header version: 2

+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
transactional_idThe transactional ID, or null if the producer is not transactional.
acksThe number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout_msThe timeout to await a response in milliseconds.
topic_dataEach topic to produce to.
nameThe topic name.
partition_dataEach partition to produce to.
indexThe partition index.
recordsThe record data to be produced.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
+
+
Produce Request (Version: 12) => transactional_id acks timeout_ms [topic_data] _tagged_fields 
+  transactional_id => COMPACT_NULLABLE_STRING
+  acks => INT16
+  timeout_ms => INT32
+  topic_data => name [partition_data] _tagged_fields 
+    name => COMPACT_STRING
+    partition_data => index records _tagged_fields 
+      index => INT32
+      records => COMPACT_RECORDS
+

Request header version: 2

+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
transactional_idThe transactional ID, or null if the producer is not transactional.
acksThe number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout_msThe timeout to await a response in milliseconds.
topic_dataEach topic to produce to.
nameThe topic name.
partition_dataEach partition to produce to.
indexThe partition index.
recordsThe record data to be produced.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
+
+
Produce Request (Version: 13) => transactional_id acks timeout_ms [topic_data] _tagged_fields 
+  transactional_id => COMPACT_NULLABLE_STRING
+  acks => INT16
+  timeout_ms => INT32
+  topic_data => topic_id [partition_data] _tagged_fields 
+    topic_id => UUID
+    partition_data => index records _tagged_fields 
+      index => INT32
+      records => COMPACT_RECORDS
+

Request header version: 2

+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
transactional_idThe transactional ID, or null if the producer is not transactional.
acksThe number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout_msThe timeout to await a response in milliseconds.
topic_dataEach topic to produce to.
topic_idThe unique topic ID
partition_dataEach partition to produce to.
indexThe partition index.
recordsThe record data to be produced.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
+
Responses:
+
Produce Response (Version: 3) => [responses] throttle_time_ms 
+  responses => name [partition_responses] 
+    name => STRING
+    partition_responses => index error_code base_offset log_append_time_ms 
+      index => INT32
+      error_code => INT16
+      base_offset => INT64
+      log_append_time_ms => INT64
+  throttle_time_ms => INT32
+

Response header version: 0

+ + + + + + + + + + + + + + + + + + + +
FieldDescription
responsesEach produce response.
nameThe topic name.
partition_responsesEach partition that we produced to within the topic.
indexThe partition index.
error_codeThe error code, or 0 if there was no error.
base_offsetThe base offset.
log_append_time_msThe timestamp returned by broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended.
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+
+
Produce Response (Version: 4) => [responses] throttle_time_ms 
+  responses => name [partition_responses] 
+    name => STRING
+    partition_responses => index error_code base_offset log_append_time_ms 
+      index => INT32
+      error_code => INT16
+      base_offset => INT64
+      log_append_time_ms => INT64
+  throttle_time_ms => INT32
+

Response header version: 0

+ + + + + + + + + + + + + + + + + + + +
FieldDescription
responsesEach produce response.
nameThe topic name.
partition_responsesEach partition that we produced to within the topic.
indexThe partition index.
error_codeThe error code, or 0 if there was no error.
base_offsetThe base offset.
log_append_time_msThe timestamp returned by broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended.
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+
+
Produce Response (Version: 5) => [responses] throttle_time_ms 
+  responses => name [partition_responses] 
+    name => STRING
+    partition_responses => index error_code base_offset log_append_time_ms log_start_offset 
+      index => INT32
+      error_code => INT16
+      base_offset => INT64
+      log_append_time_ms => INT64
+      log_start_offset => INT64
+  throttle_time_ms => INT32
+

Response header version: 0

+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
responsesEach produce response.
nameThe topic name.
partition_responsesEach partition that we produced to within the topic.
indexThe partition index.
error_codeThe error code, or 0 if there was no error.
base_offsetThe base offset.
log_append_time_msThe timestamp returned by broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended.
log_start_offsetThe log start offset.
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+
+
Produce Response (Version: 6) => [responses] throttle_time_ms 
+  responses => name [partition_responses] 
+    name => STRING
+    partition_responses => index error_code base_offset log_append_time_ms log_start_offset 
+      index => INT32
+      error_code => INT16
+      base_offset => INT64
+      log_append_time_ms => INT64
+      log_start_offset => INT64
+  throttle_time_ms => INT32
+

Response header version: 0

+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
responsesEach produce response.
nameThe topic name.
partition_responsesEach partition that we produced to within the topic.
indexThe partition index.
error_codeThe error code, or 0 if there was no error.
base_offsetThe base offset.
log_append_time_msThe timestamp returned by broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended.
log_start_offsetThe log start offset.
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+
+
Produce Response (Version: 7) => [responses] throttle_time_ms 
+  responses => name [partition_responses] 
+    name => STRING
+    partition_responses => index error_code base_offset log_append_time_ms log_start_offset 
+      index => INT32
+      error_code => INT16
+      base_offset => INT64
+      log_append_time_ms => INT64
+      log_start_offset => INT64
+  throttle_time_ms => INT32
+

Response header version: 0

+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
responsesEach produce response.
nameThe topic name.
partition_responsesEach partition that we produced to within the topic.
indexThe partition index.
error_codeThe error code, or 0 if there was no error.
base_offsetThe base offset.
log_append_time_msThe timestamp returned by broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended.
log_start_offsetThe log start offset.
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+
+
Produce Response (Version: 8) => [responses] throttle_time_ms 
+  responses => name [partition_responses] 
+    name => STRING
+    partition_responses => index error_code base_offset log_append_time_ms log_start_offset [record_errors] error_message 
+      index => INT32
+      error_code => INT16
+      base_offset => INT64
+      log_append_time_ms => INT64
+      log_start_offset => INT64
+      record_errors => batch_index batch_index_error_message 
+        batch_index => INT32
+        batch_index_error_message => NULLABLE_STRING
+      error_message => NULLABLE_STRING
+  throttle_time_ms => INT32
+

Response header version: 0

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
responsesEach produce response.
nameThe topic name.
partition_responsesEach partition that we produced to within the topic.
indexThe partition index.
error_codeThe error code, or 0 if there was no error.
base_offsetThe base offset.
log_append_time_msThe timestamp returned by broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended.
log_start_offsetThe log start offset.
record_errorsThe batch indices of records that caused the batch to be dropped.
batch_indexThe batch index of the record that caused the batch to be dropped.
batch_index_error_messageThe error message of the record that caused the batch to be dropped.
error_messageThe global error message summarizing the common root cause of the records that caused the batch to be dropped.
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+
+
Produce Response (Version: 9) => [responses] throttle_time_ms _tagged_fields 
+  responses => name [partition_responses] _tagged_fields 
+    name => COMPACT_STRING
+    partition_responses => index error_code base_offset log_append_time_ms log_start_offset [record_errors] error_message _tagged_fields 
+      index => INT32
+      error_code => INT16
+      base_offset => INT64
+      log_append_time_ms => INT64
+      log_start_offset => INT64
+      record_errors => batch_index batch_index_error_message _tagged_fields 
+        batch_index => INT32
+        batch_index_error_message => COMPACT_NULLABLE_STRING
+      error_message => COMPACT_NULLABLE_STRING
+  throttle_time_ms => INT32
+

Response header version: 1

| Field | Description |
| --- | --- |
| responses | Each produce response. |
| name | The topic name. |
| partition_responses | Each partition that we produced to within the topic. |
| index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
| base_offset | The base offset. |
| log_append_time_ms | The timestamp returned by the broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended. |
| log_start_offset | The log start offset. |
| record_errors | The batch indices of records that caused the batch to be dropped. |
| batch_index | The batch index of the record that caused the batch to be dropped. |
| batch_index_error_message | The error message of the record that caused the batch to be dropped. |
| _tagged_fields | The tagged fields |
| error_message | The global error message summarizing the common root cause of the records that caused the batch to be dropped. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| _tagged_fields | The tagged fields |

Produce Response (Version: 10) => [responses] throttle_time_ms _tagged_fields 
+  responses => name [partition_responses] _tagged_fields 
+    name => COMPACT_STRING
+    partition_responses => index error_code base_offset log_append_time_ms log_start_offset [record_errors] error_message _tagged_fields 
+      index => INT32
+      error_code => INT16
+      base_offset => INT64
+      log_append_time_ms => INT64
+      log_start_offset => INT64
+      record_errors => batch_index batch_index_error_message _tagged_fields 
+        batch_index => INT32
+        batch_index_error_message => COMPACT_NULLABLE_STRING
+      error_message => COMPACT_NULLABLE_STRING
+  throttle_time_ms => INT32
+

Response header version: 1

| Field | Description |
| --- | --- |
| responses | Each produce response. |
| name | The topic name. |
| partition_responses | Each partition that we produced to within the topic. |
| index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
| base_offset | The base offset. |
| log_append_time_ms | The timestamp returned by the broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended. |
| log_start_offset | The log start offset. |
| record_errors | The batch indices of records that caused the batch to be dropped. |
| batch_index | The batch index of the record that caused the batch to be dropped. |
| batch_index_error_message | The error message of the record that caused the batch to be dropped. |
| _tagged_fields | The tagged fields |
| error_message | The global error message summarizing the common root cause of the records that caused the batch to be dropped. |
| _tagged_fields | Tag 0: current_leader. The leader broker that the producer should use for future requests. Fields: leader_id (the ID of the current leader or -1 if the leader is unknown), leader_epoch (the latest known leader epoch), _tagged_fields (the tagged fields). |
| _tagged_fields | The tagged fields |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| _tagged_fields | Tag 0: node_endpoints. Endpoints for all current-leaders enumerated in PartitionProduceResponses, with errors NOT_LEADER_OR_FOLLOWER. Fields: node_id (the ID of the associated node), host (the node's hostname), port (the node's port), rack (the rack of the node, or null if it has not been assigned to a rack), _tagged_fields (the tagged fields). |

Produce Response (Version: 11) => [responses] throttle_time_ms _tagged_fields 
+  responses => name [partition_responses] _tagged_fields 
+    name => COMPACT_STRING
+    partition_responses => index error_code base_offset log_append_time_ms log_start_offset [record_errors] error_message _tagged_fields 
+      index => INT32
+      error_code => INT16
+      base_offset => INT64
+      log_append_time_ms => INT64
+      log_start_offset => INT64
+      record_errors => batch_index batch_index_error_message _tagged_fields 
+        batch_index => INT32
+        batch_index_error_message => COMPACT_NULLABLE_STRING
+      error_message => COMPACT_NULLABLE_STRING
+  throttle_time_ms => INT32
+

Response header version: 1

| Field | Description |
| --- | --- |
| responses | Each produce response. |
| name | The topic name. |
| partition_responses | Each partition that we produced to within the topic. |
| index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
| base_offset | The base offset. |
| log_append_time_ms | The timestamp returned by the broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended. |
| log_start_offset | The log start offset. |
| record_errors | The batch indices of records that caused the batch to be dropped. |
| batch_index | The batch index of the record that caused the batch to be dropped. |
| batch_index_error_message | The error message of the record that caused the batch to be dropped. |
| _tagged_fields | The tagged fields |
| error_message | The global error message summarizing the common root cause of the records that caused the batch to be dropped. |
| _tagged_fields | Tag 0: current_leader. The leader broker that the producer should use for future requests. Fields: leader_id (the ID of the current leader or -1 if the leader is unknown), leader_epoch (the latest known leader epoch), _tagged_fields (the tagged fields). |
| _tagged_fields | The tagged fields |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| _tagged_fields | Tag 0: node_endpoints. Endpoints for all current-leaders enumerated in PartitionProduceResponses, with errors NOT_LEADER_OR_FOLLOWER. Fields: node_id (the ID of the associated node), host (the node's hostname), port (the node's port), rack (the rack of the node, or null if it has not been assigned to a rack), _tagged_fields (the tagged fields). |

Produce Response (Version: 12) => [responses] throttle_time_ms _tagged_fields 
+  responses => name [partition_responses] _tagged_fields 
+    name => COMPACT_STRING
+    partition_responses => index error_code base_offset log_append_time_ms log_start_offset [record_errors] error_message _tagged_fields 
+      index => INT32
+      error_code => INT16
+      base_offset => INT64
+      log_append_time_ms => INT64
+      log_start_offset => INT64
+      record_errors => batch_index batch_index_error_message _tagged_fields 
+        batch_index => INT32
+        batch_index_error_message => COMPACT_NULLABLE_STRING
+      error_message => COMPACT_NULLABLE_STRING
+  throttle_time_ms => INT32
+

Response header version: 1

| Field | Description |
| --- | --- |
| responses | Each produce response. |
| name | The topic name. |
| partition_responses | Each partition that we produced to within the topic. |
| index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
| base_offset | The base offset. |
| log_append_time_ms | The timestamp returned by the broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended. |
| log_start_offset | The log start offset. |
| record_errors | The batch indices of records that caused the batch to be dropped. |
| batch_index | The batch index of the record that caused the batch to be dropped. |
| batch_index_error_message | The error message of the record that caused the batch to be dropped. |
| _tagged_fields | The tagged fields |
| error_message | The global error message summarizing the common root cause of the records that caused the batch to be dropped. |
| _tagged_fields | Tag 0: current_leader. The leader broker that the producer should use for future requests. Fields: leader_id (the ID of the current leader or -1 if the leader is unknown), leader_epoch (the latest known leader epoch), _tagged_fields (the tagged fields). |
| _tagged_fields | The tagged fields |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| _tagged_fields | Tag 0: node_endpoints. Endpoints for all current-leaders enumerated in PartitionProduceResponses, with errors NOT_LEADER_OR_FOLLOWER. Fields: node_id (the ID of the associated node), host (the node's hostname), port (the node's port), rack (the rack of the node, or null if it has not been assigned to a rack), _tagged_fields (the tagged fields). |

Produce Response (Version: 13) => [responses] throttle_time_ms _tagged_fields 
+  responses => topic_id [partition_responses] _tagged_fields 
+    topic_id => UUID
+    partition_responses => index error_code base_offset log_append_time_ms log_start_offset [record_errors] error_message _tagged_fields 
+      index => INT32
+      error_code => INT16
+      base_offset => INT64
+      log_append_time_ms => INT64
+      log_start_offset => INT64
+      record_errors => batch_index batch_index_error_message _tagged_fields 
+        batch_index => INT32
+        batch_index_error_message => COMPACT_NULLABLE_STRING
+      error_message => COMPACT_NULLABLE_STRING
+  throttle_time_ms => INT32
+

Response header version: 1

| Field | Description |
| --- | --- |
| responses | Each produce response. |
| topic_id | The unique topic ID. |
| partition_responses | Each partition that we produced to within the topic. |
| index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
| base_offset | The base offset. |
| log_append_time_ms | The timestamp returned by the broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended. |
| log_start_offset | The log start offset. |
| record_errors | The batch indices of records that caused the batch to be dropped. |
| batch_index | The batch index of the record that caused the batch to be dropped. |
| batch_index_error_message | The error message of the record that caused the batch to be dropped. |
| _tagged_fields | The tagged fields |
| error_message | The global error message summarizing the common root cause of the records that caused the batch to be dropped. |
| _tagged_fields | Tag 0: current_leader. The leader broker that the producer should use for future requests. Fields: leader_id (the ID of the current leader or -1 if the leader is unknown), leader_epoch (the latest known leader epoch), _tagged_fields (the tagged fields). |
| _tagged_fields | The tagged fields |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| _tagged_fields | Tag 0: node_endpoints. Endpoints for all current-leaders enumerated in PartitionProduceResponses, with errors NOT_LEADER_OR_FOLLOWER. Fields: node_id (the ID of the associated node), host (the node's hostname), port (the node's port), rack (the rack of the node, or null if it has not been assigned to a rack), _tagged_fields (the tagged fields). |

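On the client side, the per-partition fields above surface through the Java producer's `RecordMetadata`: `base_offset` (plus the record's position within the batch) backs `offset()`, and `log_append_time_ms` backs `timestamp()` when the topic uses LogAppendTime, while errors arrive as the callback's exception. A minimal sketch, assuming a local broker and a topic named `demo-topic` (both placeholders):

```java
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

public class ProduceResponseDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                  "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                  "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.ACKS_CONFIG, "all");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "key", "value"),
                (metadata, exception) -> {
                    if (exception != null) {
                        // Maps to error_code / error_message in the Produce response.
                        exception.printStackTrace();
                    } else {
                        // offset() is derived from base_offset; timestamp() reflects
                        // log_append_time_ms when the topic uses LogAppendTime.
                        System.out.printf("partition=%d offset=%d timestamp=%d%n",
                            metadata.partition(), metadata.offset(), metadata.timestamp());
                    }
                });
        }
    }
}
```
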
Fetch API (Key: 1):
Requests:

Fetch Request (Version: 4) => replica_id max_wait_ms min_bytes max_bytes isolation_level [topics] 
+  replica_id => INT32
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  topics => topic [partitions] 
+    topic => STRING
+    partitions => partition fetch_offset partition_max_bytes 
+      partition => INT32
+      fetch_offset => INT64
+      partition_max_bytes => INT32
+

Request header version: 1

| Field | Description |
| --- | --- |
| replica_id | The broker ID of the follower, or -1 if this request is from a consumer. |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| topics | The topics to fetch. |
| topic | The name of the topic to fetch. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| fetch_offset | The message offset. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |

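For consumers, `isolation_level` is driven by the `isolation.level` client configuration; with `read_committed` the broker returns only data below the LSO together with the aborted-transaction list, so the client can drop ABORTED records. A minimal sketch, assuming a local broker and a topic named `demo-topic` (both placeholders):

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ReadCommittedDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");              // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                  "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                  "org.apache.kafka.common.serialization.StringDeserializer");
        // Sent to the broker as isolation_level = 1 in the Fetch request.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("demo-topic"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset=%d value=%s%n", record.offset(), record.value());
            }
        }
    }
}
```
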
Fetch Request (Version: 5) => replica_id max_wait_ms min_bytes max_bytes isolation_level [topics] 
+  replica_id => INT32
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  topics => topic [partitions] 
+    topic => STRING
+    partitions => partition fetch_offset log_start_offset partition_max_bytes 
+      partition => INT32
+      fetch_offset => INT64
+      log_start_offset => INT64
+      partition_max_bytes => INT32
+

Request header version: 1

| Field | Description |
| --- | --- |
| replica_id | The broker ID of the follower, or -1 if this request is from a consumer. |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| topics | The topics to fetch. |
| topic | The name of the topic to fetch. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| fetch_offset | The message offset. |
| log_start_offset | The earliest available offset of the follower replica. The field is only used when the request is sent by the follower. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |

Fetch Request (Version: 6) => replica_id max_wait_ms min_bytes max_bytes isolation_level [topics] 
+  replica_id => INT32
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  topics => topic [partitions] 
+    topic => STRING
+    partitions => partition fetch_offset log_start_offset partition_max_bytes 
+      partition => INT32
+      fetch_offset => INT64
+      log_start_offset => INT64
+      partition_max_bytes => INT32
+

Request header version: 1

| Field | Description |
| --- | --- |
| replica_id | The broker ID of the follower, or -1 if this request is from a consumer. |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| topics | The topics to fetch. |
| topic | The name of the topic to fetch. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| fetch_offset | The message offset. |
| log_start_offset | The earliest available offset of the follower replica. The field is only used when the request is sent by the follower. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |

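For consumers, the sizing and wait fields of the Fetch request map directly onto client configuration: `fetch.max.wait.ms` sets max_wait_ms, `fetch.min.bytes` sets min_bytes, `fetch.max.bytes` sets max_bytes, and `max.partition.fetch.bytes` sets partition_max_bytes. A minimal sketch of the relevant settings (the broker address and the values shown are illustrative placeholders):

```java
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class FetchTuningDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");              // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                  "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                  "org.apache.kafka.common.serialization.StringDeserializer");

        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);              // max_wait_ms
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                  // min_bytes
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);           // max_bytes
        props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 1048576);  // partition_max_bytes

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Fetch requests issued by this consumer carry the limits configured above.
        }
    }
}
```
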
Fetch Request (Version: 7) => replica_id max_wait_ms min_bytes max_bytes isolation_level session_id session_epoch [topics] [forgotten_topics_data] 
+  replica_id => INT32
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  session_id => INT32
+  session_epoch => INT32
+  topics => topic [partitions] 
+    topic => STRING
+    partitions => partition fetch_offset log_start_offset partition_max_bytes 
+      partition => INT32
+      fetch_offset => INT64
+      log_start_offset => INT64
+      partition_max_bytes => INT32
+  forgotten_topics_data => topic [partitions] 
+    topic => STRING
+    partitions => INT32
+

Request header version: 1

| Field | Description |
| --- | --- |
| replica_id | The broker ID of the follower, or -1 if this request is from a consumer. |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| session_id | The fetch session ID. |
| session_epoch | The fetch session epoch, which is used for ordering requests in a session. |
| topics | The topics to fetch. |
| topic | The name of the topic to fetch. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| fetch_offset | The message offset. |
| log_start_offset | The earliest available offset of the follower replica. The field is only used when the request is sent by the follower. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |
| forgotten_topics_data | In an incremental fetch request, the partitions to remove. |
| topic | The topic name. |
| partitions | The partition indexes to forget. |

Fetch Request (Version: 8) => replica_id max_wait_ms min_bytes max_bytes isolation_level session_id session_epoch [topics] [forgotten_topics_data] 
+  replica_id => INT32
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  session_id => INT32
+  session_epoch => INT32
+  topics => topic [partitions] 
+    topic => STRING
+    partitions => partition fetch_offset log_start_offset partition_max_bytes 
+      partition => INT32
+      fetch_offset => INT64
+      log_start_offset => INT64
+      partition_max_bytes => INT32
+  forgotten_topics_data => topic [partitions] 
+    topic => STRING
+    partitions => INT32
+

Request header version: 1

| Field | Description |
| --- | --- |
| replica_id | The broker ID of the follower, or -1 if this request is from a consumer. |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| session_id | The fetch session ID. |
| session_epoch | The fetch session epoch, which is used for ordering requests in a session. |
| topics | The topics to fetch. |
| topic | The name of the topic to fetch. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| fetch_offset | The message offset. |
| log_start_offset | The earliest available offset of the follower replica. The field is only used when the request is sent by the follower. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |
| forgotten_topics_data | In an incremental fetch request, the partitions to remove. |
| topic | The topic name. |
| partitions | The partition indexes to forget. |

Fetch Request (Version: 9) => replica_id max_wait_ms min_bytes max_bytes isolation_level session_id session_epoch [topics] [forgotten_topics_data] 
+  replica_id => INT32
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  session_id => INT32
+  session_epoch => INT32
+  topics => topic [partitions] 
+    topic => STRING
+    partitions => partition current_leader_epoch fetch_offset log_start_offset partition_max_bytes 
+      partition => INT32
+      current_leader_epoch => INT32
+      fetch_offset => INT64
+      log_start_offset => INT64
+      partition_max_bytes => INT32
+  forgotten_topics_data => topic [partitions] 
+    topic => STRING
+    partitions => INT32
+

Request header version: 1

| Field | Description |
| --- | --- |
| replica_id | The broker ID of the follower, or -1 if this request is from a consumer. |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| session_id | The fetch session ID. |
| session_epoch | The fetch session epoch, which is used for ordering requests in a session. |
| topics | The topics to fetch. |
| topic | The name of the topic to fetch. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| current_leader_epoch | The current leader epoch of the partition. |
| fetch_offset | The message offset. |
| log_start_offset | The earliest available offset of the follower replica. The field is only used when the request is sent by the follower. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |
| forgotten_topics_data | In an incremental fetch request, the partitions to remove. |
| topic | The topic name. |
| partitions | The partition indexes to forget. |

Fetch Request (Version: 10) => replica_id max_wait_ms min_bytes max_bytes isolation_level session_id session_epoch [topics] [forgotten_topics_data] 
+  replica_id => INT32
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  session_id => INT32
+  session_epoch => INT32
+  topics => topic [partitions] 
+    topic => STRING
+    partitions => partition current_leader_epoch fetch_offset log_start_offset partition_max_bytes 
+      partition => INT32
+      current_leader_epoch => INT32
+      fetch_offset => INT64
+      log_start_offset => INT64
+      partition_max_bytes => INT32
+  forgotten_topics_data => topic [partitions] 
+    topic => STRING
+    partitions => INT32
+

Request header version: 1

| Field | Description |
| --- | --- |
| replica_id | The broker ID of the follower, or -1 if this request is from a consumer. |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| session_id | The fetch session ID. |
| session_epoch | The fetch session epoch, which is used for ordering requests in a session. |
| topics | The topics to fetch. |
| topic | The name of the topic to fetch. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| current_leader_epoch | The current leader epoch of the partition. |
| fetch_offset | The message offset. |
| log_start_offset | The earliest available offset of the follower replica. The field is only used when the request is sent by the follower. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |
| forgotten_topics_data | In an incremental fetch request, the partitions to remove. |
| topic | The topic name. |
| partitions | The partition indexes to forget. |

Fetch Request (Version: 11) => replica_id max_wait_ms min_bytes max_bytes isolation_level session_id session_epoch [topics] [forgotten_topics_data] rack_id 
+  replica_id => INT32
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  session_id => INT32
+  session_epoch => INT32
+  topics => topic [partitions] 
+    topic => STRING
+    partitions => partition current_leader_epoch fetch_offset log_start_offset partition_max_bytes 
+      partition => INT32
+      current_leader_epoch => INT32
+      fetch_offset => INT64
+      log_start_offset => INT64
+      partition_max_bytes => INT32
+  forgotten_topics_data => topic [partitions] 
+    topic => STRING
+    partitions => INT32
+  rack_id => STRING
+

Request header version: 1

| Field | Description |
| --- | --- |
| replica_id | The broker ID of the follower, or -1 if this request is from a consumer. |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| session_id | The fetch session ID. |
| session_epoch | The fetch session epoch, which is used for ordering requests in a session. |
| topics | The topics to fetch. |
| topic | The name of the topic to fetch. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| current_leader_epoch | The current leader epoch of the partition. |
| fetch_offset | The message offset. |
| log_start_offset | The earliest available offset of the follower replica. The field is only used when the request is sent by the follower. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |
| forgotten_topics_data | In an incremental fetch request, the partitions to remove. |
| topic | The topic name. |
| partitions | The partition indexes to forget. |
| rack_id | Rack ID of the consumer making this request. |

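The `rack_id` field is what a consumer sends when its `client.rack` configuration is set; together with a rack-aware `replica.selector.class` on the brokers, this lets fetches be served from a nearby replica. A minimal sketch, with the broker address and rack name as placeholders:

```java
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class RackAwareFetchDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");              // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                  "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                  "org.apache.kafka.common.serialization.StringDeserializer");
        // Sent as rack_id in Fetch requests (version 11 and later).
        props.put(ConsumerConfig.CLIENT_RACK_CONFIG, "us-west1-a"); // placeholder rack

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Fetches issued by this consumer carry rack_id = "us-west1-a".
        }
    }
}
```
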
Fetch Request (Version: 12) => replica_id max_wait_ms min_bytes max_bytes isolation_level session_id session_epoch [topics] [forgotten_topics_data] rack_id _tagged_fields 
+  replica_id => INT32
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  session_id => INT32
+  session_epoch => INT32
+  topics => topic [partitions] _tagged_fields 
+    topic => COMPACT_STRING
+    partitions => partition current_leader_epoch fetch_offset last_fetched_epoch log_start_offset partition_max_bytes _tagged_fields 
+      partition => INT32
+      current_leader_epoch => INT32
+      fetch_offset => INT64
+      last_fetched_epoch => INT32
+      log_start_offset => INT64
+      partition_max_bytes => INT32
+  forgotten_topics_data => topic [partitions] _tagged_fields 
+    topic => COMPACT_STRING
+    partitions => INT32
+  rack_id => COMPACT_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| replica_id | The broker ID of the follower, or -1 if this request is from a consumer. |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| session_id | The fetch session ID. |
| session_epoch | The fetch session epoch, which is used for ordering requests in a session. |
| topics | The topics to fetch. |
| topic | The name of the topic to fetch. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| current_leader_epoch | The current leader epoch of the partition. |
| fetch_offset | The message offset. |
| last_fetched_epoch | The epoch of the last fetched record or -1 if there is none. |
| log_start_offset | The earliest available offset of the follower replica. The field is only used when the request is sent by the follower. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| forgotten_topics_data | In an incremental fetch request, the partitions to remove. |
| topic | The topic name. |
| partitions | The partition indexes to forget. |
| _tagged_fields | The tagged fields |
| rack_id | Rack ID of the consumer making this request. |
| _tagged_fields | Tag 0: cluster_id. The clusterId if known. This is used to validate metadata fetches prior to broker registration. |

Fetch Request (Version: 13) => replica_id max_wait_ms min_bytes max_bytes isolation_level session_id session_epoch [topics] [forgotten_topics_data] rack_id _tagged_fields 
+  replica_id => INT32
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  session_id => INT32
+  session_epoch => INT32
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition current_leader_epoch fetch_offset last_fetched_epoch log_start_offset partition_max_bytes _tagged_fields 
+      partition => INT32
+      current_leader_epoch => INT32
+      fetch_offset => INT64
+      last_fetched_epoch => INT32
+      log_start_offset => INT64
+      partition_max_bytes => INT32
+  forgotten_topics_data => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => INT32
+  rack_id => COMPACT_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| replica_id | The broker ID of the follower, or -1 if this request is from a consumer. |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| session_id | The fetch session ID. |
| session_epoch | The fetch session epoch, which is used for ordering requests in a session. |
| topics | The topics to fetch. |
| topic_id | The unique topic ID. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| current_leader_epoch | The current leader epoch of the partition. |
| fetch_offset | The message offset. |
| last_fetched_epoch | The epoch of the last fetched record or -1 if there is none. |
| log_start_offset | The earliest available offset of the follower replica. The field is only used when the request is sent by the follower. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| forgotten_topics_data | In an incremental fetch request, the partitions to remove. |
| topic_id | The unique topic ID. |
| partitions | The partition indexes to forget. |
| _tagged_fields | The tagged fields |
| rack_id | Rack ID of the consumer making this request. |
| _tagged_fields | Tag 0: cluster_id. The clusterId if known. This is used to validate metadata fetches prior to broker registration. |

Fetch Request (Version: 14) => replica_id max_wait_ms min_bytes max_bytes isolation_level session_id session_epoch [topics] [forgotten_topics_data] rack_id _tagged_fields 
+  replica_id => INT32
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  session_id => INT32
+  session_epoch => INT32
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition current_leader_epoch fetch_offset last_fetched_epoch log_start_offset partition_max_bytes _tagged_fields 
+      partition => INT32
+      current_leader_epoch => INT32
+      fetch_offset => INT64
+      last_fetched_epoch => INT32
+      log_start_offset => INT64
+      partition_max_bytes => INT32
+  forgotten_topics_data => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => INT32
+  rack_id => COMPACT_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| replica_id | The broker ID of the follower, or -1 if this request is from a consumer. |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| session_id | The fetch session ID. |
| session_epoch | The fetch session epoch, which is used for ordering requests in a session. |
| topics | The topics to fetch. |
| topic_id | The unique topic ID. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| current_leader_epoch | The current leader epoch of the partition. |
| fetch_offset | The message offset. |
| last_fetched_epoch | The epoch of the last fetched record or -1 if there is none. |
| log_start_offset | The earliest available offset of the follower replica. The field is only used when the request is sent by the follower. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| forgotten_topics_data | In an incremental fetch request, the partitions to remove. |
| topic_id | The unique topic ID. |
| partitions | The partition indexes to forget. |
| _tagged_fields | The tagged fields |
| rack_id | Rack ID of the consumer making this request. |
| _tagged_fields | Tag 0: cluster_id. The clusterId if known. This is used to validate metadata fetches prior to broker registration. |

Fetch Request (Version: 15) => max_wait_ms min_bytes max_bytes isolation_level session_id session_epoch [topics] [forgotten_topics_data] rack_id _tagged_fields 
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  session_id => INT32
+  session_epoch => INT32
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition current_leader_epoch fetch_offset last_fetched_epoch log_start_offset partition_max_bytes _tagged_fields 
+      partition => INT32
+      current_leader_epoch => INT32
+      fetch_offset => INT64
+      last_fetched_epoch => INT32
+      log_start_offset => INT64
+      partition_max_bytes => INT32
+  forgotten_topics_data => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => INT32
+  rack_id => COMPACT_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| session_id | The fetch session ID. |
| session_epoch | The fetch session epoch, which is used for ordering requests in a session. |
| topics | The topics to fetch. |
| topic_id | The unique topic ID. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| current_leader_epoch | The current leader epoch of the partition. |
| fetch_offset | The message offset. |
| last_fetched_epoch | The epoch of the last fetched record or -1 if there is none. |
| log_start_offset | The earliest available offset of the follower replica. The field is only used when the request is sent by the follower. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| forgotten_topics_data | In an incremental fetch request, the partitions to remove. |
| topic_id | The unique topic ID. |
| partitions | The partition indexes to forget. |
| _tagged_fields | The tagged fields |
| rack_id | Rack ID of the consumer making this request. |
| _tagged_fields | Tag 0: cluster_id. The clusterId if known. This is used to validate metadata fetches prior to broker registration. Tag 1: replica_state. The state of the replica in the follower. Fields: replica_id (the replica ID of the follower, or -1 if this request is from a consumer), replica_epoch (the epoch of this follower, or -1 if not available), _tagged_fields (the tagged fields). |

Fetch Request (Version: 16) => max_wait_ms min_bytes max_bytes isolation_level session_id session_epoch [topics] [forgotten_topics_data] rack_id _tagged_fields 
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  session_id => INT32
+  session_epoch => INT32
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition current_leader_epoch fetch_offset last_fetched_epoch log_start_offset partition_max_bytes _tagged_fields 
+      partition => INT32
+      current_leader_epoch => INT32
+      fetch_offset => INT64
+      last_fetched_epoch => INT32
+      log_start_offset => INT64
+      partition_max_bytes => INT32
+  forgotten_topics_data => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => INT32
+  rack_id => COMPACT_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| session_id | The fetch session ID. |
| session_epoch | The fetch session epoch, which is used for ordering requests in a session. |
| topics | The topics to fetch. |
| topic_id | The unique topic ID. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| current_leader_epoch | The current leader epoch of the partition. |
| fetch_offset | The message offset. |
| last_fetched_epoch | The epoch of the last fetched record or -1 if there is none. |
| log_start_offset | The earliest available offset of the follower replica. The field is only used when the request is sent by the follower. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| forgotten_topics_data | In an incremental fetch request, the partitions to remove. |
| topic_id | The unique topic ID. |
| partitions | The partition indexes to forget. |
| _tagged_fields | The tagged fields |
| rack_id | Rack ID of the consumer making this request. |
| _tagged_fields | Tag 0: cluster_id. The clusterId if known. This is used to validate metadata fetches prior to broker registration. Tag 1: replica_state. The state of the replica in the follower. Fields: replica_id (the replica ID of the follower, or -1 if this request is from a consumer), replica_epoch (the epoch of this follower, or -1 if not available), _tagged_fields (the tagged fields). |

Fetch Request (Version: 17) => max_wait_ms min_bytes max_bytes isolation_level session_id session_epoch [topics] [forgotten_topics_data] rack_id _tagged_fields 
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  session_id => INT32
+  session_epoch => INT32
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition current_leader_epoch fetch_offset last_fetched_epoch log_start_offset partition_max_bytes _tagged_fields 
+      partition => INT32
+      current_leader_epoch => INT32
+      fetch_offset => INT64
+      last_fetched_epoch => INT32
+      log_start_offset => INT64
+      partition_max_bytes => INT32
+  forgotten_topics_data => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => INT32
+  rack_id => COMPACT_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| session_id | The fetch session ID. |
| session_epoch | The fetch session epoch, which is used for ordering requests in a session. |
| topics | The topics to fetch. |
| topic_id | The unique topic ID. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| current_leader_epoch | The current leader epoch of the partition. |
| fetch_offset | The message offset. |
| last_fetched_epoch | The epoch of the last fetched record or -1 if there is none. |
| log_start_offset | The earliest available offset of the follower replica. The field is only used when the request is sent by the follower. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |
| _tagged_fields | Tag 0: replica_directory_id. The directory id of the follower fetching. |
| _tagged_fields | The tagged fields |
| forgotten_topics_data | In an incremental fetch request, the partitions to remove. |
| topic_id | The unique topic ID. |
| partitions | The partition indexes to forget. |
| _tagged_fields | The tagged fields |
| rack_id | Rack ID of the consumer making this request. |
| _tagged_fields | Tag 0: cluster_id. The clusterId if known. This is used to validate metadata fetches prior to broker registration. Tag 1: replica_state. The state of the replica in the follower. Fields: replica_id (the replica ID of the follower, or -1 if this request is from a consumer), replica_epoch (the epoch of this follower, or -1 if not available), _tagged_fields (the tagged fields). |

Fetch Request (Version: 18) => max_wait_ms min_bytes max_bytes isolation_level session_id session_epoch [topics] [forgotten_topics_data] rack_id _tagged_fields 
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  isolation_level => INT8
+  session_id => INT32
+  session_epoch => INT32
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition current_leader_epoch fetch_offset last_fetched_epoch log_start_offset partition_max_bytes _tagged_fields 
+      partition => INT32
+      current_leader_epoch => INT32
+      fetch_offset => INT64
+      last_fetched_epoch => INT32
+      log_start_offset => INT64
+      partition_max_bytes => INT32
+  forgotten_topics_data => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => INT32
+  rack_id => COMPACT_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| session_id | The fetch session ID. |
| session_epoch | The fetch session epoch, which is used for ordering requests in a session. |
| topics | The topics to fetch. |
| topic_id | The unique topic ID. |
| partitions | The partitions to fetch. |
| partition | The partition index. |
| current_leader_epoch | The current leader epoch of the partition. |
| fetch_offset | The message offset. |
| last_fetched_epoch | The epoch of the last fetched record or -1 if there is none. |
| log_start_offset | The earliest available offset of the follower replica. The field is only used when the request is sent by the follower. |
| partition_max_bytes | The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored. |
| _tagged_fields | Tag 0: replica_directory_id. The directory id of the follower fetching. Tag 1: high_watermark. The high-watermark known by the replica. -1 if the high-watermark is not known and 9223372036854775807 if the feature is not supported. |
| _tagged_fields | The tagged fields |
| forgotten_topics_data | In an incremental fetch request, the partitions to remove. |
| topic_id | The unique topic ID. |
| partitions | The partition indexes to forget. |
| _tagged_fields | The tagged fields |
| rack_id | Rack ID of the consumer making this request. |
| _tagged_fields | Tag 0: cluster_id. The clusterId if known. This is used to validate metadata fetches prior to broker registration. Tag 1: replica_state. The state of the replica in the follower. Fields: replica_id (the replica ID of the follower, or -1 if this request is from a consumer), replica_epoch (the epoch of this follower, or -1 if not available), _tagged_fields (the tagged fields). |

Responses:

Fetch Response (Version: 4) => throttle_time_ms [responses] 
+  throttle_time_ms => INT32
+  responses => topic [partitions] 
+    topic => STRING
+    partitions => partition_index error_code high_watermark last_stable_offset [aborted_transactions] records 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      aborted_transactions => producer_id first_offset 
+        producer_id => INT64
+        first_offset => INT64
+      records => RECORDS
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| responses | The response topics. |
| topic | The topic name. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset has been decided (ABORTED or COMMITTED). |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| records | The record data. |

Fetch Response (Version: 5) => throttle_time_ms [responses] 
+  throttle_time_ms => INT32
+  responses => topic [partitions] 
+    topic => STRING
+    partitions => partition_index error_code high_watermark last_stable_offset log_start_offset [aborted_transactions] records 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      log_start_offset => INT64
+      aborted_transactions => producer_id first_offset 
+        producer_id => INT64
+        first_offset => INT64
+      records => RECORDS
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| responses | The response topics. |
| topic | The topic name. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset has been decided (ABORTED or COMMITTED). |
| log_start_offset | The current log start offset. |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| records | The record data. |

Fetch Response (Version: 6) => throttle_time_ms [responses] 
+  throttle_time_ms => INT32
+  responses => topic [partitions] 
+    topic => STRING
+    partitions => partition_index error_code high_watermark last_stable_offset log_start_offset [aborted_transactions] records 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      log_start_offset => INT64
+      aborted_transactions => producer_id first_offset 
+        producer_id => INT64
+        first_offset => INT64
+      records => RECORDS
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| responses | The response topics. |
| topic | The topic name. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset has been decided (ABORTED or COMMITTED). |
| log_start_offset | The current log start offset. |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| records | The record data. |

Fetch Response (Version: 7) => throttle_time_ms error_code session_id [responses] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  session_id => INT32
+  responses => topic [partitions] 
+    topic => STRING
+    partitions => partition_index error_code high_watermark last_stable_offset log_start_offset [aborted_transactions] records 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      log_start_offset => INT64
+      aborted_transactions => producer_id first_offset 
+        producer_id => INT64
+        first_offset => INT64
+      records => RECORDS
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| session_id | The fetch session ID, or 0 if this is not part of a fetch session. |
| responses | The response topics. |
| topic | The topic name. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset has been decided (ABORTED or COMMITTED). |
| log_start_offset | The current log start offset. |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| records | The record data. |

Fetch Response (Version: 8) => throttle_time_ms error_code session_id [responses] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  session_id => INT32
+  responses => topic [partitions] 
+    topic => STRING
+    partitions => partition_index error_code high_watermark last_stable_offset log_start_offset [aborted_transactions] records 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      log_start_offset => INT64
+      aborted_transactions => producer_id first_offset 
+        producer_id => INT64
+        first_offset => INT64
+      records => RECORDS
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| session_id | The fetch session ID, or 0 if this is not part of a fetch session. |
| responses | The response topics. |
| topic | The topic name. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED). |
| log_start_offset | The current log start offset. |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| records | The record data. |
+
+
Fetch Response (Version: 9) => throttle_time_ms error_code session_id [responses] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  session_id => INT32
+  responses => topic [partitions] 
+    topic => STRING
+    partitions => partition_index error_code high_watermark last_stable_offset log_start_offset [aborted_transactions] records 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      log_start_offset => INT64
+      aborted_transactions => producer_id first_offset 
+        producer_id => INT64
+        first_offset => INT64
+      records => RECORDS
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| session_id | The fetch session ID, or 0 if this is not part of a fetch session. |
| responses | The response topics. |
| topic | The topic name. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED). |
| log_start_offset | The current log start offset. |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| records | The record data. |
+
+
Fetch Response (Version: 10) => throttle_time_ms error_code session_id [responses] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  session_id => INT32
+  responses => topic [partitions] 
+    topic => STRING
+    partitions => partition_index error_code high_watermark last_stable_offset log_start_offset [aborted_transactions] records 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      log_start_offset => INT64
+      aborted_transactions => producer_id first_offset 
+        producer_id => INT64
+        first_offset => INT64
+      records => RECORDS
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| session_id | The fetch session ID, or 0 if this is not part of a fetch session. |
| responses | The response topics. |
| topic | The topic name. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED). |
| log_start_offset | The current log start offset. |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| records | The record data. |
+
+
Fetch Response (Version: 11) => throttle_time_ms error_code session_id [responses] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  session_id => INT32
+  responses => topic [partitions] 
+    topic => STRING
+    partitions => partition_index error_code high_watermark last_stable_offset log_start_offset [aborted_transactions] preferred_read_replica records 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      log_start_offset => INT64
+      aborted_transactions => producer_id first_offset 
+        producer_id => INT64
+        first_offset => INT64
+      preferred_read_replica => INT32
+      records => RECORDS
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| session_id | The fetch session ID, or 0 if this is not part of a fetch session. |
| responses | The response topics. |
| topic | The topic name. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED). |
| log_start_offset | The current log start offset. |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| preferred_read_replica | The preferred read replica for the consumer to use on its next fetch request. |
| records | The record data. |
+
+
Fetch Response (Version: 12) => throttle_time_ms error_code session_id [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  session_id => INT32
+  responses => topic [partitions] _tagged_fields 
+    topic => COMPACT_STRING
+    partitions => partition_index error_code high_watermark last_stable_offset log_start_offset [aborted_transactions] preferred_read_replica records _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      log_start_offset => INT64
+      aborted_transactions => producer_id first_offset _tagged_fields 
+        producer_id => INT64
+        first_offset => INT64
+      preferred_read_replica => INT32
+      records => COMPACT_RECORDS
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| session_id | The fetch session ID, or 0 if this is not part of a fetch session. |
| responses | The response topics. |
| topic | The topic name. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED). |
| log_start_offset | The current log start offset. |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| _tagged_fields | The tagged fields |
| preferred_read_replica | The preferred read replica for the consumer to use on its next fetch request. |
| records | The record data. |
| _tagged_fields | The tagged fields (per-partition tagged fields; see the table below) |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |

Per-partition tagged fields:

| Tag | Tagged field | Description |
|---|---|---|
| 0 | diverging_epoch | In case divergence is detected based on the `LastFetchedEpoch` and `FetchOffset` in the request, this field indicates the largest epoch and its end offset such that subsequent records are known to diverge. Fields: epoch (the largest epoch), end_offset (the end offset of the epoch), _tagged_fields (the tagged fields). |
| 1 | current_leader | The current leader of the partition. Fields: leader_id (the ID of the current leader or -1 if the leader is unknown), leader_epoch (the latest known leader epoch), _tagged_fields (the tagged fields). |
| 2 | snapshot_id | In the case of fetching an offset less than the LogStartOffset, this is the end offset and epoch that should be used in the FetchSnapshot request. Fields: end_offset (the end offset of the epoch), epoch (the largest epoch), _tagged_fields (the tagged fields). |
+
+
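Starting with version 12 the Fetch response is a flexible-version message: strings become COMPACT_STRING, records become COMPACT_RECORDS, and every structure carries a trailing _tagged_fields section. The Java sketch below is purely illustrative (it is not code from the Kafka code base) and shows the unsigned-varint framing that compact lengths and tagged-field headers are generally described as using; treat the exact framing as an assumption and refer to the protocol guide's common data types for the authoritative rules.

```java
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

// Illustrative sketch only: writes an unsigned varint (base-128, 7 bits per byte,
// continuation bit set on all but the last byte) and uses it to frame a
// COMPACT_STRING as (length + 1) followed by the UTF-8 bytes, as flexible-version
// fields are usually described.
public class CompactEncodingSketch {

    static void writeUnsignedVarint(int value, ByteArrayOutputStream out) {
        while ((value & 0xFFFFFF80) != 0) {
            out.write((value & 0x7F) | 0x80); // more bytes follow
            value >>>= 7;
        }
        out.write(value);
    }

    static void writeCompactString(String s, ByteArrayOutputStream out) {
        byte[] utf8 = s.getBytes(StandardCharsets.UTF_8);
        writeUnsignedVarint(utf8.length + 1, out); // N + 1; a length of 0 denotes null
        out.write(utf8, 0, utf8.length);
    }

    public static void main(String[] args) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        writeCompactString("my-topic", out); // hypothetical topic name
        System.out.println("encoded bytes: " + out.size()); // 1 length byte + 8 payload bytes
    }
}
```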
Fetch Response (Version: 13) => throttle_time_ms error_code session_id [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  session_id => INT32
+  responses => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition_index error_code high_watermark last_stable_offset log_start_offset [aborted_transactions] preferred_read_replica records _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      log_start_offset => INT64
+      aborted_transactions => producer_id first_offset _tagged_fields 
+        producer_id => INT64
+        first_offset => INT64
+      preferred_read_replica => INT32
+      records => COMPACT_RECORDS
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| session_id | The fetch session ID, or 0 if this is not part of a fetch session. |
| responses | The response topics. |
| topic_id | The unique topic ID. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED). |
| log_start_offset | The current log start offset. |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| _tagged_fields | The tagged fields |
| preferred_read_replica | The preferred read replica for the consumer to use on its next fetch request. |
| records | The record data. |
| _tagged_fields | The tagged fields (per-partition tagged fields; see the table below) |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |

Per-partition tagged fields:

| Tag | Tagged field | Description |
|---|---|---|
| 0 | diverging_epoch | In case divergence is detected based on the `LastFetchedEpoch` and `FetchOffset` in the request, this field indicates the largest epoch and its end offset such that subsequent records are known to diverge. Fields: epoch (the largest epoch), end_offset (the end offset of the epoch), _tagged_fields (the tagged fields). |
| 1 | current_leader | The current leader of the partition. Fields: leader_id (the ID of the current leader or -1 if the leader is unknown), leader_epoch (the latest known leader epoch), _tagged_fields (the tagged fields). |
| 2 | snapshot_id | In the case of fetching an offset less than the LogStartOffset, this is the end offset and epoch that should be used in the FetchSnapshot request. Fields: end_offset (the end offset of the epoch), epoch (the largest epoch), _tagged_fields (the tagged fields). |
+
+
Fetch Response (Version: 14) => throttle_time_ms error_code session_id [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  session_id => INT32
+  responses => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition_index error_code high_watermark last_stable_offset log_start_offset [aborted_transactions] preferred_read_replica records _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      log_start_offset => INT64
+      aborted_transactions => producer_id first_offset _tagged_fields 
+        producer_id => INT64
+        first_offset => INT64
+      preferred_read_replica => INT32
+      records => COMPACT_RECORDS
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| session_id | The fetch session ID, or 0 if this is not part of a fetch session. |
| responses | The response topics. |
| topic_id | The unique topic ID. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED). |
| log_start_offset | The current log start offset. |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| _tagged_fields | The tagged fields |
| preferred_read_replica | The preferred read replica for the consumer to use on its next fetch request. |
| records | The record data. |
| _tagged_fields | The tagged fields (per-partition tagged fields; see the table below) |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |

Per-partition tagged fields:

| Tag | Tagged field | Description |
|---|---|---|
| 0 | diverging_epoch | In case divergence is detected based on the `LastFetchedEpoch` and `FetchOffset` in the request, this field indicates the largest epoch and its end offset such that subsequent records are known to diverge. Fields: epoch (the largest epoch), end_offset (the end offset of the epoch), _tagged_fields (the tagged fields). |
| 1 | current_leader | The current leader of the partition. Fields: leader_id (the ID of the current leader or -1 if the leader is unknown), leader_epoch (the latest known leader epoch), _tagged_fields (the tagged fields). |
| 2 | snapshot_id | In the case of fetching an offset less than the LogStartOffset, this is the end offset and epoch that should be used in the FetchSnapshot request. Fields: end_offset (the end offset of the epoch), epoch (the largest epoch), _tagged_fields (the tagged fields). |
+
+
Fetch Response (Version: 15) => throttle_time_ms error_code session_id [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  session_id => INT32
+  responses => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition_index error_code high_watermark last_stable_offset log_start_offset [aborted_transactions] preferred_read_replica records _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      log_start_offset => INT64
+      aborted_transactions => producer_id first_offset _tagged_fields 
+        producer_id => INT64
+        first_offset => INT64
+      preferred_read_replica => INT32
+      records => COMPACT_RECORDS
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| session_id | The fetch session ID, or 0 if this is not part of a fetch session. |
| responses | The response topics. |
| topic_id | The unique topic ID. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED). |
| log_start_offset | The current log start offset. |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| _tagged_fields | The tagged fields |
| preferred_read_replica | The preferred read replica for the consumer to use on its next fetch request. |
| records | The record data. |
| _tagged_fields | The tagged fields (per-partition tagged fields; see the table below) |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |

Per-partition tagged fields:

| Tag | Tagged field | Description |
|---|---|---|
| 0 | diverging_epoch | In case divergence is detected based on the `LastFetchedEpoch` and `FetchOffset` in the request, this field indicates the largest epoch and its end offset such that subsequent records are known to diverge. Fields: epoch (the largest epoch), end_offset (the end offset of the epoch), _tagged_fields (the tagged fields). |
| 1 | current_leader | The current leader of the partition. Fields: leader_id (the ID of the current leader or -1 if the leader is unknown), leader_epoch (the latest known leader epoch), _tagged_fields (the tagged fields). |
| 2 | snapshot_id | In the case of fetching an offset less than the LogStartOffset, this is the end offset and epoch that should be used in the FetchSnapshot request. Fields: end_offset (the end offset of the epoch), epoch (the largest epoch), _tagged_fields (the tagged fields). |
+
+
Fetch Response (Version: 16) => throttle_time_ms error_code session_id [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  session_id => INT32
+  responses => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition_index error_code high_watermark last_stable_offset log_start_offset [aborted_transactions] preferred_read_replica records _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      log_start_offset => INT64
+      aborted_transactions => producer_id first_offset _tagged_fields 
+        producer_id => INT64
+        first_offset => INT64
+      preferred_read_replica => INT32
+      records => COMPACT_RECORDS
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| session_id | The fetch session ID, or 0 if this is not part of a fetch session. |
| responses | The response topics. |
| topic_id | The unique topic ID. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED). |
| log_start_offset | The current log start offset. |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| _tagged_fields | The tagged fields |
| preferred_read_replica | The preferred read replica for the consumer to use on its next fetch request. |
| records | The record data. |
| _tagged_fields | The tagged fields (per-partition tagged fields; see the first table below) |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields (response-level tagged fields; see the second table below) |

Per-partition tagged fields:

| Tag | Tagged field | Description |
|---|---|---|
| 0 | diverging_epoch | In case divergence is detected based on the `LastFetchedEpoch` and `FetchOffset` in the request, this field indicates the largest epoch and its end offset such that subsequent records are known to diverge. Fields: epoch (the largest epoch), end_offset (the end offset of the epoch), _tagged_fields (the tagged fields). |
| 1 | current_leader | The current leader of the partition. Fields: leader_id (the ID of the current leader or -1 if the leader is unknown), leader_epoch (the latest known leader epoch), _tagged_fields (the tagged fields). |
| 2 | snapshot_id | In the case of fetching an offset less than the LogStartOffset, this is the end offset and epoch that should be used in the FetchSnapshot request. Fields: end_offset (the end offset of the epoch), epoch (the largest epoch), _tagged_fields (the tagged fields). |

Response-level tagged fields:

| Tag | Tagged field | Description |
|---|---|---|
| 0 | node_endpoints | Endpoints for all current-leaders enumerated in PartitionData, with errors NOT_LEADER_OR_FOLLOWER & FENCED_LEADER_EPOCH. Fields: node_id (the ID of the associated node), host (the node's hostname), port (the node's port), rack (the rack of the node, or null if it has not been assigned to a rack), _tagged_fields (the tagged fields). |
+
+
+
+
Fetch Response (Version: 17) => throttle_time_ms error_code session_id [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  session_id => INT32
+  responses => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition_index error_code high_watermark last_stable_offset log_start_offset [aborted_transactions] preferred_read_replica records _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      log_start_offset => INT64
+      aborted_transactions => producer_id first_offset _tagged_fields 
+        producer_id => INT64
+        first_offset => INT64
+      preferred_read_replica => INT32
+      records => COMPACT_RECORDS
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| session_id | The fetch session ID, or 0 if this is not part of a fetch session. |
| responses | The response topics. |
| topic_id | The unique topic ID. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED). |
| log_start_offset | The current log start offset. |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| _tagged_fields | The tagged fields |
| preferred_read_replica | The preferred read replica for the consumer to use on its next fetch request. |
| records | The record data. |
| _tagged_fields | The tagged fields (per-partition tagged fields; see the first table below) |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields (response-level tagged fields; see the second table below) |

Per-partition tagged fields:

| Tag | Tagged field | Description |
|---|---|---|
| 0 | diverging_epoch | In case divergence is detected based on the `LastFetchedEpoch` and `FetchOffset` in the request, this field indicates the largest epoch and its end offset such that subsequent records are known to diverge. Fields: epoch (the largest epoch), end_offset (the end offset of the epoch), _tagged_fields (the tagged fields). |
| 1 | current_leader | The current leader of the partition. Fields: leader_id (the ID of the current leader or -1 if the leader is unknown), leader_epoch (the latest known leader epoch), _tagged_fields (the tagged fields). |
| 2 | snapshot_id | In the case of fetching an offset less than the LogStartOffset, this is the end offset and epoch that should be used in the FetchSnapshot request. Fields: end_offset (the end offset of the epoch), epoch (the largest epoch), _tagged_fields (the tagged fields). |

Response-level tagged fields:

| Tag | Tagged field | Description |
|---|---|---|
| 0 | node_endpoints | Endpoints for all current-leaders enumerated in PartitionData, with errors NOT_LEADER_OR_FOLLOWER & FENCED_LEADER_EPOCH. Fields: node_id (the ID of the associated node), host (the node's hostname), port (the node's port), rack (the rack of the node, or null if it has not been assigned to a rack), _tagged_fields (the tagged fields). |
+
+
+
+
Fetch Response (Version: 18) => throttle_time_ms error_code session_id [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  session_id => INT32
+  responses => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition_index error_code high_watermark last_stable_offset log_start_offset [aborted_transactions] preferred_read_replica records _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      high_watermark => INT64
+      last_stable_offset => INT64
+      log_start_offset => INT64
+      aborted_transactions => producer_id first_offset _tagged_fields 
+        producer_id => INT64
+        first_offset => INT64
+      preferred_read_replica => INT32
+      records => COMPACT_RECORDS
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| session_id | The fetch session ID, or 0 if this is not part of a fetch session. |
| responses | The response topics. |
| topic_id | The unique topic ID. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no fetch error. |
| high_watermark | The current high water mark. |
| last_stable_offset | The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED). |
| log_start_offset | The current log start offset. |
| aborted_transactions | The aborted transactions. |
| producer_id | The producer id associated with the aborted transaction. |
| first_offset | The first offset in the aborted transaction. |
| _tagged_fields | The tagged fields |
| preferred_read_replica | The preferred read replica for the consumer to use on its next fetch request. |
| records | The record data. |
| _tagged_fields | The tagged fields (per-partition tagged fields; see the first table below) |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields (response-level tagged fields; see the second table below) |

Per-partition tagged fields:

| Tag | Tagged field | Description |
|---|---|---|
| 0 | diverging_epoch | In case divergence is detected based on the `LastFetchedEpoch` and `FetchOffset` in the request, this field indicates the largest epoch and its end offset such that subsequent records are known to diverge. Fields: epoch (the largest epoch), end_offset (the end offset of the epoch), _tagged_fields (the tagged fields). |
| 1 | current_leader | The current leader of the partition. Fields: leader_id (the ID of the current leader or -1 if the leader is unknown), leader_epoch (the latest known leader epoch), _tagged_fields (the tagged fields). |
| 2 | snapshot_id | In the case of fetching an offset less than the LogStartOffset, this is the end offset and epoch that should be used in the FetchSnapshot request. Fields: end_offset (the end offset of the epoch), epoch (the largest epoch), _tagged_fields (the tagged fields). |

Response-level tagged fields:

| Tag | Tagged field | Description |
|---|---|---|
| 0 | node_endpoints | Endpoints for all current-leaders enumerated in PartitionData, with errors NOT_LEADER_OR_FOLLOWER & FENCED_LEADER_EPOCH. Fields: node_id (the ID of the associated node), host (the node's hostname), port (the node's port), rack (the rack of the node, or null if it has not been assigned to a rack), _tagged_fields (the tagged fields). |
+
+
+
+
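The last_stable_offset and aborted_transactions fields exist to support transactional reads: a consumer configured with isolation.level=read_committed only receives data up to the LSO, and the aborted-transaction markers let the client drop ABORTED records before they reach the application. The Java sketch below is a minimal illustration of that client-side configuration; the bootstrap address, group id, and topic name are placeholders.

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

// Sketch: a read_committed consumer. The broker answers its Fetch requests with data
// bounded by the last stable offset and includes aborted_transactions so the client
// can filter out records from aborted transactions before poll() returns them.
public class ReadCommittedConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("example-topic"));                     // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            records.forEach(r -> System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset()));
        }
    }
}
```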
ListOffsets API (Key: 2):
+
+Requests:
+
ListOffsets Request (Version: 1) => replica_id [topics] 
+  replica_id => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index timestamp 
+      partition_index => INT32
+      timestamp => INT64
+

Request header version: 1

| Field | Description |
|---|---|
| replica_id | The broker ID of the requester, or -1 if this request is being made by a normal consumer. |
| topics | Each topic in the request. |
| name | The topic name. |
| partitions | Each partition in the request. |
| partition_index | The partition index. |
| timestamp | The current timestamp. |
+
+
ListOffsets Request (Version: 2) => replica_id isolation_level [topics] 
+  replica_id => INT32
+  isolation_level => INT8
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index timestamp 
+      partition_index => INT32
+      timestamp => INT64
+

Request header version: 1

| Field | Description |
|---|---|
| replica_id | The broker ID of the requester, or -1 if this request is being made by a normal consumer. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| topics | Each topic in the request. |
| name | The topic name. |
| partitions | Each partition in the request. |
| partition_index | The partition index. |
| timestamp | The current timestamp. |
+
+
ListOffsets Request (Version: 3) => replica_id isolation_level [topics] 
+  replica_id => INT32
+  isolation_level => INT8
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index timestamp 
+      partition_index => INT32
+      timestamp => INT64
+

Request header version: 1

| Field | Description |
|---|---|
| replica_id | The broker ID of the requester, or -1 if this request is being made by a normal consumer. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| topics | Each topic in the request. |
| name | The topic name. |
| partitions | Each partition in the request. |
| partition_index | The partition index. |
| timestamp | The current timestamp. |
+
+
ListOffsets Request (Version: 4) => replica_id isolation_level [topics] 
+  replica_id => INT32
+  isolation_level => INT8
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index current_leader_epoch timestamp 
+      partition_index => INT32
+      current_leader_epoch => INT32
+      timestamp => INT64
+

Request header version: 1

| Field | Description |
|---|---|
| replica_id | The broker ID of the requester, or -1 if this request is being made by a normal consumer. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| topics | Each topic in the request. |
| name | The topic name. |
| partitions | Each partition in the request. |
| partition_index | The partition index. |
| current_leader_epoch | The current leader epoch. |
| timestamp | The current timestamp. |
+
+
ListOffsets Request (Version: 5) => replica_id isolation_level [topics] 
+  replica_id => INT32
+  isolation_level => INT8
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index current_leader_epoch timestamp 
+      partition_index => INT32
+      current_leader_epoch => INT32
+      timestamp => INT64
+

Request header version: 1

| Field | Description |
|---|---|
| replica_id | The broker ID of the requester, or -1 if this request is being made by a normal consumer. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| topics | Each topic in the request. |
| name | The topic name. |
| partitions | Each partition in the request. |
| partition_index | The partition index. |
| current_leader_epoch | The current leader epoch. |
| timestamp | The current timestamp. |
+
+
ListOffsets Request (Version: 6) => replica_id isolation_level [topics] _tagged_fields 
+  replica_id => INT32
+  isolation_level => INT8
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index current_leader_epoch timestamp _tagged_fields 
+      partition_index => INT32
+      current_leader_epoch => INT32
+      timestamp => INT64
+

Request header version: 2

| Field | Description |
|---|---|
| replica_id | The broker ID of the requester, or -1 if this request is being made by a normal consumer. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| topics | Each topic in the request. |
| name | The topic name. |
| partitions | Each partition in the request. |
| partition_index | The partition index. |
| current_leader_epoch | The current leader epoch. |
| timestamp | The current timestamp. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ListOffsets Request (Version: 7) => replica_id isolation_level [topics] _tagged_fields 
+  replica_id => INT32
+  isolation_level => INT8
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index current_leader_epoch timestamp _tagged_fields 
+      partition_index => INT32
+      current_leader_epoch => INT32
+      timestamp => INT64
+

Request header version: 2

| Field | Description |
|---|---|
| replica_id | The broker ID of the requester, or -1 if this request is being made by a normal consumer. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| topics | Each topic in the request. |
| name | The topic name. |
| partitions | Each partition in the request. |
| partition_index | The partition index. |
| current_leader_epoch | The current leader epoch. |
| timestamp | The current timestamp. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ListOffsets Request (Version: 8) => replica_id isolation_level [topics] _tagged_fields 
+  replica_id => INT32
+  isolation_level => INT8
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index current_leader_epoch timestamp _tagged_fields 
+      partition_index => INT32
+      current_leader_epoch => INT32
+      timestamp => INT64
+

Request header version: 2

| Field | Description |
|---|---|
| replica_id | The broker ID of the requester, or -1 if this request is being made by a normal consumer. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| topics | Each topic in the request. |
| name | The topic name. |
| partitions | Each partition in the request. |
| partition_index | The partition index. |
| current_leader_epoch | The current leader epoch. |
| timestamp | The current timestamp. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ListOffsets Request (Version: 9) => replica_id isolation_level [topics] _tagged_fields 
+  replica_id => INT32
+  isolation_level => INT8
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index current_leader_epoch timestamp _tagged_fields 
+      partition_index => INT32
+      current_leader_epoch => INT32
+      timestamp => INT64
+

Request header version: 2

| Field | Description |
|---|---|
| replica_id | The broker ID of the requester, or -1 if this request is being made by a normal consumer. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| topics | Each topic in the request. |
| name | The topic name. |
| partitions | Each partition in the request. |
| partition_index | The partition index. |
| current_leader_epoch | The current leader epoch. |
| timestamp | The current timestamp. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ListOffsets Request (Version: 10) => replica_id isolation_level [topics] timeout_ms _tagged_fields 
+  replica_id => INT32
+  isolation_level => INT8
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index current_leader_epoch timestamp _tagged_fields 
+      partition_index => INT32
+      current_leader_epoch => INT32
+      timestamp => INT64
+  timeout_ms => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| replica_id | The broker ID of the requester, or -1 if this request is being made by a normal consumer. |
| isolation_level | This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records. |
| topics | Each topic in the request. |
| name | The topic name. |
| partitions | Each partition in the request. |
| partition_index | The partition index. |
| current_leader_epoch | The current leader epoch. |
| timestamp | The current timestamp. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| timeout_ms | The timeout to await a response in milliseconds for requests that require reading from remote storage for topics enabled with tiered storage. |
| _tagged_fields | The tagged fields |
+
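Applications rarely build ListOffsets requests by hand; the admin client exposes them through Admin#listOffsets, where an OffsetSpec stands in for the timestamp field above. The Java sketch below is illustrative only; the broker address and topic/partition are placeholders.

```java
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

// Sketch: resolve the earliest and latest offsets of one partition. Under the hood
// the admin client sends ListOffsets requests like the ones documented above.
public class ListOffsetsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("example-topic", 0);           // placeholder
            ListOffsetsResult earliest = admin.listOffsets(Map.of(tp, OffsetSpec.earliest()));
            ListOffsetsResult latest   = admin.listOffsets(Map.of(tp, OffsetSpec.latest()));
            System.out.println("earliest offset: " + earliest.partitionResult(tp).get().offset());
            System.out.println("latest offset:   " + latest.partitionResult(tp).get().offset());
        }
    }
}
```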
+Responses:
+
ListOffsets Response (Version: 1) => [topics] 
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code timestamp offset 
+      partition_index => INT32
+      error_code => INT16
+      timestamp => INT64
+      offset => INT64
+

Response header version: 0

| Field | Description |
|---|---|
| topics | Each topic in the response. |
| name | The topic name. |
| partitions | Each partition in the response. |
| partition_index | The partition index. |
| error_code | The partition error code, or 0 if there was no error. |
| timestamp | The timestamp associated with the returned offset. |
| offset | The returned offset. |
+
+
ListOffsets Response (Version: 2) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code timestamp offset 
+      partition_index => INT32
+      error_code => INT16
+      timestamp => INT64
+      offset => INT64
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | Each topic in the response. |
| name | The topic name. |
| partitions | Each partition in the response. |
| partition_index | The partition index. |
| error_code | The partition error code, or 0 if there was no error. |
| timestamp | The timestamp associated with the returned offset. |
| offset | The returned offset. |
+
+
ListOffsets Response (Version: 3) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code timestamp offset 
+      partition_index => INT32
+      error_code => INT16
+      timestamp => INT64
+      offset => INT64
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | Each topic in the response. |
| name | The topic name. |
| partitions | Each partition in the response. |
| partition_index | The partition index. |
| error_code | The partition error code, or 0 if there was no error. |
| timestamp | The timestamp associated with the returned offset. |
| offset | The returned offset. |
+
+
ListOffsets Response (Version: 4) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code timestamp offset leader_epoch 
+      partition_index => INT32
+      error_code => INT16
+      timestamp => INT64
+      offset => INT64
+      leader_epoch => INT32
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | Each topic in the response. |
| name | The topic name. |
| partitions | Each partition in the response. |
| partition_index | The partition index. |
| error_code | The partition error code, or 0 if there was no error. |
| timestamp | The timestamp associated with the returned offset. |
| offset | The returned offset. |
| leader_epoch | The leader epoch associated with the returned offset. |
+
+
ListOffsets Response (Version: 5) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code timestamp offset leader_epoch 
+      partition_index => INT32
+      error_code => INT16
+      timestamp => INT64
+      offset => INT64
+      leader_epoch => INT32
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | Each topic in the response. |
| name | The topic name. |
| partitions | Each partition in the response. |
| partition_index | The partition index. |
| error_code | The partition error code, or 0 if there was no error. |
| timestamp | The timestamp associated with the returned offset. |
| offset | The returned offset. |
| leader_epoch | The leader epoch associated with the returned offset. |
+
+
ListOffsets Response (Version: 6) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index error_code timestamp offset leader_epoch _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      timestamp => INT64
+      offset => INT64
+      leader_epoch => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | Each topic in the response. |
| name | The topic name. |
| partitions | Each partition in the response. |
| partition_index | The partition index. |
| error_code | The partition error code, or 0 if there was no error. |
| timestamp | The timestamp associated with the returned offset. |
| offset | The returned offset. |
| leader_epoch | The leader epoch associated with the returned offset. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ListOffsets Response (Version: 7) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index error_code timestamp offset leader_epoch _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      timestamp => INT64
+      offset => INT64
+      leader_epoch => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | Each topic in the response. |
| name | The topic name. |
| partitions | Each partition in the response. |
| partition_index | The partition index. |
| error_code | The partition error code, or 0 if there was no error. |
| timestamp | The timestamp associated with the returned offset. |
| offset | The returned offset. |
| leader_epoch | The leader epoch associated with the returned offset. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ListOffsets Response (Version: 8) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index error_code timestamp offset leader_epoch _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      timestamp => INT64
+      offset => INT64
+      leader_epoch => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | Each topic in the response. |
| name | The topic name. |
| partitions | Each partition in the response. |
| partition_index | The partition index. |
| error_code | The partition error code, or 0 if there was no error. |
| timestamp | The timestamp associated with the returned offset. |
| offset | The returned offset. |
| leader_epoch | The leader epoch associated with the returned offset. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ListOffsets Response (Version: 9) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index error_code timestamp offset leader_epoch _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      timestamp => INT64
+      offset => INT64
+      leader_epoch => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | Each topic in the response. |
| name | The topic name. |
| partitions | Each partition in the response. |
| partition_index | The partition index. |
| error_code | The partition error code, or 0 if there was no error. |
| timestamp | The timestamp associated with the returned offset. |
| offset | The returned offset. |
| leader_epoch | The leader epoch associated with the returned offset. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ListOffsets Response (Version: 10) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index error_code timestamp offset leader_epoch _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      timestamp => INT64
+      offset => INT64
+      leader_epoch => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | Each topic in the response. |
| name | The topic name. |
| partitions | Each partition in the response. |
| partition_index | The partition index. |
| error_code | The partition error code, or 0 if there was no error. |
| timestamp | The timestamp associated with the returned offset. |
| offset | The returned offset. |
| leader_epoch | The leader epoch associated with the returned offset. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
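On the consumer side the same lookup surfaces as offsetsForTimes, whose result mirrors the (timestamp, offset, leader epoch) triple in the response schema above. The Java sketch below is illustrative; the bootstrap address and topic/partition are placeholders.

```java
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// Sketch: find the first offset whose timestamp is >= one hour ago. The returned
// OffsetAndTimestamp carries the offset, timestamp and leader epoch fields
// documented in the ListOffsets response.
public class OffsetsForTimesSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("example-topic", 0);        // placeholder
            long oneHourAgo = System.currentTimeMillis() - 60 * 60 * 1000L;
            Map<TopicPartition, OffsetAndTimestamp> result =
                    consumer.offsetsForTimes(Map.of(tp, oneHourAgo));
            OffsetAndTimestamp ot = result.get(tp);
            if (ot != null) {
                System.out.println("offset=" + ot.offset() + " timestamp=" + ot.timestamp()
                        + " leaderEpoch=" + ot.leaderEpoch());
            }
        }
    }
}
```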
Metadata API (Key: 3):
+
+Requests:
+
Metadata Request (Version: 0) => [topics] 
+  topics => name 
+    name => STRING
+

Request header version: 1

| Field | Description |
|---|---|
| topics | The topics to fetch metadata for. |
| name | The topic name. |
+
+
Metadata Request (Version: 1) => [topics] 
+  topics => name 
+    name => STRING
+

Request header version: 1

| Field | Description |
|---|---|
| topics | The topics to fetch metadata for. |
| name | The topic name. |
+
+
Metadata Request (Version: 2) => [topics] 
+  topics => name 
+    name => STRING
+

Request header version: 1

| Field | Description |
|---|---|
| topics | The topics to fetch metadata for. |
| name | The topic name. |
+
+
Metadata Request (Version: 3) => [topics] 
+  topics => name 
+    name => STRING
+

Request header version: 1

| Field | Description |
|---|---|
| topics | The topics to fetch metadata for. |
| name | The topic name. |
+
+
Metadata Request (Version: 4) => [topics] allow_auto_topic_creation 
+  topics => name 
+    name => STRING
+  allow_auto_topic_creation => BOOLEAN
+

Request header version: 1

| Field | Description |
|---|---|
| topics | The topics to fetch metadata for. |
| name | The topic name. |
| allow_auto_topic_creation | If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so. |
+
+
Metadata Request (Version: 5) => [topics] allow_auto_topic_creation 
+  topics => name 
+    name => STRING
+  allow_auto_topic_creation => BOOLEAN
+

Request header version: 1

| Field | Description |
|---|---|
| topics | The topics to fetch metadata for. |
| name | The topic name. |
| allow_auto_topic_creation | If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so. |
+
+
Metadata Request (Version: 6) => [topics] allow_auto_topic_creation 
+  topics => name 
+    name => STRING
+  allow_auto_topic_creation => BOOLEAN
+

Request header version: 1

| Field | Description |
|---|---|
| topics | The topics to fetch metadata for. |
| name | The topic name. |
| allow_auto_topic_creation | If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so. |
+
+
Metadata Request (Version: 7) => [topics] allow_auto_topic_creation 
+  topics => name 
+    name => STRING
+  allow_auto_topic_creation => BOOLEAN
+

Request header version: 1

| Field | Description |
|---|---|
| topics | The topics to fetch metadata for. |
| name | The topic name. |
| allow_auto_topic_creation | If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so. |
+
+
Metadata Request (Version: 8) => [topics] allow_auto_topic_creation include_cluster_authorized_operations include_topic_authorized_operations 
+  topics => name 
+    name => STRING
+  allow_auto_topic_creation => BOOLEAN
+  include_cluster_authorized_operations => BOOLEAN
+  include_topic_authorized_operations => BOOLEAN
+

Request header version: 1

| Field | Description |
|---|---|
| topics | The topics to fetch metadata for. |
| name | The topic name. |
| allow_auto_topic_creation | If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so. |
| include_cluster_authorized_operations | Whether to include cluster authorized operations. |
| include_topic_authorized_operations | Whether to include topic authorized operations. |
+
+
Metadata Request (Version: 9) => [topics] allow_auto_topic_creation include_cluster_authorized_operations include_topic_authorized_operations _tagged_fields 
+  topics => name _tagged_fields 
+    name => COMPACT_STRING
+  allow_auto_topic_creation => BOOLEAN
+  include_cluster_authorized_operations => BOOLEAN
+  include_topic_authorized_operations => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| topics | The topics to fetch metadata for. |
| name | The topic name. |
| _tagged_fields | The tagged fields |
| allow_auto_topic_creation | If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so. |
| include_cluster_authorized_operations | Whether to include cluster authorized operations. |
| include_topic_authorized_operations | Whether to include topic authorized operations. |
| _tagged_fields | The tagged fields |
+
+
Metadata Request (Version: 10) => [topics] allow_auto_topic_creation include_cluster_authorized_operations include_topic_authorized_operations _tagged_fields 
+  topics => topic_id name _tagged_fields 
+    topic_id => UUID
+    name => COMPACT_NULLABLE_STRING
+  allow_auto_topic_creation => BOOLEAN
+  include_cluster_authorized_operations => BOOLEAN
+  include_topic_authorized_operations => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| topics | The topics to fetch metadata for. |
| topic_id | The topic id. |
| name | The topic name. |
| _tagged_fields | The tagged fields |
| allow_auto_topic_creation | If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so. |
| include_cluster_authorized_operations | Whether to include cluster authorized operations. |
| include_topic_authorized_operations | Whether to include topic authorized operations. |
| _tagged_fields | The tagged fields |
+
+
Metadata Request (Version: 11) => [topics] allow_auto_topic_creation include_topic_authorized_operations _tagged_fields 
+  topics => topic_id name _tagged_fields 
+    topic_id => UUID
+    name => COMPACT_NULLABLE_STRING
+  allow_auto_topic_creation => BOOLEAN
+  include_topic_authorized_operations => BOOLEAN
+

Request header version: 2

| Field | Description |
|-------|-------------|
| topics | The topics to fetch metadata for. |
| topic_id | The topic id. |
| name | The topic name. |
| _tagged_fields | The tagged fields |
| allow_auto_topic_creation | If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so. |
| include_topic_authorized_operations | Whether to include topic authorized operations. |
| _tagged_fields | The tagged fields |
+
+
Metadata Request (Version: 12) => [topics] allow_auto_topic_creation include_topic_authorized_operations _tagged_fields 
+  topics => topic_id name _tagged_fields 
+    topic_id => UUID
+    name => COMPACT_NULLABLE_STRING
+  allow_auto_topic_creation => BOOLEAN
+  include_topic_authorized_operations => BOOLEAN
+

Request header version: 2

| Field | Description |
|-------|-------------|
| topics | The topics to fetch metadata for. |
| topic_id | The topic id. |
| name | The topic name. |
| _tagged_fields | The tagged fields |
| allow_auto_topic_creation | If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so. |
| include_topic_authorized_operations | Whether to include topic authorized operations. |
| _tagged_fields | The tagged fields |
+
+
Metadata Request (Version: 13) => [topics] allow_auto_topic_creation include_topic_authorized_operations _tagged_fields 
+  topics => topic_id name _tagged_fields 
+    topic_id => UUID
+    name => COMPACT_NULLABLE_STRING
+  allow_auto_topic_creation => BOOLEAN
+  include_topic_authorized_operations => BOOLEAN
+

Request header version: 2

| Field | Description |
|-------|-------------|
| topics | The topics to fetch metadata for. |
| topic_id | The topic id. |
| name | The topic name. |
| _tagged_fields | The tagged fields |
| allow_auto_topic_creation | If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so. |
| include_topic_authorized_operations | Whether to include topic authorized operations. |
| _tagged_fields | The tagged fields |
+
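Clients normally do not set the `allow_auto_topic_creation` flag directly; the standard Java consumer, for example, exposes it through its `allow.auto.create.topics` setting (the broker must also permit auto-creation via `auto.create.topics.enable`). A minimal, illustrative sketch — the bootstrap address and group name below are placeholders:

```java
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.util.Properties;

public class NoAutoCreateConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Ask the broker NOT to auto-create missing topics when this consumer fetches metadata,
        // i.e. send allow_auto_topic_creation=false in its Metadata requests.
        props.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // subscribe/poll as usual; metadata fetches will not trigger topic creation
        }
    }
}
```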
+Responses:
+
Metadata Response (Version: 0) => [brokers] [topics] 
+  brokers => node_id host port 
+    node_id => INT32
+    host => STRING
+    port => INT32
+  topics => error_code name [partitions] 
+    error_code => INT16
+    name => STRING
+    partitions => error_code partition_index leader_id [replica_nodes] [isr_nodes] 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+

Response header version: 0

| Field | Description |
|-------|-------------|
| brokers | A list of brokers present in the cluster. |
| node_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. Null for non-existing topics queried by ID. This is never null when ErrorCode is zero. One of Name and TopicId is always populated. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
+
+
Metadata Response (Version: 1) => [brokers] controller_id [topics] 
+  brokers => node_id host port rack 
+    node_id => INT32
+    host => STRING
+    port => INT32
+    rack => NULLABLE_STRING
+  controller_id => INT32
+  topics => error_code name is_internal [partitions] 
+    error_code => INT16
+    name => STRING
+    is_internal => BOOLEAN
+    partitions => error_code partition_index leader_id [replica_nodes] [isr_nodes] 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+

Response header version: 0

| Field | Description |
|-------|-------------|
| brokers | A list of brokers present in the cluster. |
| node_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| controller_id | The ID of the controller broker. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. Null for non-existing topics queried by ID. This is never null when ErrorCode is zero. One of Name and TopicId is always populated. |
| is_internal | True if the topic is internal. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
+
+
Metadata Response (Version: 2) => [brokers] cluster_id controller_id [topics] 
+  brokers => node_id host port rack 
+    node_id => INT32
+    host => STRING
+    port => INT32
+    rack => NULLABLE_STRING
+  cluster_id => NULLABLE_STRING
+  controller_id => INT32
+  topics => error_code name is_internal [partitions] 
+    error_code => INT16
+    name => STRING
+    is_internal => BOOLEAN
+    partitions => error_code partition_index leader_id [replica_nodes] [isr_nodes] 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+

Response header version: 0

| Field | Description |
|-------|-------------|
| brokers | A list of brokers present in the cluster. |
| node_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. Null for non-existing topics queried by ID. This is never null when ErrorCode is zero. One of Name and TopicId is always populated. |
| is_internal | True if the topic is internal. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
+
+
Metadata Response (Version: 3) => throttle_time_ms [brokers] cluster_id controller_id [topics] 
+  throttle_time_ms => INT32
+  brokers => node_id host port rack 
+    node_id => INT32
+    host => STRING
+    port => INT32
+    rack => NULLABLE_STRING
+  cluster_id => NULLABLE_STRING
+  controller_id => INT32
+  topics => error_code name is_internal [partitions] 
+    error_code => INT16
+    name => STRING
+    is_internal => BOOLEAN
+    partitions => error_code partition_index leader_id [replica_nodes] [isr_nodes] 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+

Response header version: 0

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| brokers | A list of brokers present in the cluster. |
| node_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. Null for non-existing topics queried by ID. This is never null when ErrorCode is zero. One of Name and TopicId is always populated. |
| is_internal | True if the topic is internal. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
+
+
Metadata Response (Version: 4) => throttle_time_ms [brokers] cluster_id controller_id [topics] 
+  throttle_time_ms => INT32
+  brokers => node_id host port rack 
+    node_id => INT32
+    host => STRING
+    port => INT32
+    rack => NULLABLE_STRING
+  cluster_id => NULLABLE_STRING
+  controller_id => INT32
+  topics => error_code name is_internal [partitions] 
+    error_code => INT16
+    name => STRING
+    is_internal => BOOLEAN
+    partitions => error_code partition_index leader_id [replica_nodes] [isr_nodes] 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+

Response header version: 0

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| brokers | A list of brokers present in the cluster. |
| node_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. Null for non-existing topics queried by ID. This is never null when ErrorCode is zero. One of Name and TopicId is always populated. |
| is_internal | True if the topic is internal. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
+
+
Metadata Response (Version: 5) => throttle_time_ms [brokers] cluster_id controller_id [topics] 
+  throttle_time_ms => INT32
+  brokers => node_id host port rack 
+    node_id => INT32
+    host => STRING
+    port => INT32
+    rack => NULLABLE_STRING
+  cluster_id => NULLABLE_STRING
+  controller_id => INT32
+  topics => error_code name is_internal [partitions] 
+    error_code => INT16
+    name => STRING
+    is_internal => BOOLEAN
+    partitions => error_code partition_index leader_id [replica_nodes] [isr_nodes] [offline_replicas] 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+      offline_replicas => INT32
+

Response header version: 0

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| brokers | A list of brokers present in the cluster. |
| node_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. Null for non-existing topics queried by ID. This is never null when ErrorCode is zero. One of Name and TopicId is always populated. |
| is_internal | True if the topic is internal. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
| offline_replicas | The set of offline replicas of this partition. |
+
+
Metadata Response (Version: 6) => throttle_time_ms [brokers] cluster_id controller_id [topics] 
+  throttle_time_ms => INT32
+  brokers => node_id host port rack 
+    node_id => INT32
+    host => STRING
+    port => INT32
+    rack => NULLABLE_STRING
+  cluster_id => NULLABLE_STRING
+  controller_id => INT32
+  topics => error_code name is_internal [partitions] 
+    error_code => INT16
+    name => STRING
+    is_internal => BOOLEAN
+    partitions => error_code partition_index leader_id [replica_nodes] [isr_nodes] [offline_replicas] 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+      offline_replicas => INT32
+

Response header version: 0

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| brokers | A list of brokers present in the cluster. |
| node_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. Null for non-existing topics queried by ID. This is never null when ErrorCode is zero. One of Name and TopicId is always populated. |
| is_internal | True if the topic is internal. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
| offline_replicas | The set of offline replicas of this partition. |
+
+
Metadata Response (Version: 7) => throttle_time_ms [brokers] cluster_id controller_id [topics] 
+  throttle_time_ms => INT32
+  brokers => node_id host port rack 
+    node_id => INT32
+    host => STRING
+    port => INT32
+    rack => NULLABLE_STRING
+  cluster_id => NULLABLE_STRING
+  controller_id => INT32
+  topics => error_code name is_internal [partitions] 
+    error_code => INT16
+    name => STRING
+    is_internal => BOOLEAN
+    partitions => error_code partition_index leader_id leader_epoch [replica_nodes] [isr_nodes] [offline_replicas] 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      leader_epoch => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+      offline_replicas => INT32
+

Response header version: 0

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| brokers | A list of brokers present in the cluster. |
| node_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. Null for non-existing topics queried by ID. This is never null when ErrorCode is zero. One of Name and TopicId is always populated. |
| is_internal | True if the topic is internal. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| leader_epoch | The leader epoch of this partition. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
| offline_replicas | The set of offline replicas of this partition. |
+
+
Metadata Response (Version: 8) => throttle_time_ms [brokers] cluster_id controller_id [topics] cluster_authorized_operations 
+  throttle_time_ms => INT32
+  brokers => node_id host port rack 
+    node_id => INT32
+    host => STRING
+    port => INT32
+    rack => NULLABLE_STRING
+  cluster_id => NULLABLE_STRING
+  controller_id => INT32
+  topics => error_code name is_internal [partitions] topic_authorized_operations 
+    error_code => INT16
+    name => STRING
+    is_internal => BOOLEAN
+    partitions => error_code partition_index leader_id leader_epoch [replica_nodes] [isr_nodes] [offline_replicas] 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      leader_epoch => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+      offline_replicas => INT32
+    topic_authorized_operations => INT32
+  cluster_authorized_operations => INT32
+

Response header version: 0

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| brokers | A list of brokers present in the cluster. |
| node_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. Null for non-existing topics queried by ID. This is never null when ErrorCode is zero. One of Name and TopicId is always populated. |
| is_internal | True if the topic is internal. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| leader_epoch | The leader epoch of this partition. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
| offline_replicas | The set of offline replicas of this partition. |
| topic_authorized_operations | 32-bit bitfield to represent authorized operations for this topic. |
| cluster_authorized_operations | 32-bit bitfield to represent authorized operations for this cluster. |
+
+
Metadata Response (Version: 9) => throttle_time_ms [brokers] cluster_id controller_id [topics] cluster_authorized_operations _tagged_fields 
+  throttle_time_ms => INT32
+  brokers => node_id host port rack _tagged_fields 
+    node_id => INT32
+    host => COMPACT_STRING
+    port => INT32
+    rack => COMPACT_NULLABLE_STRING
+  cluster_id => COMPACT_NULLABLE_STRING
+  controller_id => INT32
+  topics => error_code name is_internal [partitions] topic_authorized_operations _tagged_fields 
+    error_code => INT16
+    name => COMPACT_STRING
+    is_internal => BOOLEAN
+    partitions => error_code partition_index leader_id leader_epoch [replica_nodes] [isr_nodes] [offline_replicas] _tagged_fields 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      leader_epoch => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+      offline_replicas => INT32
+    topic_authorized_operations => INT32
+  cluster_authorized_operations => INT32
+

Response header version: 1

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| brokers | A list of brokers present in the cluster. |
| node_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| _tagged_fields | The tagged fields |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. Null for non-existing topics queried by ID. This is never null when ErrorCode is zero. One of Name and TopicId is always populated. |
| is_internal | True if the topic is internal. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| leader_epoch | The leader epoch of this partition. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
| offline_replicas | The set of offline replicas of this partition. |
| _tagged_fields | The tagged fields |
| topic_authorized_operations | 32-bit bitfield to represent authorized operations for this topic. |
| _tagged_fields | The tagged fields |
| cluster_authorized_operations | 32-bit bitfield to represent authorized operations for this cluster. |
| _tagged_fields | The tagged fields |
+
+
Metadata Response (Version: 10) => throttle_time_ms [brokers] cluster_id controller_id [topics] cluster_authorized_operations _tagged_fields 
+  throttle_time_ms => INT32
+  brokers => node_id host port rack _tagged_fields 
+    node_id => INT32
+    host => COMPACT_STRING
+    port => INT32
+    rack => COMPACT_NULLABLE_STRING
+  cluster_id => COMPACT_NULLABLE_STRING
+  controller_id => INT32
+  topics => error_code name topic_id is_internal [partitions] topic_authorized_operations _tagged_fields 
+    error_code => INT16
+    name => COMPACT_STRING
+    topic_id => UUID
+    is_internal => BOOLEAN
+    partitions => error_code partition_index leader_id leader_epoch [replica_nodes] [isr_nodes] [offline_replicas] _tagged_fields 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      leader_epoch => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+      offline_replicas => INT32
+    topic_authorized_operations => INT32
+  cluster_authorized_operations => INT32
+

Response header version: 1

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| brokers | A list of brokers present in the cluster. |
| node_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| _tagged_fields | The tagged fields |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. Null for non-existing topics queried by ID. This is never null when ErrorCode is zero. One of Name and TopicId is always populated. |
| topic_id | The topic id. Zero for non-existing topics queried by name. This is never zero when ErrorCode is zero. One of Name and TopicId is always populated. |
| is_internal | True if the topic is internal. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| leader_epoch | The leader epoch of this partition. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
| offline_replicas | The set of offline replicas of this partition. |
| _tagged_fields | The tagged fields |
| topic_authorized_operations | 32-bit bitfield to represent authorized operations for this topic. |
| _tagged_fields | The tagged fields |
| cluster_authorized_operations | 32-bit bitfield to represent authorized operations for this cluster. |
| _tagged_fields | The tagged fields |
+
+
Metadata Response (Version: 11) => throttle_time_ms [brokers] cluster_id controller_id [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  brokers => node_id host port rack _tagged_fields 
+    node_id => INT32
+    host => COMPACT_STRING
+    port => INT32
+    rack => COMPACT_NULLABLE_STRING
+  cluster_id => COMPACT_NULLABLE_STRING
+  controller_id => INT32
+  topics => error_code name topic_id is_internal [partitions] topic_authorized_operations _tagged_fields 
+    error_code => INT16
+    name => COMPACT_STRING
+    topic_id => UUID
+    is_internal => BOOLEAN
+    partitions => error_code partition_index leader_id leader_epoch [replica_nodes] [isr_nodes] [offline_replicas] _tagged_fields 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      leader_epoch => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+      offline_replicas => INT32
+    topic_authorized_operations => INT32
+

Response header version: 1

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| brokers | A list of brokers present in the cluster. |
| node_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| _tagged_fields | The tagged fields |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. Null for non-existing topics queried by ID. This is never null when ErrorCode is zero. One of Name and TopicId is always populated. |
| topic_id | The topic id. Zero for non-existing topics queried by name. This is never zero when ErrorCode is zero. One of Name and TopicId is always populated. |
| is_internal | True if the topic is internal. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| leader_epoch | The leader epoch of this partition. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
| offline_replicas | The set of offline replicas of this partition. |
| _tagged_fields | The tagged fields |
| topic_authorized_operations | 32-bit bitfield to represent authorized operations for this topic. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
Metadata Response (Version: 12) => throttle_time_ms [brokers] cluster_id controller_id [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  brokers => node_id host port rack _tagged_fields 
+    node_id => INT32
+    host => COMPACT_STRING
+    port => INT32
+    rack => COMPACT_NULLABLE_STRING
+  cluster_id => COMPACT_NULLABLE_STRING
+  controller_id => INT32
+  topics => error_code name topic_id is_internal [partitions] topic_authorized_operations _tagged_fields 
+    error_code => INT16
+    name => COMPACT_NULLABLE_STRING
+    topic_id => UUID
+    is_internal => BOOLEAN
+    partitions => error_code partition_index leader_id leader_epoch [replica_nodes] [isr_nodes] [offline_replicas] _tagged_fields 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      leader_epoch => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+      offline_replicas => INT32
+    topic_authorized_operations => INT32
+

Response header version: 1

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| brokers | A list of brokers present in the cluster. |
| node_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| _tagged_fields | The tagged fields |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. Null for non-existing topics queried by ID. This is never null when ErrorCode is zero. One of Name and TopicId is always populated. |
| topic_id | The topic id. Zero for non-existing topics queried by name. This is never zero when ErrorCode is zero. One of Name and TopicId is always populated. |
| is_internal | True if the topic is internal. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| leader_epoch | The leader epoch of this partition. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
| offline_replicas | The set of offline replicas of this partition. |
| _tagged_fields | The tagged fields |
| topic_authorized_operations | 32-bit bitfield to represent authorized operations for this topic. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
Metadata Response (Version: 13) => throttle_time_ms [brokers] cluster_id controller_id [topics] error_code _tagged_fields 
+  throttle_time_ms => INT32
+  brokers => node_id host port rack _tagged_fields 
+    node_id => INT32
+    host => COMPACT_STRING
+    port => INT32
+    rack => COMPACT_NULLABLE_STRING
+  cluster_id => COMPACT_NULLABLE_STRING
+  controller_id => INT32
+  topics => error_code name topic_id is_internal [partitions] topic_authorized_operations _tagged_fields 
+    error_code => INT16
+    name => COMPACT_NULLABLE_STRING
+    topic_id => UUID
+    is_internal => BOOLEAN
+    partitions => error_code partition_index leader_id leader_epoch [replica_nodes] [isr_nodes] [offline_replicas] _tagged_fields 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      leader_epoch => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+      offline_replicas => INT32
+    topic_authorized_operations => INT32
+  error_code => INT16
+

Response header version: 1

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| brokers | A list of brokers present in the cluster. |
| node_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| _tagged_fields | The tagged fields |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. Null for non-existing topics queried by ID. This is never null when ErrorCode is zero. One of Name and TopicId is always populated. |
| topic_id | The topic id. Zero for non-existing topics queried by name. This is never zero when ErrorCode is zero. One of Name and TopicId is always populated. |
| is_internal | True if the topic is internal. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| leader_epoch | The leader epoch of this partition. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
| offline_replicas | The set of offline replicas of this partition. |
| _tagged_fields | The tagged fields |
| topic_authorized_operations | 32-bit bitfield to represent authorized operations for this topic. |
| _tagged_fields | The tagged fields |
| error_code | The top-level error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
+
+
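Most applications never parse these Metadata responses by hand; the brokers, controller_id, cluster_id, and per-partition leader/replica/ISR information above is what the Java Admin client surfaces through calls such as describeCluster() and describeTopics(). A minimal sketch for reading it (the bootstrap address and topic name are placeholders, and recent brokers may serve parts of this via newer dedicated APIs):

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeClusterResult;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.Node;

import java.util.List;
import java.util.Properties;

public class ClusterMetadataExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            // Cluster-level metadata: brokers, controller, cluster id.
            DescribeClusterResult cluster = admin.describeCluster();
            System.out.println("cluster_id    = " + cluster.clusterId().get());
            System.out.println("controller_id = " + cluster.controller().get().id());
            for (Node broker : cluster.nodes().get()) {
                System.out.printf("broker %d at %s:%d (rack=%s)%n",
                        broker.id(), broker.host(), broker.port(), broker.rack());
            }

            // Topic-level metadata: per-partition leader, replicas and ISR.
            TopicDescription topic = admin.describeTopics(List.of("my-topic")) // placeholder topic
                    .allTopicNames().get().get("my-topic");
            topic.partitions().forEach(p ->
                    System.out.printf("partition %d leader=%s replicas=%s isr=%s%n",
                            p.partition(), p.leader(), p.replicas(), p.isr()));
        }
    }
}
```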
OffsetCommit API (Key: 8):
+Requests:
+
OffsetCommit Request (Version: 2) => group_id generation_id_or_member_epoch member_id retention_time_ms [topics] 
+  group_id => STRING
+  generation_id_or_member_epoch => INT32
+  member_id => STRING
+  retention_time_ms => INT64
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index committed_offset committed_metadata 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_metadata => NULLABLE_STRING
+

Request header version: 1

| Field | Description |
|-------|-------------|
| group_id | The unique group identifier. |
| generation_id_or_member_epoch | The generation of the group if using the classic group protocol or the member epoch if using the consumer protocol. |
| member_id | The member ID assigned by the group coordinator. |
| retention_time_ms | The time period in ms to retain the offset. |
| topics | The topics to commit offsets for. |
| name | The topic name. |
| partitions | Each partition to commit offsets for. |
| partition_index | The partition index. |
| committed_offset | The message offset to be committed. |
| committed_metadata | Any associated metadata the client wants to keep. |
+
+
OffsetCommit Request (Version: 3) => group_id generation_id_or_member_epoch member_id retention_time_ms [topics] 
+  group_id => STRING
+  generation_id_or_member_epoch => INT32
+  member_id => STRING
+  retention_time_ms => INT64
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index committed_offset committed_metadata 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_metadata => NULLABLE_STRING
+

Request header version: 1

| Field | Description |
|-------|-------------|
| group_id | The unique group identifier. |
| generation_id_or_member_epoch | The generation of the group if using the classic group protocol or the member epoch if using the consumer protocol. |
| member_id | The member ID assigned by the group coordinator. |
| retention_time_ms | The time period in ms to retain the offset. |
| topics | The topics to commit offsets for. |
| name | The topic name. |
| partitions | Each partition to commit offsets for. |
| partition_index | The partition index. |
| committed_offset | The message offset to be committed. |
| committed_metadata | Any associated metadata the client wants to keep. |
+
+
OffsetCommit Request (Version: 4) => group_id generation_id_or_member_epoch member_id retention_time_ms [topics] 
+  group_id => STRING
+  generation_id_or_member_epoch => INT32
+  member_id => STRING
+  retention_time_ms => INT64
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index committed_offset committed_metadata 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_metadata => NULLABLE_STRING
+

Request header version: 1

| Field | Description |
|-------|-------------|
| group_id | The unique group identifier. |
| generation_id_or_member_epoch | The generation of the group if using the classic group protocol or the member epoch if using the consumer protocol. |
| member_id | The member ID assigned by the group coordinator. |
| retention_time_ms | The time period in ms to retain the offset. |
| topics | The topics to commit offsets for. |
| name | The topic name. |
| partitions | Each partition to commit offsets for. |
| partition_index | The partition index. |
| committed_offset | The message offset to be committed. |
| committed_metadata | Any associated metadata the client wants to keep. |
+
+
OffsetCommit Request (Version: 5) => group_id generation_id_or_member_epoch member_id [topics] 
+  group_id => STRING
+  generation_id_or_member_epoch => INT32
+  member_id => STRING
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index committed_offset committed_metadata 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_metadata => NULLABLE_STRING
+

Request header version: 1

| Field | Description |
|-------|-------------|
| group_id | The unique group identifier. |
| generation_id_or_member_epoch | The generation of the group if using the classic group protocol or the member epoch if using the consumer protocol. |
| member_id | The member ID assigned by the group coordinator. |
| topics | The topics to commit offsets for. |
| name | The topic name. |
| partitions | Each partition to commit offsets for. |
| partition_index | The partition index. |
| committed_offset | The message offset to be committed. |
| committed_metadata | Any associated metadata the client wants to keep. |
+
+
OffsetCommit Request (Version: 6) => group_id generation_id_or_member_epoch member_id [topics] 
+  group_id => STRING
+  generation_id_or_member_epoch => INT32
+  member_id => STRING
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index committed_offset committed_leader_epoch committed_metadata 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_leader_epoch => INT32
+      committed_metadata => NULLABLE_STRING
+

Request header version: 1

| Field | Description |
|-------|-------------|
| group_id | The unique group identifier. |
| generation_id_or_member_epoch | The generation of the group if using the classic group protocol or the member epoch if using the consumer protocol. |
| member_id | The member ID assigned by the group coordinator. |
| topics | The topics to commit offsets for. |
| name | The topic name. |
| partitions | Each partition to commit offsets for. |
| partition_index | The partition index. |
| committed_offset | The message offset to be committed. |
| committed_leader_epoch | The leader epoch of this partition. |
| committed_metadata | Any associated metadata the client wants to keep. |
+
+
OffsetCommit Request (Version: 7) => group_id generation_id_or_member_epoch member_id group_instance_id [topics] 
+  group_id => STRING
+  generation_id_or_member_epoch => INT32
+  member_id => STRING
+  group_instance_id => NULLABLE_STRING
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index committed_offset committed_leader_epoch committed_metadata 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_leader_epoch => INT32
+      committed_metadata => NULLABLE_STRING
+

Request header version: 1

| Field | Description |
|-------|-------------|
| group_id | The unique group identifier. |
| generation_id_or_member_epoch | The generation of the group if using the classic group protocol or the member epoch if using the consumer protocol. |
| member_id | The member ID assigned by the group coordinator. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| topics | The topics to commit offsets for. |
| name | The topic name. |
| partitions | Each partition to commit offsets for. |
| partition_index | The partition index. |
| committed_offset | The message offset to be committed. |
| committed_leader_epoch | The leader epoch of this partition. |
| committed_metadata | Any associated metadata the client wants to keep. |
+
+
OffsetCommit Request (Version: 8) => group_id generation_id_or_member_epoch member_id group_instance_id [topics] _tagged_fields 
+  group_id => COMPACT_STRING
+  generation_id_or_member_epoch => INT32
+  member_id => COMPACT_STRING
+  group_instance_id => COMPACT_NULLABLE_STRING
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index committed_offset committed_leader_epoch committed_metadata _tagged_fields 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_leader_epoch => INT32
+      committed_metadata => COMPACT_NULLABLE_STRING
+

Request header version: 2

| Field | Description |
|-------|-------------|
| group_id | The unique group identifier. |
| generation_id_or_member_epoch | The generation of the group if using the classic group protocol or the member epoch if using the consumer protocol. |
| member_id | The member ID assigned by the group coordinator. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| topics | The topics to commit offsets for. |
| name | The topic name. |
| partitions | Each partition to commit offsets for. |
| partition_index | The partition index. |
| committed_offset | The message offset to be committed. |
| committed_leader_epoch | The leader epoch of this partition. |
| committed_metadata | Any associated metadata the client wants to keep. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
OffsetCommit Request (Version: 9) => group_id generation_id_or_member_epoch member_id group_instance_id [topics] _tagged_fields 
+  group_id => COMPACT_STRING
+  generation_id_or_member_epoch => INT32
+  member_id => COMPACT_STRING
+  group_instance_id => COMPACT_NULLABLE_STRING
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index committed_offset committed_leader_epoch committed_metadata _tagged_fields 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_leader_epoch => INT32
+      committed_metadata => COMPACT_NULLABLE_STRING
+

Request header version: 2

| Field | Description |
|-------|-------------|
| group_id | The unique group identifier. |
| generation_id_or_member_epoch | The generation of the group if using the classic group protocol or the member epoch if using the consumer protocol. |
| member_id | The member ID assigned by the group coordinator. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| topics | The topics to commit offsets for. |
| name | The topic name. |
| partitions | Each partition to commit offsets for. |
| partition_index | The partition index. |
| committed_offset | The message offset to be committed. |
| committed_leader_epoch | The leader epoch of this partition. |
| committed_metadata | Any associated metadata the client wants to keep. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
OffsetCommit Request (Version: 10) => group_id generation_id_or_member_epoch member_id group_instance_id [topics] _tagged_fields 
+  group_id => COMPACT_STRING
+  generation_id_or_member_epoch => INT32
+  member_id => COMPACT_STRING
+  group_instance_id => COMPACT_NULLABLE_STRING
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition_index committed_offset committed_leader_epoch committed_metadata _tagged_fields 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_leader_epoch => INT32
+      committed_metadata => COMPACT_NULLABLE_STRING
+

This version of the request is unstable.

Request header version: 2

| Field | Description |
|-------|-------------|
| group_id | The unique group identifier. |
| generation_id_or_member_epoch | The generation of the group if using the classic group protocol or the member epoch if using the consumer protocol. |
| member_id | The member ID assigned by the group coordinator. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| topics | The topics to commit offsets for. |
| topic_id | The topic ID. |
| partitions | Each partition to commit offsets for. |
| partition_index | The partition index. |
| committed_offset | The message offset to be committed. |
| committed_leader_epoch | The leader epoch of this partition. |
| committed_metadata | Any associated metadata the client wants to keep. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
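The group_id, committed_offset, committed_leader_epoch, and committed_metadata fields above correspond to what the Java consumer sends when offsets are committed manually. A minimal sketch (the topic name and bootstrap address are placeholders):

```java
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Properties;

public class ManualCommitExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // becomes group_id
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("my-topic")); // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (TopicPartition tp : records.partitions()) {
                List<ConsumerRecord<String, String>> batch = records.records(tp);
                long nextOffset = batch.get(batch.size() - 1).offset() + 1;
                // committed_offset is the next offset to consume; the string becomes committed_metadata.
                consumer.commitSync(Map.of(tp, new OffsetAndMetadata(nextOffset, "processed by host-1")));
            }
        }
    }
}
```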
+Responses:
+
OffsetCommit Response (Version: 2) => [topics] 
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 0

| Field | Description |
|-------|-------------|
| topics | The responses for each topic. |
| name | The topic name. |
| partitions | The responses for each partition in the topic. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
+
+
OffsetCommit Response (Version: 3) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 0

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | The responses for each topic. |
| name | The topic name. |
| partitions | The responses for each partition in the topic. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
+
+
OffsetCommit Response (Version: 4) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 0

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | The responses for each topic. |
| name | The topic name. |
| partitions | The responses for each partition in the topic. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
+
+
OffsetCommit Response (Version: 5) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 0

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | The responses for each topic. |
| name | The topic name. |
| partitions | The responses for each partition in the topic. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
+
+
OffsetCommit Response (Version: 6) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 0

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | The responses for each topic. |
| name | The topic name. |
| partitions | The responses for each partition in the topic. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
+
+
OffsetCommit Response (Version: 7) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 0

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | The responses for each topic. |
| name | The topic name. |
| partitions | The responses for each partition in the topic. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
+
+
OffsetCommit Response (Version: 8) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index error_code _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 1

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | The responses for each topic. |
| name | The topic name. |
| partitions | The responses for each partition in the topic. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
OffsetCommit Response (Version: 9) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index error_code _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 1

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | The responses for each topic. |
| name | The topic name. |
| partitions | The responses for each partition in the topic. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
OffsetCommit Response (Version: 10) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition_index error_code _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 1

| Field | Description |
|-------|-------------|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | The responses for each topic. |
| topic_id | The topic ID. |
| partitions | The responses for each partition in the topic. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
OffsetFetch API (Key: 9):
+Requests:
+
OffsetFetch Request (Version: 1) => group_id [topics] 
+  group_id => STRING
+  topics => name [partition_indexes] 
+    name => STRING
+    partition_indexes => INT32
+

Request header version: 1

| Field | Description |
|-------|-------------|
| group_id | The group to fetch offsets for. |
| topics | Each topic we would like to fetch offsets for, or null to fetch offsets for all topics. |
| name | The topic name. |
| partition_indexes | The partition indexes we would like to fetch offsets for. |
+
+
OffsetFetch Request (Version: 2) => group_id [topics] 
+  group_id => STRING
+  topics => name [partition_indexes] 
+    name => STRING
+    partition_indexes => INT32
+

Request header version: 1

| Field | Description |
|-------|-------------|
| group_id | The group to fetch offsets for. |
| topics | Each topic we would like to fetch offsets for, or null to fetch offsets for all topics. |
| name | The topic name. |
| partition_indexes | The partition indexes we would like to fetch offsets for. |
+
+
OffsetFetch Request (Version: 3) => group_id [topics] 
+  group_id => STRING
+  topics => name [partition_indexes] 
+    name => STRING
+    partition_indexes => INT32
+

Request header version: 1

| Field | Description |
|-------|-------------|
| group_id | The group to fetch offsets for. |
| topics | Each topic we would like to fetch offsets for, or null to fetch offsets for all topics. |
| name | The topic name. |
| partition_indexes | The partition indexes we would like to fetch offsets for. |
+
+
OffsetFetch Request (Version: 4) => group_id [topics] 
+  group_id => STRING
+  topics => name [partition_indexes] 
+    name => STRING
+    partition_indexes => INT32
+

Request header version: 1

| Field | Description |
|-------|-------------|
| group_id | The group to fetch offsets for. |
| topics | Each topic we would like to fetch offsets for, or null to fetch offsets for all topics. |
| name | The topic name. |
| partition_indexes | The partition indexes we would like to fetch offsets for. |
+
+
OffsetFetch Request (Version: 5) => group_id [topics] 
+  group_id => STRING
+  topics => name [partition_indexes] 
+    name => STRING
+    partition_indexes => INT32
+

Request header version: 1

| Field | Description |
|-------|-------------|
| group_id | The group to fetch offsets for. |
| topics | Each topic we would like to fetch offsets for, or null to fetch offsets for all topics. |
| name | The topic name. |
| partition_indexes | The partition indexes we would like to fetch offsets for. |
+
+
OffsetFetch Request (Version: 6) => group_id [topics] _tagged_fields 
+  group_id => COMPACT_STRING
+  topics => name [partition_indexes] _tagged_fields 
+    name => COMPACT_STRING
+    partition_indexes => INT32
+

Request header version: 2

| Field | Description |
|-------|-------------|
| group_id | The group to fetch offsets for. |
| topics | Each topic we would like to fetch offsets for, or null to fetch offsets for all topics. |
| name | The topic name. |
| partition_indexes | The partition indexes we would like to fetch offsets for. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
OffsetFetch Request (Version: 7) => group_id [topics] require_stable _tagged_fields 
+  group_id => COMPACT_STRING
+  topics => name [partition_indexes] _tagged_fields 
+    name => COMPACT_STRING
+    partition_indexes => INT32
+  require_stable => BOOLEAN
+

Request header version: 2

| Field | Description |
|-------|-------------|
| group_id | The group to fetch offsets for. |
| topics | Each topic we would like to fetch offsets for, or null to fetch offsets for all topics. |
| name | The topic name. |
| partition_indexes | The partition indexes we would like to fetch offsets for. |
| _tagged_fields | The tagged fields |
| require_stable | Whether broker should hold on returning unstable offsets but set a retriable error code for the partitions. |
| _tagged_fields | The tagged fields |
+
+
OffsetFetch Request (Version: 8) => [groups] require_stable _tagged_fields 
+  groups => group_id [topics] _tagged_fields 
+    group_id => COMPACT_STRING
+    topics => name [partition_indexes] _tagged_fields 
+      name => COMPACT_STRING
+      partition_indexes => INT32
+  require_stable => BOOLEAN
+

Request header version: 2

| Field | Description |
|-------|-------------|
| groups | Each group we would like to fetch offsets for. |
| group_id | The group ID. |
| topics | Each topic we would like to fetch offsets for, or null to fetch offsets for all topics. |
| name | The topic name. |
| partition_indexes | The partition indexes we would like to fetch offsets for. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| require_stable | Whether broker should hold on returning unstable offsets but set a retriable error code for the partitions. |
| _tagged_fields | The tagged fields |
+
+
OffsetFetch Request (Version: 9) => [groups] require_stable _tagged_fields 
+  groups => group_id member_id member_epoch [topics] _tagged_fields 
+    group_id => COMPACT_STRING
+    member_id => COMPACT_NULLABLE_STRING
+    member_epoch => INT32
+    topics => name [partition_indexes] _tagged_fields 
+      name => COMPACT_STRING
+      partition_indexes => INT32
+  require_stable => BOOLEAN
+

Request header version: 2

| Field | Description |
|-------|-------------|
| groups | Each group we would like to fetch offsets for. |
| group_id | The group ID. |
| member_id | The member id. |
| member_epoch | The member epoch if using the new consumer protocol (KIP-848). |
| topics | Each topic we would like to fetch offsets for, or null to fetch offsets for all topics. |
| name | The topic name. |
| partition_indexes | The partition indexes we would like to fetch offsets for. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| require_stable | Whether broker should hold on returning unstable offsets but set a retriable error code for the partitions. |
| _tagged_fields | The tagged fields |
+
+
OffsetFetch Request (Version: 10) => [groups] require_stable _tagged_fields 
+  groups => group_id member_id member_epoch [topics] _tagged_fields 
+    group_id => COMPACT_STRING
+    member_id => COMPACT_NULLABLE_STRING
+    member_epoch => INT32
+    topics => topic_id [partition_indexes] _tagged_fields 
+      topic_id => UUID
+      partition_indexes => INT32
+  require_stable => BOOLEAN
+

This version of the request is unstable.

Request header version: 2

| Field | Description |
|-------|-------------|
| groups | Each group we would like to fetch offsets for. |
| group_id | The group ID. |
| member_id | The member id. |
| member_epoch | The member epoch if using the new consumer protocol (KIP-848). |
| topics | Each topic we would like to fetch offsets for, or null to fetch offsets for all topics. |
| topic_id | The topic ID. |
| partition_indexes | The partition indexes we would like to fetch offsets for. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| require_stable | Whether broker should hold on returning unstable offsets but set a retriable error code for the partitions. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
OffsetFetch Response (Version: 1) => [topics] 
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index committed_offset metadata error_code 
+      partition_index => INT32
+      committed_offset => INT64
+      metadata => NULLABLE_STRING
+      error_code => INT16
+

Response header version: 0

| Field | Description |
| --- | --- |
| topics | The responses per topic. |
| name | The topic name. |
| partitions | The responses per partition. |
| partition_index | The partition index. |
| committed_offset | The committed message offset. |
| metadata | The partition metadata. |
| error_code | The error code, or 0 if there was no error. |
+
+
OffsetFetch Response (Version: 2) => [topics] error_code 
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index committed_offset metadata error_code 
+      partition_index => INT32
+      committed_offset => INT64
+      metadata => NULLABLE_STRING
+      error_code => INT16
+  error_code => INT16
+

Response header version: 0

| Field | Description |
| --- | --- |
| topics | The responses per topic. |
| name | The topic name. |
| partitions | The responses per partition. |
| partition_index | The partition index. |
| committed_offset | The committed message offset. |
| metadata | The partition metadata. |
| error_code | The error code, or 0 if there was no error. |
| error_code | The top-level error code, or 0 if there was no error. |
+
+
OffsetFetch Response (Version: 3) => throttle_time_ms [topics] error_code 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index committed_offset metadata error_code 
+      partition_index => INT32
+      committed_offset => INT64
+      metadata => NULLABLE_STRING
+      error_code => INT16
+  error_code => INT16
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | The responses per topic. |
| name | The topic name. |
| partitions | The responses per partition. |
| partition_index | The partition index. |
| committed_offset | The committed message offset. |
| metadata | The partition metadata. |
| error_code | The error code, or 0 if there was no error. |
| error_code | The top-level error code, or 0 if there was no error. |
+
+
OffsetFetch Response (Version: 4) => throttle_time_ms [topics] error_code 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index committed_offset metadata error_code 
+      partition_index => INT32
+      committed_offset => INT64
+      metadata => NULLABLE_STRING
+      error_code => INT16
+  error_code => INT16
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | The responses per topic. |
| name | The topic name. |
| partitions | The responses per partition. |
| partition_index | The partition index. |
| committed_offset | The committed message offset. |
| metadata | The partition metadata. |
| error_code | The error code, or 0 if there was no error. |
| error_code | The top-level error code, or 0 if there was no error. |
+
+
OffsetFetch Response (Version: 5) => throttle_time_ms [topics] error_code 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index committed_offset committed_leader_epoch metadata error_code 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_leader_epoch => INT32
+      metadata => NULLABLE_STRING
+      error_code => INT16
+  error_code => INT16
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | The responses per topic. |
| name | The topic name. |
| partitions | The responses per partition. |
| partition_index | The partition index. |
| committed_offset | The committed message offset. |
| committed_leader_epoch | The leader epoch. |
| metadata | The partition metadata. |
| error_code | The error code, or 0 if there was no error. |
| error_code | The top-level error code, or 0 if there was no error. |
+
+
OffsetFetch Response (Version: 6) => throttle_time_ms [topics] error_code _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index committed_offset committed_leader_epoch metadata error_code _tagged_fields 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_leader_epoch => INT32
+      metadata => COMPACT_NULLABLE_STRING
+      error_code => INT16
+  error_code => INT16
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | The responses per topic. |
| name | The topic name. |
| partitions | The responses per partition. |
| partition_index | The partition index. |
| committed_offset | The committed message offset. |
| committed_leader_epoch | The leader epoch. |
| metadata | The partition metadata. |
| error_code | The error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| error_code | The top-level error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
+
+
OffsetFetch Response (Version: 7) => throttle_time_ms [topics] error_code _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index committed_offset committed_leader_epoch metadata error_code _tagged_fields 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_leader_epoch => INT32
+      metadata => COMPACT_NULLABLE_STRING
+      error_code => INT16
+  error_code => INT16
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | The responses per topic. |
| name | The topic name. |
| partitions | The responses per partition. |
| partition_index | The partition index. |
| committed_offset | The committed message offset. |
| committed_leader_epoch | The leader epoch. |
| metadata | The partition metadata. |
| error_code | The error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| error_code | The top-level error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
+
+
OffsetFetch Response (Version: 8) => throttle_time_ms [groups] _tagged_fields 
+  throttle_time_ms => INT32
+  groups => group_id [topics] error_code _tagged_fields 
+    group_id => COMPACT_STRING
+    topics => name [partitions] _tagged_fields 
+      name => COMPACT_STRING
+      partitions => partition_index committed_offset committed_leader_epoch metadata error_code _tagged_fields 
+        partition_index => INT32
+        committed_offset => INT64
+        committed_leader_epoch => INT32
+        metadata => COMPACT_NULLABLE_STRING
+        error_code => INT16
+    error_code => INT16
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| groups | The responses per group id. |
| group_id | The group ID. |
| topics | The responses per topic. |
| name | The topic name. |
| partitions | The responses per partition. |
| partition_index | The partition index. |
| committed_offset | The committed message offset. |
| committed_leader_epoch | The leader epoch. |
| metadata | The partition metadata. |
| error_code | The partition-level error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| error_code | The group-level error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
OffsetFetch Response (Version: 9) => throttle_time_ms [groups] _tagged_fields 
+  throttle_time_ms => INT32
+  groups => group_id [topics] error_code _tagged_fields 
+    group_id => COMPACT_STRING
+    topics => name [partitions] _tagged_fields 
+      name => COMPACT_STRING
+      partitions => partition_index committed_offset committed_leader_epoch metadata error_code _tagged_fields 
+        partition_index => INT32
+        committed_offset => INT64
+        committed_leader_epoch => INT32
+        metadata => COMPACT_NULLABLE_STRING
+        error_code => INT16
+    error_code => INT16
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| groups | The responses per group id. |
| group_id | The group ID. |
| topics | The responses per topic. |
| name | The topic name. |
| partitions | The responses per partition. |
| partition_index | The partition index. |
| committed_offset | The committed message offset. |
| committed_leader_epoch | The leader epoch. |
| metadata | The partition metadata. |
| error_code | The partition-level error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| error_code | The group-level error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
OffsetFetch Response (Version: 10) => throttle_time_ms [groups] _tagged_fields 
+  throttle_time_ms => INT32
+  groups => group_id [topics] error_code _tagged_fields 
+    group_id => COMPACT_STRING
+    topics => topic_id [partitions] _tagged_fields 
+      topic_id => UUID
+      partitions => partition_index committed_offset committed_leader_epoch metadata error_code _tagged_fields 
+        partition_index => INT32
+        committed_offset => INT64
+        committed_leader_epoch => INT32
+        metadata => COMPACT_NULLABLE_STRING
+        error_code => INT16
+    error_code => INT16
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| groups | The responses per group id. |
| group_id | The group ID. |
| topics | The responses per topic. |
| topic_id | The topic ID. |
| partitions | The responses per partition. |
| partition_index | The partition index. |
| committed_offset | The committed message offset. |
| committed_leader_epoch | The leader epoch. |
| metadata | The partition metadata. |
| error_code | The partition-level error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| error_code | The group-level error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
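As a hedged illustration (not part of the generated reference), the sketch below shows how the OffsetFetch API is typically reached from the Java `Admin` client: `listConsumerGroupOffsets` fetches a group's committed offsets, and the `requireStable` option corresponds to the `require_stable` flag introduced in version 7. The broker address and group name are placeholders.

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class OffsetFetchExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // requireStable(true) maps to require_stable: the broker answers partitions with
            // pending transactional offsets using a retriable error instead of an unstable value.
            ListConsumerGroupOffsetsOptions options =
                new ListConsumerGroupOffsetsOptions().requireStable(true);
            Map<TopicPartition, OffsetAndMetadata> offsets =
                admin.listConsumerGroupOffsets("my-group", options) // hypothetical group id
                     .partitionsToOffsetAndMetadata()
                     .get();
            offsets.forEach((tp, om) ->
                System.out.printf("%s -> offset=%d leaderEpoch=%s%n",
                    tp, om.offset(), om.leaderEpoch()));
        }
    }
}
```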
+
FindCoordinator API (Key: 10):
+ +Requests:
+
FindCoordinator Request (Version: 0) => key 
+  key => STRING
+

Request header version: 1

| Field | Description |
| --- | --- |
| key | The coordinator key. |
+
+
FindCoordinator Request (Version: 1) => key key_type 
+  key => STRING
+  key_type => INT8
+

Request header version: 1

| Field | Description |
| --- | --- |
| key | The coordinator key. |
| key_type | The coordinator key type. (group, transaction, share). |
+
+
FindCoordinator Request (Version: 2) => key key_type 
+  key => STRING
+  key_type => INT8
+

Request header version: 1

| Field | Description |
| --- | --- |
| key | The coordinator key. |
| key_type | The coordinator key type. (group, transaction, share). |
+
+
FindCoordinator Request (Version: 3) => key key_type _tagged_fields 
+  key => COMPACT_STRING
+  key_type => INT8
+

Request header version: 2

| Field | Description |
| --- | --- |
| key | The coordinator key. |
| key_type | The coordinator key type. (group, transaction, share). |
| _tagged_fields | The tagged fields |
+
+
FindCoordinator Request (Version: 4) => key_type [coordinator_keys] _tagged_fields 
+  key_type => INT8
+  coordinator_keys => COMPACT_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| key_type | The coordinator key type. (group, transaction, share). |
| coordinator_keys | The coordinator keys. |
| _tagged_fields | The tagged fields |
+
+
FindCoordinator Request (Version: 5) => key_type [coordinator_keys] _tagged_fields 
+  key_type => INT8
+  coordinator_keys => COMPACT_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| key_type | The coordinator key type. (group, transaction, share). |
| coordinator_keys | The coordinator keys. |
| _tagged_fields | The tagged fields |
+
+
FindCoordinator Request (Version: 6) => key_type [coordinator_keys] _tagged_fields 
+  key_type => INT8
+  coordinator_keys => COMPACT_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| key_type | The coordinator key type. (group, transaction, share). |
| coordinator_keys | The coordinator keys. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
FindCoordinator Response (Version: 0) => error_code node_id host port 
+  error_code => INT16
+  node_id => INT32
+  host => STRING
+  port => INT32
+

Response header version: 0

| Field | Description |
| --- | --- |
| error_code | The error code, or 0 if there was no error. |
| node_id | The node id. |
| host | The host name. |
| port | The port. |
+
+
FindCoordinator Response (Version: 1) => throttle_time_ms error_code error_message node_id host port 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => NULLABLE_STRING
+  node_id => INT32
+  host => STRING
+  port => INT32
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| node_id | The node id. |
| host | The host name. |
| port | The port. |
+
+
FindCoordinator Response (Version: 2) => throttle_time_ms error_code error_message node_id host port 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => NULLABLE_STRING
+  node_id => INT32
+  host => STRING
+  port => INT32
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| node_id | The node id. |
| host | The host name. |
| port | The port. |
+
+
FindCoordinator Response (Version: 3) => throttle_time_ms error_code error_message node_id host port _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  node_id => INT32
+  host => COMPACT_STRING
+  port => INT32
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| node_id | The node id. |
| host | The host name. |
| port | The port. |
| _tagged_fields | The tagged fields |
+
+
FindCoordinator Response (Version: 4) => throttle_time_ms [coordinators] _tagged_fields 
+  throttle_time_ms => INT32
+  coordinators => key node_id host port error_code error_message _tagged_fields 
+    key => COMPACT_STRING
+    node_id => INT32
+    host => COMPACT_STRING
+    port => INT32
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| coordinators | Each coordinator result in the response. |
| key | The coordinator key. |
| node_id | The node id. |
| host | The host name. |
| port | The port. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
FindCoordinator Response (Version: 5) => throttle_time_ms [coordinators] _tagged_fields 
+  throttle_time_ms => INT32
+  coordinators => key node_id host port error_code error_message _tagged_fields 
+    key => COMPACT_STRING
+    node_id => INT32
+    host => COMPACT_STRING
+    port => INT32
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| coordinators | Each coordinator result in the response. |
| key | The coordinator key. |
| node_id | The node id. |
| host | The host name. |
| port | The port. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
FindCoordinator Response (Version: 6) => throttle_time_ms [coordinators] _tagged_fields 
+  throttle_time_ms => INT32
+  coordinators => key node_id host port error_code error_message _tagged_fields 
+    key => COMPACT_STRING
+    node_id => INT32
+    host => COMPACT_STRING
+    port => INT32
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| coordinators | Each coordinator result in the response. |
| key | The coordinator key. |
| node_id | The node id. |
| host | The host name. |
| port | The port. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
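Clients do not usually send FindCoordinator themselves: consumers and admin clients issue it with `key_type` = group before any group or offset operation, and a transactional producer issues it with `key_type` = transaction when `initTransactions()` runs. The hedged sketch below shows the transactional-producer side; the broker address, `transactional.id`, and topic name are placeholders.

```java
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class FindCoordinatorExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "demo-tx");          // placeholder
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // initTransactions() locates the transaction coordinator via FindCoordinator
            // (key = transactional.id, key_type = transaction) before any other request.
            producer.initTransactions();
            producer.beginTransaction();
            producer.send(new ProducerRecord<>("demo-topic", "k", "v")); // placeholder topic
            producer.commitTransaction();
        }
    }
}
```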
+
JoinGroup API (Key: 11):
+ +Requests:
+
JoinGroup Request (Version: 0) => group_id session_timeout_ms member_id protocol_type [protocols] 
+  group_id => STRING
+  session_timeout_ms => INT32
+  member_id => STRING
+  protocol_type => STRING
+  protocols => name metadata 
+    name => STRING
+    metadata => BYTES
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The group identifier. |
| session_timeout_ms | The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds. |
| member_id | The member id assigned by the group coordinator. |
| protocol_type | The unique name for the class of protocols implemented by the group we want to join. |
| protocols | The list of protocols that the member supports. |
| name | The protocol name. |
| metadata | The protocol metadata. |
+
+
JoinGroup Request (Version: 1) => group_id session_timeout_ms rebalance_timeout_ms member_id protocol_type [protocols] 
+  group_id => STRING
+  session_timeout_ms => INT32
+  rebalance_timeout_ms => INT32
+  member_id => STRING
+  protocol_type => STRING
+  protocols => name metadata 
+    name => STRING
+    metadata => BYTES
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The group identifier. |
| session_timeout_ms | The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds. |
| rebalance_timeout_ms | The maximum time in milliseconds that the coordinator will wait for each member to rejoin when rebalancing the group. |
| member_id | The member id assigned by the group coordinator. |
| protocol_type | The unique name for the class of protocols implemented by the group we want to join. |
| protocols | The list of protocols that the member supports. |
| name | The protocol name. |
| metadata | The protocol metadata. |
+
+
JoinGroup Request (Version: 2) => group_id session_timeout_ms rebalance_timeout_ms member_id protocol_type [protocols] 
+  group_id => STRING
+  session_timeout_ms => INT32
+  rebalance_timeout_ms => INT32
+  member_id => STRING
+  protocol_type => STRING
+  protocols => name metadata 
+    name => STRING
+    metadata => BYTES
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The group identifier. |
| session_timeout_ms | The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds. |
| rebalance_timeout_ms | The maximum time in milliseconds that the coordinator will wait for each member to rejoin when rebalancing the group. |
| member_id | The member id assigned by the group coordinator. |
| protocol_type | The unique name for the class of protocols implemented by the group we want to join. |
| protocols | The list of protocols that the member supports. |
| name | The protocol name. |
| metadata | The protocol metadata. |
+
+
JoinGroup Request (Version: 3) => group_id session_timeout_ms rebalance_timeout_ms member_id protocol_type [protocols] 
+  group_id => STRING
+  session_timeout_ms => INT32
+  rebalance_timeout_ms => INT32
+  member_id => STRING
+  protocol_type => STRING
+  protocols => name metadata 
+    name => STRING
+    metadata => BYTES
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The group identifier. |
| session_timeout_ms | The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds. |
| rebalance_timeout_ms | The maximum time in milliseconds that the coordinator will wait for each member to rejoin when rebalancing the group. |
| member_id | The member id assigned by the group coordinator. |
| protocol_type | The unique name for the class of protocols implemented by the group we want to join. |
| protocols | The list of protocols that the member supports. |
| name | The protocol name. |
| metadata | The protocol metadata. |
+
+
JoinGroup Request (Version: 4) => group_id session_timeout_ms rebalance_timeout_ms member_id protocol_type [protocols] 
+  group_id => STRING
+  session_timeout_ms => INT32
+  rebalance_timeout_ms => INT32
+  member_id => STRING
+  protocol_type => STRING
+  protocols => name metadata 
+    name => STRING
+    metadata => BYTES
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The group identifier. |
| session_timeout_ms | The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds. |
| rebalance_timeout_ms | The maximum time in milliseconds that the coordinator will wait for each member to rejoin when rebalancing the group. |
| member_id | The member id assigned by the group coordinator. |
| protocol_type | The unique name for the class of protocols implemented by the group we want to join. |
| protocols | The list of protocols that the member supports. |
| name | The protocol name. |
| metadata | The protocol metadata. |
+
+
JoinGroup Request (Version: 5) => group_id session_timeout_ms rebalance_timeout_ms member_id group_instance_id protocol_type [protocols] 
+  group_id => STRING
+  session_timeout_ms => INT32
+  rebalance_timeout_ms => INT32
+  member_id => STRING
+  group_instance_id => NULLABLE_STRING
+  protocol_type => STRING
+  protocols => name metadata 
+    name => STRING
+    metadata => BYTES
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The group identifier. |
| session_timeout_ms | The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds. |
| rebalance_timeout_ms | The maximum time in milliseconds that the coordinator will wait for each member to rejoin when rebalancing the group. |
| member_id | The member id assigned by the group coordinator. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| protocol_type | The unique name for the class of protocols implemented by the group we want to join. |
| protocols | The list of protocols that the member supports. |
| name | The protocol name. |
| metadata | The protocol metadata. |
+
+
JoinGroup Request (Version: 6) => group_id session_timeout_ms rebalance_timeout_ms member_id group_instance_id protocol_type [protocols] _tagged_fields 
+  group_id => COMPACT_STRING
+  session_timeout_ms => INT32
+  rebalance_timeout_ms => INT32
+  member_id => COMPACT_STRING
+  group_instance_id => COMPACT_NULLABLE_STRING
+  protocol_type => COMPACT_STRING
+  protocols => name metadata _tagged_fields 
+    name => COMPACT_STRING
+    metadata => COMPACT_BYTES
+

Request header version: 2

| Field | Description |
| --- | --- |
| group_id | The group identifier. |
| session_timeout_ms | The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds. |
| rebalance_timeout_ms | The maximum time in milliseconds that the coordinator will wait for each member to rejoin when rebalancing the group. |
| member_id | The member id assigned by the group coordinator. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| protocol_type | The unique name for the class of protocols implemented by the group we want to join. |
| protocols | The list of protocols that the member supports. |
| name | The protocol name. |
| metadata | The protocol metadata. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
JoinGroup Request (Version: 7) => group_id session_timeout_ms rebalance_timeout_ms member_id group_instance_id protocol_type [protocols] _tagged_fields 
+  group_id => COMPACT_STRING
+  session_timeout_ms => INT32
+  rebalance_timeout_ms => INT32
+  member_id => COMPACT_STRING
+  group_instance_id => COMPACT_NULLABLE_STRING
+  protocol_type => COMPACT_STRING
+  protocols => name metadata _tagged_fields 
+    name => COMPACT_STRING
+    metadata => COMPACT_BYTES
+

Request header version: 2

| Field | Description |
| --- | --- |
| group_id | The group identifier. |
| session_timeout_ms | The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds. |
| rebalance_timeout_ms | The maximum time in milliseconds that the coordinator will wait for each member to rejoin when rebalancing the group. |
| member_id | The member id assigned by the group coordinator. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| protocol_type | The unique name for the class of protocols implemented by the group we want to join. |
| protocols | The list of protocols that the member supports. |
| name | The protocol name. |
| metadata | The protocol metadata. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
JoinGroup Request (Version: 8) => group_id session_timeout_ms rebalance_timeout_ms member_id group_instance_id protocol_type [protocols] reason _tagged_fields 
+  group_id => COMPACT_STRING
+  session_timeout_ms => INT32
+  rebalance_timeout_ms => INT32
+  member_id => COMPACT_STRING
+  group_instance_id => COMPACT_NULLABLE_STRING
+  protocol_type => COMPACT_STRING
+  protocols => name metadata _tagged_fields 
+    name => COMPACT_STRING
+    metadata => COMPACT_BYTES
+  reason => COMPACT_NULLABLE_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| group_id | The group identifier. |
| session_timeout_ms | The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds. |
| rebalance_timeout_ms | The maximum time in milliseconds that the coordinator will wait for each member to rejoin when rebalancing the group. |
| member_id | The member id assigned by the group coordinator. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| protocol_type | The unique name for the class of protocols implemented by the group we want to join. |
| protocols | The list of protocols that the member supports. |
| name | The protocol name. |
| metadata | The protocol metadata. |
| _tagged_fields | The tagged fields |
| reason | The reason why the member (re-)joins the group. |
| _tagged_fields | The tagged fields |
+
+
JoinGroup Request (Version: 9) => group_id session_timeout_ms rebalance_timeout_ms member_id group_instance_id protocol_type [protocols] reason _tagged_fields 
+  group_id => COMPACT_STRING
+  session_timeout_ms => INT32
+  rebalance_timeout_ms => INT32
+  member_id => COMPACT_STRING
+  group_instance_id => COMPACT_NULLABLE_STRING
+  protocol_type => COMPACT_STRING
+  protocols => name metadata _tagged_fields 
+    name => COMPACT_STRING
+    metadata => COMPACT_BYTES
+  reason => COMPACT_NULLABLE_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| group_id | The group identifier. |
| session_timeout_ms | The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds. |
| rebalance_timeout_ms | The maximum time in milliseconds that the coordinator will wait for each member to rejoin when rebalancing the group. |
| member_id | The member id assigned by the group coordinator. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| protocol_type | The unique name for the class of protocols implemented by the group we want to join. |
| protocols | The list of protocols that the member supports. |
| name | The protocol name. |
| metadata | The protocol metadata. |
| _tagged_fields | The tagged fields |
| reason | The reason why the member (re-)joins the group. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
JoinGroup Response (Version: 0) => error_code generation_id protocol_name leader member_id [members] 
+  error_code => INT16
+  generation_id => INT32
+  protocol_name => STRING
+  leader => STRING
+  member_id => STRING
+  members => member_id metadata 
+    member_id => STRING
+    metadata => BYTES
+

Response header version: 0

| Field | Description |
| --- | --- |
| error_code | The error code, or 0 if there was no error. |
| generation_id | The generation ID of the group. |
| protocol_name | The group protocol selected by the coordinator. |
| leader | The leader of the group. |
| member_id | The member ID assigned by the group coordinator. |
| members | The group members. |
| member_id | The group member ID. |
| metadata | The group member metadata. |
+
+
JoinGroup Response (Version: 1) => error_code generation_id protocol_name leader member_id [members] 
+  error_code => INT16
+  generation_id => INT32
+  protocol_name => STRING
+  leader => STRING
+  member_id => STRING
+  members => member_id metadata 
+    member_id => STRING
+    metadata => BYTES
+

Response header version: 0

| Field | Description |
| --- | --- |
| error_code | The error code, or 0 if there was no error. |
| generation_id | The generation ID of the group. |
| protocol_name | The group protocol selected by the coordinator. |
| leader | The leader of the group. |
| member_id | The member ID assigned by the group coordinator. |
| members | The group members. |
| member_id | The group member ID. |
| metadata | The group member metadata. |
+
+
JoinGroup Response (Version: 2) => throttle_time_ms error_code generation_id protocol_name leader member_id [members] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  generation_id => INT32
+  protocol_name => STRING
+  leader => STRING
+  member_id => STRING
+  members => member_id metadata 
+    member_id => STRING
+    metadata => BYTES
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| generation_id | The generation ID of the group. |
| protocol_name | The group protocol selected by the coordinator. |
| leader | The leader of the group. |
| member_id | The member ID assigned by the group coordinator. |
| members | The group members. |
| member_id | The group member ID. |
| metadata | The group member metadata. |
+
+
JoinGroup Response (Version: 3) => throttle_time_ms error_code generation_id protocol_name leader member_id [members] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  generation_id => INT32
+  protocol_name => STRING
+  leader => STRING
+  member_id => STRING
+  members => member_id metadata 
+    member_id => STRING
+    metadata => BYTES
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| generation_id | The generation ID of the group. |
| protocol_name | The group protocol selected by the coordinator. |
| leader | The leader of the group. |
| member_id | The member ID assigned by the group coordinator. |
| members | The group members. |
| member_id | The group member ID. |
| metadata | The group member metadata. |
+
+
JoinGroup Response (Version: 4) => throttle_time_ms error_code generation_id protocol_name leader member_id [members] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  generation_id => INT32
+  protocol_name => STRING
+  leader => STRING
+  member_id => STRING
+  members => member_id metadata 
+    member_id => STRING
+    metadata => BYTES
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| generation_id | The generation ID of the group. |
| protocol_name | The group protocol selected by the coordinator. |
| leader | The leader of the group. |
| member_id | The member ID assigned by the group coordinator. |
| members | The group members. |
| member_id | The group member ID. |
| metadata | The group member metadata. |
+
+
JoinGroup Response (Version: 5) => throttle_time_ms error_code generation_id protocol_name leader member_id [members] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  generation_id => INT32
+  protocol_name => STRING
+  leader => STRING
+  member_id => STRING
+  members => member_id group_instance_id metadata 
+    member_id => STRING
+    group_instance_id => NULLABLE_STRING
+    metadata => BYTES
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| generation_id | The generation ID of the group. |
| protocol_name | The group protocol selected by the coordinator. |
| leader | The leader of the group. |
| member_id | The member ID assigned by the group coordinator. |
| members | The group members. |
| member_id | The group member ID. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| metadata | The group member metadata. |
+
+
JoinGroup Response (Version: 6) => throttle_time_ms error_code generation_id protocol_name leader member_id [members] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  generation_id => INT32
+  protocol_name => COMPACT_STRING
+  leader => COMPACT_STRING
+  member_id => COMPACT_STRING
+  members => member_id group_instance_id metadata _tagged_fields 
+    member_id => COMPACT_STRING
+    group_instance_id => COMPACT_NULLABLE_STRING
+    metadata => COMPACT_BYTES
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| generation_id | The generation ID of the group. |
| protocol_name | The group protocol selected by the coordinator. |
| leader | The leader of the group. |
| member_id | The member ID assigned by the group coordinator. |
| members | The group members. |
| member_id | The group member ID. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| metadata | The group member metadata. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
JoinGroup Response (Version: 7) => throttle_time_ms error_code generation_id protocol_type protocol_name leader member_id [members] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  generation_id => INT32
+  protocol_type => COMPACT_NULLABLE_STRING
+  protocol_name => COMPACT_NULLABLE_STRING
+  leader => COMPACT_STRING
+  member_id => COMPACT_STRING
+  members => member_id group_instance_id metadata _tagged_fields 
+    member_id => COMPACT_STRING
+    group_instance_id => COMPACT_NULLABLE_STRING
+    metadata => COMPACT_BYTES
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| generation_id | The generation ID of the group. |
| protocol_type | The group protocol name. |
| protocol_name | The group protocol selected by the coordinator. |
| leader | The leader of the group. |
| member_id | The member ID assigned by the group coordinator. |
| members | The group members. |
| member_id | The group member ID. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| metadata | The group member metadata. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
JoinGroup Response (Version: 8) => throttle_time_ms error_code generation_id protocol_type protocol_name leader member_id [members] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  generation_id => INT32
+  protocol_type => COMPACT_NULLABLE_STRING
+  protocol_name => COMPACT_NULLABLE_STRING
+  leader => COMPACT_STRING
+  member_id => COMPACT_STRING
+  members => member_id group_instance_id metadata _tagged_fields 
+    member_id => COMPACT_STRING
+    group_instance_id => COMPACT_NULLABLE_STRING
+    metadata => COMPACT_BYTES
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| generation_id | The generation ID of the group. |
| protocol_type | The group protocol name. |
| protocol_name | The group protocol selected by the coordinator. |
| leader | The leader of the group. |
| member_id | The member ID assigned by the group coordinator. |
| members | The group members. |
| member_id | The group member ID. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| metadata | The group member metadata. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
JoinGroup Response (Version: 9) => throttle_time_ms error_code generation_id protocol_type protocol_name leader skip_assignment member_id [members] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  generation_id => INT32
+  protocol_type => COMPACT_NULLABLE_STRING
+  protocol_name => COMPACT_NULLABLE_STRING
+  leader => COMPACT_STRING
+  skip_assignment => BOOLEAN
+  member_id => COMPACT_STRING
+  members => member_id group_instance_id metadata _tagged_fields 
+    member_id => COMPACT_STRING
+    group_instance_id => COMPACT_NULLABLE_STRING
+    metadata => COMPACT_BYTES
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| generation_id | The generation ID of the group. |
| protocol_type | The group protocol name. |
| protocol_name | The group protocol selected by the coordinator. |
| leader | The leader of the group. |
| skip_assignment | True if the leader must skip running the assignment. |
| member_id | The member ID assigned by the group coordinator. |
| members | The group members. |
| member_id | The group member ID. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| metadata | The group member metadata. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
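In the classic consumer protocol, JoinGroup is driven entirely by the `KafkaConsumer`: the first `poll()` after `subscribe()` triggers FindCoordinator, then JoinGroup, then SyncGroup. The hedged sketch below shows the configuration that feeds the request fields above; `group.instance.id` maps to `group_instance_id` (version 5+) and `session.timeout.ms` to `session_timeout_ms`. Broker address, group name, instance id, and topic are placeholders.

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class JoinGroupExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");                // -> group_id
        props.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, "worker-1");       // -> group_instance_id (static membership)
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 45000);           // -> session_timeout_ms
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("demo-topic")); // placeholder topic
            // The first poll() joins the group: FindCoordinator, JoinGroup, SyncGroup.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<String, String> r : records) {
                System.out.printf("%s-%d@%d: %s%n", r.topic(), r.partition(), r.offset(), r.value());
            }
        }
    }
}
```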
+
Heartbeat API (Key: 12):
+ +Requests:
+
Heartbeat Request (Version: 0) => group_id generation_id member_id 
+  group_id => STRING
+  generation_id => INT32
+  member_id => STRING
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The group id. |
| generation_id | The generation of the group. |
| member_id | The member ID. |
+
+
Heartbeat Request (Version: 1) => group_id generation_id member_id 
+  group_id => STRING
+  generation_id => INT32
+  member_id => STRING
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The group id. |
| generation_id | The generation of the group. |
| member_id | The member ID. |
+
+
Heartbeat Request (Version: 2) => group_id generation_id member_id 
+  group_id => STRING
+  generation_id => INT32
+  member_id => STRING
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The group id. |
| generation_id | The generation of the group. |
| member_id | The member ID. |
+
+
Heartbeat Request (Version: 3) => group_id generation_id member_id group_instance_id 
+  group_id => STRING
+  generation_id => INT32
+  member_id => STRING
+  group_instance_id => NULLABLE_STRING
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The group id. |
| generation_id | The generation of the group. |
| member_id | The member ID. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
+
+
Heartbeat Request (Version: 4) => group_id generation_id member_id group_instance_id _tagged_fields 
+  group_id => COMPACT_STRING
+  generation_id => INT32
+  member_id => COMPACT_STRING
+  group_instance_id => COMPACT_NULLABLE_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| group_id | The group id. |
| generation_id | The generation of the group. |
| member_id | The member ID. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
Heartbeat Response (Version: 0) => error_code 
+  error_code => INT16
+

Response header version: 0

| Field | Description |
| --- | --- |
| error_code | The error code, or 0 if there was no error. |
+
+
Heartbeat Response (Version: 1) => throttle_time_ms error_code 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
+
+
Heartbeat Response (Version: 2) => throttle_time_ms error_code 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
+
+
Heartbeat Response (Version: 3) => throttle_time_ms error_code 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
+
+
Heartbeat Response (Version: 4) => throttle_time_ms error_code _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
+
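Heartbeat requests are sent by the consumer's background heartbeat thread, not by application code. A hedged sketch of the two client settings that govern them (values here are illustrative, not recommendations):

```java
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;

public class HeartbeatTuning {
    // heartbeat.interval.ms controls how often Heartbeat requests are sent to the coordinator;
    // session.timeout.ms is how long the coordinator waits without a heartbeat before evicting
    // the member and triggering a rebalance.
    public static Properties withHeartbeatSettings(Properties base) {
        Properties props = new Properties();
        props.putAll(base);
        props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 3000);  // example value
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 45000);    // example value
        return props;
    }
}
```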
+
LeaveGroup API (Key: 13):
+ +Requests:
+
LeaveGroup Request (Version: 0) => group_id member_id 
+  group_id => STRING
+  member_id => STRING
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The ID of the group to leave. |
| member_id | The member ID to remove from the group. |
+
+
LeaveGroup Request (Version: 1) => group_id member_id 
+  group_id => STRING
+  member_id => STRING
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The ID of the group to leave. |
| member_id | The member ID to remove from the group. |
+
+
LeaveGroup Request (Version: 2) => group_id member_id 
+  group_id => STRING
+  member_id => STRING
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The ID of the group to leave. |
| member_id | The member ID to remove from the group. |
+
+
LeaveGroup Request (Version: 3) => group_id [members] 
+  group_id => STRING
+  members => member_id group_instance_id 
+    member_id => STRING
+    group_instance_id => NULLABLE_STRING
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The ID of the group to leave. |
| members | List of leaving member identities. |
| member_id | The member ID to remove from the group. |
| group_instance_id | The group instance ID to remove from the group. |
+
+
LeaveGroup Request (Version: 4) => group_id [members] _tagged_fields 
+  group_id => COMPACT_STRING
+  members => member_id group_instance_id _tagged_fields 
+    member_id => COMPACT_STRING
+    group_instance_id => COMPACT_NULLABLE_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| group_id | The ID of the group to leave. |
| members | List of leaving member identities. |
| member_id | The member ID to remove from the group. |
| group_instance_id | The group instance ID to remove from the group. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
LeaveGroup Request (Version: 5) => group_id [members] _tagged_fields 
+  group_id => COMPACT_STRING
+  members => member_id group_instance_id reason _tagged_fields 
+    member_id => COMPACT_STRING
+    group_instance_id => COMPACT_NULLABLE_STRING
+    reason => COMPACT_NULLABLE_STRING
+

Request header version: 2

| Field | Description |
| --- | --- |
| group_id | The ID of the group to leave. |
| members | List of leaving member identities. |
| member_id | The member ID to remove from the group. |
| group_instance_id | The group instance ID to remove from the group. |
| reason | The reason why the member left the group. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+Responses:
+
LeaveGroup Response (Version: 0) => error_code 
+  error_code => INT16
+

Response header version: 0

| Field | Description |
| --- | --- |
| error_code | The error code, or 0 if there was no error. |
+
+
LeaveGroup Response (Version: 1) => throttle_time_ms error_code 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
+
+
LeaveGroup Response (Version: 2) => throttle_time_ms error_code 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
+
+
LeaveGroup Response (Version: 3) => throttle_time_ms error_code [members] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  members => member_id group_instance_id error_code 
+    member_id => STRING
+    group_instance_id => NULLABLE_STRING
+    error_code => INT16
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| members | List of leaving member responses. |
| member_id | The member ID to remove from the group. |
| group_instance_id | The group instance ID to remove from the group. |
| error_code | The error code, or 0 if there was no error. |
+
+
LeaveGroup Response (Version: 4) => throttle_time_ms error_code [members] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  members => member_id group_instance_id error_code _tagged_fields 
+    member_id => COMPACT_STRING
+    group_instance_id => COMPACT_NULLABLE_STRING
+    error_code => INT16
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| members | List of leaving member responses. |
| member_id | The member ID to remove from the group. |
| group_instance_id | The group instance ID to remove from the group. |
| error_code | The error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
LeaveGroup Response (Version: 5) => throttle_time_ms error_code [members] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  members => member_id group_instance_id error_code _tagged_fields 
+    member_id => COMPACT_STRING
+    group_instance_id => COMPACT_NULLABLE_STRING
+    error_code => INT16
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| members | List of leaving member responses. |
| member_id | The member ID to remove from the group. |
| group_instance_id | The group instance ID to remove from the group. |
| error_code | The error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
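A consumer sends LeaveGroup itself when it is closed or unsubscribed, but an operator can also evict a static member through the admin client, which translates into a LeaveGroup request (v3+) carrying the member's `group_instance_id`. A hedged sketch, with broker address, group name, and instance id as placeholders:

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.MemberToRemove;
import org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions;

public class LeaveGroupExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // Remove the static member identified by group.instance.id "worker-1" (placeholder).
            admin.removeMembersFromConsumerGroup(
                    "my-group", // hypothetical group id
                    new RemoveMembersFromConsumerGroupOptions(List.of(new MemberToRemove("worker-1"))))
                 .all()
                 .get();
        }
    }
}
```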
+
SyncGroup API (Key: 14):
+ +Requests:
+
SyncGroup Request (Version: 0) => group_id generation_id member_id [assignments] 
+  group_id => STRING
+  generation_id => INT32
+  member_id => STRING
+  assignments => member_id assignment 
+    member_id => STRING
+    assignment => BYTES
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The unique group identifier. |
| generation_id | The generation of the group. |
| member_id | The member ID assigned by the group. |
| assignments | Each assignment. |
| member_id | The ID of the member to assign. |
| assignment | The member assignment. |
+
+
SyncGroup Request (Version: 1) => group_id generation_id member_id [assignments] 
+  group_id => STRING
+  generation_id => INT32
+  member_id => STRING
+  assignments => member_id assignment 
+    member_id => STRING
+    assignment => BYTES
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The unique group identifier. |
| generation_id | The generation of the group. |
| member_id | The member ID assigned by the group. |
| assignments | Each assignment. |
| member_id | The ID of the member to assign. |
| assignment | The member assignment. |
+
+
SyncGroup Request (Version: 2) => group_id generation_id member_id [assignments] 
+  group_id => STRING
+  generation_id => INT32
+  member_id => STRING
+  assignments => member_id assignment 
+    member_id => STRING
+    assignment => BYTES
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The unique group identifier. |
| generation_id | The generation of the group. |
| member_id | The member ID assigned by the group. |
| assignments | Each assignment. |
| member_id | The ID of the member to assign. |
| assignment | The member assignment. |
+
+
SyncGroup Request (Version: 3) => group_id generation_id member_id group_instance_id [assignments] 
+  group_id => STRING
+  generation_id => INT32
+  member_id => STRING
+  group_instance_id => NULLABLE_STRING
+  assignments => member_id assignment 
+    member_id => STRING
+    assignment => BYTES
+

Request header version: 1

| Field | Description |
| --- | --- |
| group_id | The unique group identifier. |
| generation_id | The generation of the group. |
| member_id | The member ID assigned by the group. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| assignments | Each assignment. |
| member_id | The ID of the member to assign. |
| assignment | The member assignment. |
+
+
SyncGroup Request (Version: 4) => group_id generation_id member_id group_instance_id [assignments] _tagged_fields 
+  group_id => COMPACT_STRING
+  generation_id => INT32
+  member_id => COMPACT_STRING
+  group_instance_id => COMPACT_NULLABLE_STRING
+  assignments => member_id assignment _tagged_fields 
+    member_id => COMPACT_STRING
+    assignment => COMPACT_BYTES
+

Request header version: 2

| Field | Description |
| --- | --- |
| group_id | The unique group identifier. |
| generation_id | The generation of the group. |
| member_id | The member ID assigned by the group. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| assignments | Each assignment. |
| member_id | The ID of the member to assign. |
| assignment | The member assignment. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
SyncGroup Request (Version: 5) => group_id generation_id member_id group_instance_id protocol_type protocol_name [assignments] _tagged_fields 
+  group_id => COMPACT_STRING
+  generation_id => INT32
+  member_id => COMPACT_STRING
+  group_instance_id => COMPACT_NULLABLE_STRING
+  protocol_type => COMPACT_NULLABLE_STRING
+  protocol_name => COMPACT_NULLABLE_STRING
+  assignments => member_id assignment _tagged_fields 
+    member_id => COMPACT_STRING
+    assignment => COMPACT_BYTES
+

Request header version: 2

| Field | Description |
| --- | --- |
| group_id | The unique group identifier. |
| generation_id | The generation of the group. |
| member_id | The member ID assigned by the group. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| protocol_type | The group protocol type. |
| protocol_name | The group protocol name. |
| assignments | Each assignment. |
| member_id | The ID of the member to assign. |
| assignment | The member assignment. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+Responses:
+
SyncGroup Response (Version: 0) => error_code assignment 
+  error_code => INT16
+  assignment => BYTES
+

Response header version: 0

| Field | Description |
| --- | --- |
| error_code | The error code, or 0 if there was no error. |
| assignment | The member assignment. |
+
+
SyncGroup Response (Version: 1) => throttle_time_ms error_code assignment 
+  throttle_time_ms => INT32
+  error_code => INT16
+  assignment => BYTES
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| assignment | The member assignment. |
+
+
SyncGroup Response (Version: 2) => throttle_time_ms error_code assignment 
+  throttle_time_ms => INT32
+  error_code => INT16
+  assignment => BYTES
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| assignment | The member assignment. |
+
+
SyncGroup Response (Version: 3) => throttle_time_ms error_code assignment 
+  throttle_time_ms => INT32
+  error_code => INT16
+  assignment => BYTES
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| assignment | The member assignment. |
+
+
SyncGroup Response (Version: 4) => throttle_time_ms error_code assignment _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  assignment => COMPACT_BYTES
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| assignment | The member assignment. |
| _tagged_fields | The tagged fields |
+
+
SyncGroup Response (Version: 5) => throttle_time_ms error_code protocol_type protocol_name assignment _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  protocol_type => COMPACT_NULLABLE_STRING
+  protocol_name => COMPACT_NULLABLE_STRING
+  assignment => COMPACT_BYTES
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| protocol_type | The group protocol type. |
| protocol_name | The group protocol name. |
| assignment | The member assignment. |
| _tagged_fields | The tagged fields |
+
+
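Clients normally never build JoinGroup or SyncGroup requests by hand; the consumer's group membership protocol sends them during a rebalance. A minimal sketch of the subscribe/poll loop that drives these requests under the hood (the broker address, group id, and topic name are placeholders):

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class SyncGroupExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("group.id", "example-group");           // group_id used in JoinGroup/SyncGroup
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // subscribe() + poll() triggers JoinGroup, then SyncGroup to receive the assignment
            consumer.subscribe(List.of("events"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> r : records) {
                System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset());
            }
        }
    }
}
```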
DescribeGroups API (Key: 15):
Requests:
+
DescribeGroups Request (Version: 0) => [groups] 
+  groups => STRING
+

Request header version: 1

| Field | Description |
|---|---|
| groups | The names of the groups to describe. |
+
+
DescribeGroups Request (Version: 1) => [groups] 
+  groups => STRING
+

Request header version: 1

| Field | Description |
|---|---|
| groups | The names of the groups to describe. |
+
+
DescribeGroups Request (Version: 2) => [groups] 
+  groups => STRING
+

Request header version: 1

| Field | Description |
|---|---|
| groups | The names of the groups to describe. |
+
+
DescribeGroups Request (Version: 3) => [groups] include_authorized_operations 
+  groups => STRING
+  include_authorized_operations => BOOLEAN
+

Request header version: 1

| Field | Description |
|---|---|
| groups | The names of the groups to describe. |
| include_authorized_operations | Whether to include authorized operations. |
+
+
DescribeGroups Request (Version: 4) => [groups] include_authorized_operations 
+  groups => STRING
+  include_authorized_operations => BOOLEAN
+

Request header version: 1

| Field | Description |
|---|---|
| groups | The names of the groups to describe. |
| include_authorized_operations | Whether to include authorized operations. |
+
+
DescribeGroups Request (Version: 5) => [groups] include_authorized_operations _tagged_fields 
+  groups => COMPACT_STRING
+  include_authorized_operations => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| groups | The names of the groups to describe. |
| include_authorized_operations | Whether to include authorized operations. |
| _tagged_fields | The tagged fields |
+
+
DescribeGroups Request (Version: 6) => [groups] include_authorized_operations _tagged_fields 
+  groups => COMPACT_STRING
+  include_authorized_operations => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| groups | The names of the groups to describe. |
| include_authorized_operations | Whether to include authorized operations. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
DescribeGroups Response (Version: 0) => [groups] 
+  groups => error_code group_id group_state protocol_type protocol_data [members] 
+    error_code => INT16
+    group_id => STRING
+    group_state => STRING
+    protocol_type => STRING
+    protocol_data => STRING
+    members => member_id client_id client_host member_metadata member_assignment 
+      member_id => STRING
+      client_id => STRING
+      client_host => STRING
+      member_metadata => BYTES
+      member_assignment => BYTES
+

Response header version: 0

| Field | Description |
|---|---|
| groups | Each described group. |
| error_code | The describe error, or 0 if there was no error. |
| group_id | The group ID string. |
| group_state | The group state string, or the empty string. |
| protocol_type | The group protocol type, or the empty string. |
| protocol_data | The group protocol data, or the empty string. |
| members | The group members. |
| member_id | The member id. |
| client_id | The client ID used in the member's latest join group request. |
| client_host | The client host. |
| member_metadata | The metadata corresponding to the current group protocol in use. |
| member_assignment | The current assignment provided by the group leader. |
+
+
DescribeGroups Response (Version: 1) => throttle_time_ms [groups] 
+  throttle_time_ms => INT32
+  groups => error_code group_id group_state protocol_type protocol_data [members] 
+    error_code => INT16
+    group_id => STRING
+    group_state => STRING
+    protocol_type => STRING
+    protocol_data => STRING
+    members => member_id client_id client_host member_metadata member_assignment 
+      member_id => STRING
+      client_id => STRING
+      client_host => STRING
+      member_metadata => BYTES
+      member_assignment => BYTES
+

Response header version: 0

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
groupsEach described group.
error_codeThe describe error, or 0 if there was no error.
group_idThe group ID string.
group_stateThe group state string, or the empty string.
protocol_typeThe group protocol type, or the empty string.
protocol_dataThe group protocol data, or the empty string.
membersThe group members.
member_idThe member id.
client_idThe client ID used in the member's latest join group request.
client_hostThe client host.
member_metadataThe metadata corresponding to the current group protocol in use.
member_assignmentThe current assignment provided by the group leader.
+
+
DescribeGroups Response (Version: 2) => throttle_time_ms [groups] 
+  throttle_time_ms => INT32
+  groups => error_code group_id group_state protocol_type protocol_data [members] 
+    error_code => INT16
+    group_id => STRING
+    group_state => STRING
+    protocol_type => STRING
+    protocol_data => STRING
+    members => member_id client_id client_host member_metadata member_assignment 
+      member_id => STRING
+      client_id => STRING
+      client_host => STRING
+      member_metadata => BYTES
+      member_assignment => BYTES
+

Response header version: 0

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
groupsEach described group.
error_codeThe describe error, or 0 if there was no error.
group_idThe group ID string.
group_stateThe group state string, or the empty string.
protocol_typeThe group protocol type, or the empty string.
protocol_dataThe group protocol data, or the empty string.
membersThe group members.
member_idThe member id.
client_idThe client ID used in the member's latest join group request.
client_hostThe client host.
member_metadataThe metadata corresponding to the current group protocol in use.
member_assignmentThe current assignment provided by the group leader.
+
+
DescribeGroups Response (Version: 3) => throttle_time_ms [groups] 
+  throttle_time_ms => INT32
+  groups => error_code group_id group_state protocol_type protocol_data [members] authorized_operations 
+    error_code => INT16
+    group_id => STRING
+    group_state => STRING
+    protocol_type => STRING
+    protocol_data => STRING
+    members => member_id client_id client_host member_metadata member_assignment 
+      member_id => STRING
+      client_id => STRING
+      client_host => STRING
+      member_metadata => BYTES
+      member_assignment => BYTES
+    authorized_operations => INT32
+

Response header version: 0

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
groupsEach described group.
error_codeThe describe error, or 0 if there was no error.
group_idThe group ID string.
group_stateThe group state string, or the empty string.
protocol_typeThe group protocol type, or the empty string.
protocol_dataThe group protocol data, or the empty string.
membersThe group members.
member_idThe member id.
client_idThe client ID used in the member's latest join group request.
client_hostThe client host.
member_metadataThe metadata corresponding to the current group protocol in use.
member_assignmentThe current assignment provided by the group leader.
authorized_operations32-bit bitfield to represent authorized operations for this group.
+
+
DescribeGroups Response (Version: 4) => throttle_time_ms [groups] 
+  throttle_time_ms => INT32
+  groups => error_code group_id group_state protocol_type protocol_data [members] authorized_operations 
+    error_code => INT16
+    group_id => STRING
+    group_state => STRING
+    protocol_type => STRING
+    protocol_data => STRING
+    members => member_id group_instance_id client_id client_host member_metadata member_assignment 
+      member_id => STRING
+      group_instance_id => NULLABLE_STRING
+      client_id => STRING
+      client_host => STRING
+      member_metadata => BYTES
+      member_assignment => BYTES
+    authorized_operations => INT32
+

Response header version: 0

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
groupsEach described group.
error_codeThe describe error, or 0 if there was no error.
group_idThe group ID string.
group_stateThe group state string, or the empty string.
protocol_typeThe group protocol type, or the empty string.
protocol_dataThe group protocol data, or the empty string.
membersThe group members.
member_idThe member id.
group_instance_idThe unique identifier of the consumer instance provided by end user.
client_idThe client ID used in the member's latest join group request.
client_hostThe client host.
member_metadataThe metadata corresponding to the current group protocol in use.
member_assignmentThe current assignment provided by the group leader.
authorized_operations32-bit bitfield to represent authorized operations for this group.
+
+
DescribeGroups Response (Version: 5) => throttle_time_ms [groups] _tagged_fields 
+  throttle_time_ms => INT32
+  groups => error_code group_id group_state protocol_type protocol_data [members] authorized_operations _tagged_fields 
+    error_code => INT16
+    group_id => COMPACT_STRING
+    group_state => COMPACT_STRING
+    protocol_type => COMPACT_STRING
+    protocol_data => COMPACT_STRING
+    members => member_id group_instance_id client_id client_host member_metadata member_assignment _tagged_fields 
+      member_id => COMPACT_STRING
+      group_instance_id => COMPACT_NULLABLE_STRING
+      client_id => COMPACT_STRING
+      client_host => COMPACT_STRING
+      member_metadata => COMPACT_BYTES
+      member_assignment => COMPACT_BYTES
+    authorized_operations => INT32
+

Response header version: 1

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
groupsEach described group.
error_codeThe describe error, or 0 if there was no error.
group_idThe group ID string.
group_stateThe group state string, or the empty string.
protocol_typeThe group protocol type, or the empty string.
protocol_dataThe group protocol data, or the empty string.
membersThe group members.
member_idThe member id.
group_instance_idThe unique identifier of the consumer instance provided by end user.
client_idThe client ID used in the member's latest join group request.
client_hostThe client host.
member_metadataThe metadata corresponding to the current group protocol in use.
member_assignmentThe current assignment provided by the group leader.
_tagged_fieldsThe tagged fields
authorized_operations32-bit bitfield to represent authorized operations for this group.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
+
+
DescribeGroups Response (Version: 6) => throttle_time_ms [groups] _tagged_fields 
+  throttle_time_ms => INT32
+  groups => error_code error_message group_id group_state protocol_type protocol_data [members] authorized_operations _tagged_fields 
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    group_id => COMPACT_STRING
+    group_state => COMPACT_STRING
+    protocol_type => COMPACT_STRING
+    protocol_data => COMPACT_STRING
+    members => member_id group_instance_id client_id client_host member_metadata member_assignment _tagged_fields 
+      member_id => COMPACT_STRING
+      group_instance_id => COMPACT_NULLABLE_STRING
+      client_id => COMPACT_STRING
+      client_host => COMPACT_STRING
+      member_metadata => COMPACT_BYTES
+      member_assignment => COMPACT_BYTES
+    authorized_operations => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| groups | Each described group. |
| error_code | The describe error, or 0 if there was no error. |
| error_message | The describe error message, or null if there was no error. |
| group_id | The group ID string. |
| group_state | The group state string, or the empty string. |
| protocol_type | The group protocol type, or the empty string. |
| protocol_data | The group protocol data, or the empty string. |
| members | The group members. |
| member_id | The member id. |
| group_instance_id | The unique identifier of the consumer instance provided by end user. |
| client_id | The client ID used in the member's latest join group request. |
| client_host | The client host. |
| member_metadata | The metadata corresponding to the current group protocol in use. |
| member_assignment | The current assignment provided by the group leader. |
| _tagged_fields | The tagged fields |
| authorized_operations | 32-bit bitfield to represent authorized operations for this group. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
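Most applications reach DescribeGroups through the Admin client rather than crafting the request themselves. A hedged sketch (group name and bootstrap address are placeholders) using `Admin.describeConsumerGroups`, whose options object also exposes the `include_authorized_operations` flag:

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions;

public class DescribeGroupsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // include_authorized_operations is set via the options object
            DescribeConsumerGroupsOptions opts =
                new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true);
            ConsumerGroupDescription desc = admin
                .describeConsumerGroups(List.of("example-group"), opts)
                .describedGroups()
                .get("example-group")
                .get();
            System.out.println(desc.groupId() + " state=" + desc.state()
                + " members=" + desc.members().size());
        }
    }
}
```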
ListGroups API (Key: 16):
Requests:
+
ListGroups Request (Version: 0) => 
+

Request header version: 1

+ + + +
FieldDescription
+
+
ListGroups Request (Version: 1) => 
+

Request header version: 1

+ + + +
FieldDescription
+
+
ListGroups Request (Version: 2) => 
+

Request header version: 1

+ + + +
FieldDescription
+
+
ListGroups Request (Version: 3) => _tagged_fields 
+

Request header version: 2

| Field | Description |
|---|---|
| _tagged_fields | The tagged fields |
+
+
ListGroups Request (Version: 4) => [states_filter] _tagged_fields 
+  states_filter => COMPACT_STRING
+

Request header version: 2

| Field | Description |
|---|---|
| states_filter | The states of the groups we want to list. If empty, all groups are returned with their state. |
| _tagged_fields | The tagged fields |
+
+
ListGroups Request (Version: 5) => [states_filter] [types_filter] _tagged_fields 
+  states_filter => COMPACT_STRING
+  types_filter => COMPACT_STRING
+

Request header version: 2

| Field | Description |
|---|---|
| states_filter | The states of the groups we want to list. If empty, all groups are returned with their state. |
| types_filter | The types of the groups we want to list. If empty, all groups are returned with their type. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
ListGroups Response (Version: 0) => error_code [groups] 
+  error_code => INT16
+  groups => group_id protocol_type 
+    group_id => STRING
+    protocol_type => STRING
+

Response header version: 0

| Field | Description |
|---|---|
| error_code | The error code, or 0 if there was no error. |
| groups | Each group in the response. |
| group_id | The group ID. |
| protocol_type | The group protocol type. |
+
+
ListGroups Response (Version: 1) => throttle_time_ms error_code [groups] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  groups => group_id protocol_type 
+    group_id => STRING
+    protocol_type => STRING
+

Response header version: 0

+ + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_codeThe error code, or 0 if there was no error.
groupsEach group in the response.
group_idThe group ID.
protocol_typeThe group protocol type.
+
+
ListGroups Response (Version: 2) => throttle_time_ms error_code [groups] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  groups => group_id protocol_type 
+    group_id => STRING
+    protocol_type => STRING
+

Response header version: 0

+ + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_codeThe error code, or 0 if there was no error.
groupsEach group in the response.
group_idThe group ID.
protocol_typeThe group protocol type.
+
+
ListGroups Response (Version: 3) => throttle_time_ms error_code [groups] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  groups => group_id protocol_type _tagged_fields 
+    group_id => COMPACT_STRING
+    protocol_type => COMPACT_STRING
+

Response header version: 1

+ + + + + + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_codeThe error code, or 0 if there was no error.
groupsEach group in the response.
group_idThe group ID.
protocol_typeThe group protocol type.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
+
+
ListGroups Response (Version: 4) => throttle_time_ms error_code [groups] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  groups => group_id protocol_type group_state _tagged_fields 
+    group_id => COMPACT_STRING
+    protocol_type => COMPACT_STRING
+    group_state => COMPACT_STRING
+

Response header version: 1

+ + + + + + + + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_codeThe error code, or 0 if there was no error.
groupsEach group in the response.
group_idThe group ID.
protocol_typeThe group protocol type.
group_stateThe group state name.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
+
+
ListGroups Response (Version: 5) => throttle_time_ms error_code [groups] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  groups => group_id protocol_type group_state group_type _tagged_fields 
+    group_id => COMPACT_STRING
+    protocol_type => COMPACT_STRING
+    group_state => COMPACT_STRING
+    group_type => COMPACT_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| groups | Each group in the response. |
| group_id | The group ID. |
| protocol_type | The group protocol type. |
| group_state | The group state name. |
| group_type | The group type name. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
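The ListGroups `states_filter` maps onto the Admin client's `ListConsumerGroupsOptions.inStates(...)`. A small sketch (bootstrap address is a placeholder) that lists only stable consumer groups:

```java
import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ConsumerGroupListing;
import org.apache.kafka.clients.admin.ListConsumerGroupsOptions;
import org.apache.kafka.common.ConsumerGroupState;

public class ListGroupsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // Only return groups currently in the Stable state
            ListConsumerGroupsOptions opts =
                new ListConsumerGroupsOptions().inStates(Set.of(ConsumerGroupState.STABLE));
            for (ConsumerGroupListing g : admin.listConsumerGroups(opts).all().get()) {
                System.out.println(g.groupId() + " " + g.state());
            }
        }
    }
}
```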
SaslHandshake API (Key: 17):
Requests:
+
SaslHandshake Request (Version: 0) => mechanism 
+  mechanism => STRING
+

Request header version: 1

| Field | Description |
|---|---|
| mechanism | The SASL mechanism chosen by the client. |
+
+
SaslHandshake Request (Version: 1) => mechanism 
+  mechanism => STRING
+

Request header version: 1

| Field | Description |
|---|---|
| mechanism | The SASL mechanism chosen by the client. |
+
+Responses:
+
SaslHandshake Response (Version: 0) => error_code [mechanisms] 
+  error_code => INT16
+  mechanisms => STRING
+

Response header version: 0

| Field | Description |
|---|---|
| error_code | The error code, or 0 if there was no error. |
| mechanisms | The mechanisms enabled in the server. |
+
+
SaslHandshake Response (Version: 1) => error_code [mechanisms] 
+  error_code => INT16
+  mechanisms => STRING
+

Response header version: 0

| Field | Description |
|---|---|
| error_code | The error code, or 0 if there was no error. |
| mechanisms | The mechanisms enabled in the server. |
+
+
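SaslHandshake is driven by client security configuration rather than being called directly; the `sasl.mechanism` setting determines which mechanism the client offers in this request. A hedged configuration sketch for SCRAM-SHA-256 (the broker address and credentials are placeholders):

```java
import java.util.Properties;

public class SaslClientConfig {
    public static Properties scramProps() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker:9093");             // placeholder
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.mechanism", "SCRAM-SHA-256");              // sent in SaslHandshake
        props.put("sasl.jaas.config",
            "org.apache.kafka.common.security.scram.ScramLoginModule required "
            + "username=\"alice\" password=\"alice-secret\";");    // placeholder credentials
        return props;
    }
}
```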
ApiVersions API (Key: 18):
Requests:
+
ApiVersions Request (Version: 0) => 
+

Request header version: 1

+ + + +
FieldDescription
+
+
ApiVersions Request (Version: 1) => 
+

Request header version: 1

+ + + +
FieldDescription
+
+
ApiVersions Request (Version: 2) => 
+

Request header version: 1

+ + + +
FieldDescription
+
+
ApiVersions Request (Version: 3) => client_software_name client_software_version _tagged_fields 
+  client_software_name => COMPACT_STRING
+  client_software_version => COMPACT_STRING
+

Request header version: 2

| Field | Description |
|---|---|
| client_software_name | The name of the client. |
| client_software_version | The version of the client. |
| _tagged_fields | The tagged fields |
+
+
ApiVersions Request (Version: 4) => client_software_name client_software_version _tagged_fields 
+  client_software_name => COMPACT_STRING
+  client_software_version => COMPACT_STRING
+

Request header version: 2

| Field | Description |
|---|---|
| client_software_name | The name of the client. |
| client_software_version | The version of the client. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
ApiVersions Response (Version: 0) => error_code [api_keys] 
+  error_code => INT16
+  api_keys => api_key min_version max_version 
+    api_key => INT16
+    min_version => INT16
+    max_version => INT16
+

Response header version: 0

| Field | Description |
|---|---|
| error_code | The top-level error code. |
| api_keys | The APIs supported by the broker. |
| api_key | The API index. |
| min_version | The minimum supported version, inclusive. |
| max_version | The maximum supported version, inclusive. |
+
+
ApiVersions Response (Version: 1) => error_code [api_keys] throttle_time_ms 
+  error_code => INT16
+  api_keys => api_key min_version max_version 
+    api_key => INT16
+    min_version => INT16
+    max_version => INT16
+  throttle_time_ms => INT32
+

Response header version: 0

+ + + + + + + + + + + + + + + +
FieldDescription
error_codeThe top-level error code.
api_keysThe APIs supported by the broker.
api_keyThe API index.
min_versionThe minimum supported version, inclusive.
max_versionThe maximum supported version, inclusive.
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+
+
ApiVersions Response (Version: 2) => error_code [api_keys] throttle_time_ms 
+  error_code => INT16
+  api_keys => api_key min_version max_version 
+    api_key => INT16
+    min_version => INT16
+    max_version => INT16
+  throttle_time_ms => INT32
+

Response header version: 0

+ + + + + + + + + + + + + + + +
FieldDescription
error_codeThe top-level error code.
api_keysThe APIs supported by the broker.
api_keyThe API index.
min_versionThe minimum supported version, inclusive.
max_versionThe maximum supported version, inclusive.
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+
+
ApiVersions Response (Version: 3) => error_code [api_keys] throttle_time_ms _tagged_fields 
+  error_code => INT16
+  api_keys => api_key min_version max_version _tagged_fields 
+    api_key => INT16
+    min_version => INT16
+    max_version => INT16
+  throttle_time_ms => INT32
+

Response header version: 0

| Field | Description |
|---|---|
| error_code | The top-level error code. |
| api_keys | The APIs supported by the broker. |
| api_key | The API index. |
| min_version | The minimum supported version, inclusive. |
| max_version | The maximum supported version, inclusive. |
| _tagged_fields | The tagged fields |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| _tagged_fields | The tagged fields (see the tagged-fields table below) |

Tagged fields:

| Tag | Tagged field | Description |
|---|---|---|
| 0 | supported_features | Features supported by the broker. Note: in v0-v3, features with MinSupportedVersion = 0 are omitted. Each entry contains: name (the name of the feature), min_version (the minimum supported version for the feature), max_version (the maximum supported version for the feature), _tagged_fields (the tagged fields). |
| 1 | finalized_features_epoch | The monotonically increasing epoch for the finalized features information. Valid values are >= 0. A value of -1 is special and represents unknown epoch. |
| 2 | finalized_features | List of cluster-wide finalized features. The information is valid only if FinalizedFeaturesEpoch >= 0. Each entry contains: name (the name of the feature), max_version_level (the cluster-wide finalized max version level for the feature), min_version_level (the cluster-wide finalized min version level for the feature), _tagged_fields (the tagged fields). |
| 3 | zk_migration_ready | Set by a KRaft controller if the required configurations for ZK migration are present. |
+
+
+
ApiVersions Response (Version: 4) => error_code [api_keys] throttle_time_ms _tagged_fields 
+  error_code => INT16
+  api_keys => api_key min_version max_version _tagged_fields 
+    api_key => INT16
+    min_version => INT16
+    max_version => INT16
+  throttle_time_ms => INT32
+

Response header version: 0

| Field | Description |
|---|---|
| error_code | The top-level error code. |
| api_keys | The APIs supported by the broker. |
| api_key | The API index. |
| min_version | The minimum supported version, inclusive. |
| max_version | The maximum supported version, inclusive. |
| _tagged_fields | The tagged fields |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| _tagged_fields | The tagged fields (see the tagged-fields table below) |

Tagged fields:

| Tag | Tagged field | Description |
|---|---|---|
| 0 | supported_features | Features supported by the broker. Note: in v0-v3, features with MinSupportedVersion = 0 are omitted. Each entry contains: name (the name of the feature), min_version (the minimum supported version for the feature), max_version (the maximum supported version for the feature), _tagged_fields (the tagged fields). |
| 1 | finalized_features_epoch | The monotonically increasing epoch for the finalized features information. Valid values are >= 0. A value of -1 is special and represents unknown epoch. |
| 2 | finalized_features | List of cluster-wide finalized features. The information is valid only if FinalizedFeaturesEpoch >= 0. Each entry contains: name (the name of the feature), max_version_level (the cluster-wide finalized max version level for the feature), min_version_level (the cluster-wide finalized min version level for the feature), _tagged_fields (the tagged fields). |
| 3 | zk_migration_ready | Set by a KRaft controller if the required configurations for ZK migration are present. |
+
+
+
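The supported and finalized feature information surfaced in these tagged fields is also reachable from the Admin client. A sketch (bootstrap address is a placeholder) that prints the finalized features epoch reported by the cluster, assuming `Admin.describeFeatures()` as introduced by KIP-584:

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.FeatureMetadata;
import org.apache.kafka.clients.admin.FinalizedVersionRange;

public class DescribeFeaturesExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            FeatureMetadata metadata = admin.describeFeatures().featureMetadata().get();
            System.out.println("finalized epoch: " + metadata.finalizedFeaturesEpoch());
            for (Map.Entry<String, FinalizedVersionRange> e :
                    metadata.finalizedFeatures().entrySet()) {
                System.out.println(e.getKey() + " -> " + e.getValue());
            }
        }
    }
}
```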
CreateTopics API (Key: 19):
Requests:
+
CreateTopics Request (Version: 2) => [topics] timeout_ms validate_only 
+  topics => name num_partitions replication_factor [assignments] [configs] 
+    name => STRING
+    num_partitions => INT32
+    replication_factor => INT16
+    assignments => partition_index [broker_ids] 
+      partition_index => INT32
+      broker_ids => INT32
+    configs => name value 
+      name => STRING
+      value => NULLABLE_STRING
+  timeout_ms => INT32
+  validate_only => BOOLEAN
+

Request header version: 1

| Field | Description |
|---|---|
| topics | The topics to create. |
| name | The topic name. |
| num_partitions | The number of partitions to create in the topic, or -1 if we are either specifying a manual partition assignment or using the default partitions. |
| replication_factor | The number of replicas to create for each partition in the topic, or -1 if we are either specifying a manual partition assignment or using the default replication factor. |
| assignments | The manual partition assignment, or the empty array if we are using automatic assignment. |
| partition_index | The partition index. |
| broker_ids | The brokers to place the partition on. |
| configs | The custom topic configurations to set. |
| name | The configuration name. |
| value | The configuration value. |
| timeout_ms | How long to wait in milliseconds before timing out the request. |
| validate_only | If true, check that the topics can be created as specified, but don't create anything. |
+
+
CreateTopics Request (Version: 3) => [topics] timeout_ms validate_only 
+  topics => name num_partitions replication_factor [assignments] [configs] 
+    name => STRING
+    num_partitions => INT32
+    replication_factor => INT16
+    assignments => partition_index [broker_ids] 
+      partition_index => INT32
+      broker_ids => INT32
+    configs => name value 
+      name => STRING
+      value => NULLABLE_STRING
+  timeout_ms => INT32
+  validate_only => BOOLEAN
+

Request header version: 1

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
topicsThe topics to create.
nameThe topic name.
num_partitionsThe number of partitions to create in the topic, or -1 if we are either specifying a manual partition assignment or using the default partitions.
replication_factorThe number of replicas to create for each partition in the topic, or -1 if we are either specifying a manual partition assignment or using the default replication factor.
assignmentsThe manual partition assignment, or the empty array if we are using automatic assignment.
partition_indexThe partition index.
broker_idsThe brokers to place the partition on.
configsThe custom topic configurations to set.
nameThe configuration name.
valueThe configuration value.
timeout_msHow long to wait in milliseconds before timing out the request.
validate_onlyIf true, check that the topics can be created as specified, but don't create anything.
+
+
CreateTopics Request (Version: 4) => [topics] timeout_ms validate_only 
+  topics => name num_partitions replication_factor [assignments] [configs] 
+    name => STRING
+    num_partitions => INT32
+    replication_factor => INT16
+    assignments => partition_index [broker_ids] 
+      partition_index => INT32
+      broker_ids => INT32
+    configs => name value 
+      name => STRING
+      value => NULLABLE_STRING
+  timeout_ms => INT32
+  validate_only => BOOLEAN
+

Request header version: 1

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
topicsThe topics to create.
nameThe topic name.
num_partitionsThe number of partitions to create in the topic, or -1 if we are either specifying a manual partition assignment or using the default partitions.
replication_factorThe number of replicas to create for each partition in the topic, or -1 if we are either specifying a manual partition assignment or using the default replication factor.
assignmentsThe manual partition assignment, or the empty array if we are using automatic assignment.
partition_indexThe partition index.
broker_idsThe brokers to place the partition on.
configsThe custom topic configurations to set.
nameThe configuration name.
valueThe configuration value.
timeout_msHow long to wait in milliseconds before timing out the request.
validate_onlyIf true, check that the topics can be created as specified, but don't create anything.
+
+
CreateTopics Request (Version: 5) => [topics] timeout_ms validate_only _tagged_fields 
+  topics => name num_partitions replication_factor [assignments] [configs] _tagged_fields 
+    name => COMPACT_STRING
+    num_partitions => INT32
+    replication_factor => INT16
+    assignments => partition_index [broker_ids] _tagged_fields 
+      partition_index => INT32
+      broker_ids => INT32
+    configs => name value _tagged_fields 
+      name => COMPACT_STRING
+      value => COMPACT_NULLABLE_STRING
+  timeout_ms => INT32
+  validate_only => BOOLEAN
+

Request header version: 2

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
topicsThe topics to create.
nameThe topic name.
num_partitionsThe number of partitions to create in the topic, or -1 if we are either specifying a manual partition assignment or using the default partitions.
replication_factorThe number of replicas to create for each partition in the topic, or -1 if we are either specifying a manual partition assignment or using the default replication factor.
assignmentsThe manual partition assignment, or the empty array if we are using automatic assignment.
partition_indexThe partition index.
broker_idsThe brokers to place the partition on.
_tagged_fieldsThe tagged fields
configsThe custom topic configurations to set.
nameThe configuration name.
valueThe configuration value.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
timeout_msHow long to wait in milliseconds before timing out the request.
validate_onlyIf true, check that the topics can be created as specified, but don't create anything.
_tagged_fieldsThe tagged fields
+
+
CreateTopics Request (Version: 6) => [topics] timeout_ms validate_only _tagged_fields 
+  topics => name num_partitions replication_factor [assignments] [configs] _tagged_fields 
+    name => COMPACT_STRING
+    num_partitions => INT32
+    replication_factor => INT16
+    assignments => partition_index [broker_ids] _tagged_fields 
+      partition_index => INT32
+      broker_ids => INT32
+    configs => name value _tagged_fields 
+      name => COMPACT_STRING
+      value => COMPACT_NULLABLE_STRING
+  timeout_ms => INT32
+  validate_only => BOOLEAN
+

Request header version: 2

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
topicsThe topics to create.
nameThe topic name.
num_partitionsThe number of partitions to create in the topic, or -1 if we are either specifying a manual partition assignment or using the default partitions.
replication_factorThe number of replicas to create for each partition in the topic, or -1 if we are either specifying a manual partition assignment or using the default replication factor.
assignmentsThe manual partition assignment, or the empty array if we are using automatic assignment.
partition_indexThe partition index.
broker_idsThe brokers to place the partition on.
_tagged_fieldsThe tagged fields
configsThe custom topic configurations to set.
nameThe configuration name.
valueThe configuration value.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
timeout_msHow long to wait in milliseconds before timing out the request.
validate_onlyIf true, check that the topics can be created as specified, but don't create anything.
_tagged_fieldsThe tagged fields
+
+
CreateTopics Request (Version: 7) => [topics] timeout_ms validate_only _tagged_fields 
+  topics => name num_partitions replication_factor [assignments] [configs] _tagged_fields 
+    name => COMPACT_STRING
+    num_partitions => INT32
+    replication_factor => INT16
+    assignments => partition_index [broker_ids] _tagged_fields 
+      partition_index => INT32
+      broker_ids => INT32
+    configs => name value _tagged_fields 
+      name => COMPACT_STRING
+      value => COMPACT_NULLABLE_STRING
+  timeout_ms => INT32
+  validate_only => BOOLEAN
+

Request header version: 2

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
topicsThe topics to create.
nameThe topic name.
num_partitionsThe number of partitions to create in the topic, or -1 if we are either specifying a manual partition assignment or using the default partitions.
replication_factorThe number of replicas to create for each partition in the topic, or -1 if we are either specifying a manual partition assignment or using the default replication factor.
assignmentsThe manual partition assignment, or the empty array if we are using automatic assignment.
partition_indexThe partition index.
broker_idsThe brokers to place the partition on.
_tagged_fieldsThe tagged fields
configsThe custom topic configurations to set.
nameThe configuration name.
valueThe configuration value.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
timeout_msHow long to wait in milliseconds before timing out the request.
validate_onlyIf true, check that the topics can be created as specified, but don't create anything.
_tagged_fieldsThe tagged fields
+
+Responses:
+
CreateTopics Response (Version: 2) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name error_code error_message 
+    name => STRING
+    error_code => INT16
+    error_message => NULLABLE_STRING
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | Results for each topic we tried to create. |
| name | The topic name. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
+
+
CreateTopics Response (Version: 3) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name error_code error_message 
+    name => STRING
+    error_code => INT16
+    error_message => NULLABLE_STRING
+

Response header version: 0

+ + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topicsResults for each topic we tried to create.
nameThe topic name.
error_codeThe error code, or 0 if there was no error.
error_messageThe error message, or null if there was no error.
+
+
CreateTopics Response (Version: 4) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name error_code error_message 
+    name => STRING
+    error_code => INT16
+    error_message => NULLABLE_STRING
+

Response header version: 0

+ + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topicsResults for each topic we tried to create.
nameThe topic name.
error_codeThe error code, or 0 if there was no error.
error_messageThe error message, or null if there was no error.
+
+
CreateTopics Response (Version: 5) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name error_code error_message num_partitions replication_factor [configs] _tagged_fields 
+    name => COMPACT_STRING
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    num_partitions => INT32
+    replication_factor => INT16
+    configs => name value read_only config_source is_sensitive _tagged_fields 
+      name => COMPACT_STRING
+      value => COMPACT_NULLABLE_STRING
+      read_only => BOOLEAN
+      config_source => INT8
+      is_sensitive => BOOLEAN
+

Response header version: 1

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topicsResults for each topic we tried to create.
nameThe topic name.
error_codeThe error code, or 0 if there was no error.
error_messageThe error message, or null if there was no error.
num_partitionsNumber of partitions of the topic.
replication_factorReplication factor of the topic.
configsConfiguration of the topic.
nameThe configuration name.
valueThe configuration value.
read_onlyTrue if the configuration is read-only.
config_sourceThe configuration source.
is_sensitiveTrue if this configuration is sensitive.
_tagged_fieldsThe tagged fields
_tagged_fields + + + + + +
TagTagged fieldDescription
0topic_config_error_codeOptional topic config error returned if configs are not returned in the response.
+
_tagged_fieldsThe tagged fields
+
+
CreateTopics Response (Version: 6) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name error_code error_message num_partitions replication_factor [configs] _tagged_fields 
+    name => COMPACT_STRING
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    num_partitions => INT32
+    replication_factor => INT16
+    configs => name value read_only config_source is_sensitive _tagged_fields 
+      name => COMPACT_STRING
+      value => COMPACT_NULLABLE_STRING
+      read_only => BOOLEAN
+      config_source => INT8
+      is_sensitive => BOOLEAN
+

Response header version: 1

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topicsResults for each topic we tried to create.
nameThe topic name.
error_codeThe error code, or 0 if there was no error.
error_messageThe error message, or null if there was no error.
num_partitionsNumber of partitions of the topic.
replication_factorReplication factor of the topic.
configsConfiguration of the topic.
nameThe configuration name.
valueThe configuration value.
read_onlyTrue if the configuration is read-only.
config_sourceThe configuration source.
is_sensitiveTrue if this configuration is sensitive.
_tagged_fieldsThe tagged fields
_tagged_fields + + + + + +
TagTagged fieldDescription
0topic_config_error_codeOptional topic config error returned if configs are not returned in the response.
+
_tagged_fieldsThe tagged fields
+
+
CreateTopics Response (Version: 7) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name topic_id error_code error_message num_partitions replication_factor [configs] _tagged_fields 
+    name => COMPACT_STRING
+    topic_id => UUID
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    num_partitions => INT32
+    replication_factor => INT16
+    configs => name value read_only config_source is_sensitive _tagged_fields 
+      name => COMPACT_STRING
+      value => COMPACT_NULLABLE_STRING
+      read_only => BOOLEAN
+      config_source => INT8
+      is_sensitive => BOOLEAN
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | Results for each topic we tried to create. |
| name | The topic name. |
| topic_id | The unique topic ID. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| num_partitions | Number of partitions of the topic. |
| replication_factor | Replication factor of the topic. |
| configs | Configuration of the topic. |
| name | The configuration name. |
| value | The configuration value. |
| read_only | True if the configuration is read-only. |
| config_source | The configuration source. |
| is_sensitive | True if this configuration is sensitive. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields (see the tagged-fields table below) |
| _tagged_fields | The tagged fields |

Tagged fields:

| Tag | Tagged field | Description |
|---|---|---|
| 0 | topic_config_error_code | Optional topic config error returned if configs are not returned in the response. |
+
+
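The CreateTopics request is what `Admin.createTopics` sends under the covers, and `validate_only` is exposed through `CreateTopicsOptions`. A sketch (topic name, partition count, replication factor, and configs are placeholders):

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.CreateTopicsOptions;
import org.apache.kafka.clients.admin.NewTopic;

public class CreateTopicsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            NewTopic topic = new NewTopic("events", 6, (short) 3) // placeholder sizing
                .configs(Map.of("cleanup.policy", "compact"));
            // validateOnly(true) maps to the validate_only flag: check, but create nothing
            admin.createTopics(List.of(topic), new CreateTopicsOptions().validateOnly(true))
                 .all()
                 .get();
            System.out.println("topic definition is valid");
        }
    }
}
```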
DeleteTopics API (Key: 20):
Requests:
+
DeleteTopics Request (Version: 1) => [topic_names] timeout_ms 
+  topic_names => STRING
+  timeout_ms => INT32
+

Request header version: 1

| Field | Description |
|---|---|
| topic_names | The names of the topics to delete. |
| timeout_ms | The length of time in milliseconds to wait for the deletions to complete. |
+
+
DeleteTopics Request (Version: 2) => [topic_names] timeout_ms 
+  topic_names => STRING
+  timeout_ms => INT32
+

Request header version: 1

+ + + + + + + +
FieldDescription
topic_namesThe names of the topics to delete.
timeout_msThe length of time in milliseconds to wait for the deletions to complete.
+
+
DeleteTopics Request (Version: 3) => [topic_names] timeout_ms 
+  topic_names => STRING
+  timeout_ms => INT32
+

Request header version: 1

+ + + + + + + +
FieldDescription
topic_namesThe names of the topics to delete.
timeout_msThe length of time in milliseconds to wait for the deletions to complete.
+
+
DeleteTopics Request (Version: 4) => [topic_names] timeout_ms _tagged_fields 
+  topic_names => COMPACT_STRING
+  timeout_ms => INT32
+

Request header version: 2

+ + + + + + + + + +
FieldDescription
topic_namesThe names of the topics to delete.
timeout_msThe length of time in milliseconds to wait for the deletions to complete.
_tagged_fieldsThe tagged fields
+
+
DeleteTopics Request (Version: 5) => [topic_names] timeout_ms _tagged_fields 
+  topic_names => COMPACT_STRING
+  timeout_ms => INT32
+

Request header version: 2

+ + + + + + + + + +
FieldDescription
topic_namesThe names of the topics to delete.
timeout_msThe length of time in milliseconds to wait for the deletions to complete.
_tagged_fieldsThe tagged fields
+
+
DeleteTopics Request (Version: 6) => [topics] timeout_ms _tagged_fields 
+  topics => name topic_id _tagged_fields 
+    name => COMPACT_NULLABLE_STRING
+    topic_id => UUID
+  timeout_ms => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| topics | The name or topic ID of the topic. |
| name | The topic name. |
| topic_id | The unique topic ID. |
| _tagged_fields | The tagged fields |
| timeout_ms | The length of time in milliseconds to wait for the deletions to complete. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
DeleteTopics Response (Version: 1) => throttle_time_ms [responses] 
+  throttle_time_ms => INT32
+  responses => name error_code 
+    name => STRING
+    error_code => INT16
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| responses | The results for each topic we tried to delete. |
| name | The topic name. |
| error_code | The deletion error, or 0 if the deletion succeeded. |
+
+
DeleteTopics Response (Version: 2) => throttle_time_ms [responses] 
+  throttle_time_ms => INT32
+  responses => name error_code 
+    name => STRING
+    error_code => INT16
+

Response header version: 0

+ + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
responsesThe results for each topic we tried to delete.
nameThe topic name.
error_codeThe deletion error, or 0 if the deletion succeeded.
+
+
DeleteTopics Response (Version: 3) => throttle_time_ms [responses] 
+  throttle_time_ms => INT32
+  responses => name error_code 
+    name => STRING
+    error_code => INT16
+

Response header version: 0

+ + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
responsesThe results for each topic we tried to delete.
nameThe topic name.
error_codeThe deletion error, or 0 if the deletion succeeded.
+
+
DeleteTopics Response (Version: 4) => throttle_time_ms [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  responses => name error_code _tagged_fields 
+    name => COMPACT_STRING
+    error_code => INT16
+

Response header version: 1

+ + + + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
responsesThe results for each topic we tried to delete.
nameThe topic name.
error_codeThe deletion error, or 0 if the deletion succeeded.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
+
+
DeleteTopics Response (Version: 5) => throttle_time_ms [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  responses => name error_code error_message _tagged_fields 
+    name => COMPACT_STRING
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

+ + + + + + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
responsesThe results for each topic we tried to delete.
nameThe topic name.
error_codeThe deletion error, or 0 if the deletion succeeded.
error_messageThe error message, or null if there was no error.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
+
+
DeleteTopics Response (Version: 6) => throttle_time_ms [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  responses => name topic_id error_code error_message _tagged_fields 
+    name => COMPACT_NULLABLE_STRING
+    topic_id => UUID
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| responses | The results for each topic we tried to delete. |
| name | The topic name. |
| topic_id | The unique topic ID. |
| error_code | The deletion error, or 0 if the deletion succeeded. |
| error_message | The error message, or null if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
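Deleting by name corresponds to the earlier request versions, while version 6 also accepts topic IDs. A hedged Admin client sketch deleting by name (topic name and bootstrap address are placeholders):

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;

public class DeleteTopicsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // Deletes by name; newer brokers resolve the name to a topic_id internally
            admin.deleteTopics(List.of("events")).all().get();
            System.out.println("delete request completed");
        }
    }
}
```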
DeleteRecords API (Key: 21):
Requests:
+
DeleteRecords Request (Version: 0) => [topics] timeout_ms 
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index offset 
+      partition_index => INT32
+      offset => INT64
+  timeout_ms => INT32
+

Request header version: 1

| Field | Description |
|---|---|
| topics | Each topic that we want to delete records from. |
| name | The topic name. |
| partitions | Each partition that we want to delete records from. |
| partition_index | The partition index. |
| offset | The deletion offset. |
| timeout_ms | How long to wait for the deletion to complete, in milliseconds. |
+
+
DeleteRecords Request (Version: 1) => [topics] timeout_ms 
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index offset 
+      partition_index => INT32
+      offset => INT64
+  timeout_ms => INT32
+

Request header version: 1

+ + + + + + + + + + + + + + + +
FieldDescription
topicsEach topic that we want to delete records from.
nameThe topic name.
partitionsEach partition that we want to delete records from.
partition_indexThe partition index.
offsetThe deletion offset.
timeout_msHow long to wait for the deletion to complete, in milliseconds.
+
+
DeleteRecords Request (Version: 2) => [topics] timeout_ms _tagged_fields 
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index offset _tagged_fields 
+      partition_index => INT32
+      offset => INT64
+  timeout_ms => INT32
+

Request header version: 2

+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
topicsEach topic that we want to delete records from.
nameThe topic name.
partitionsEach partition that we want to delete records from.
partition_indexThe partition index.
offsetThe deletion offset.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
timeout_msHow long to wait for the deletion to complete, in milliseconds.
_tagged_fieldsThe tagged fields
+
+Responses:
+
DeleteRecords Response (Version: 0) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index low_watermark error_code 
+      partition_index => INT32
+      low_watermark => INT64
+      error_code => INT16
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | Each topic that we wanted to delete records from. |
| name | The topic name. |
| partitions | Each partition that we wanted to delete records from. |
| partition_index | The partition index. |
| low_watermark | The partition low water mark. |
| error_code | The deletion error code, or 0 if the deletion succeeded. |
+
+
DeleteRecords Response (Version: 1) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index low_watermark error_code 
+      partition_index => INT32
+      low_watermark => INT64
+      error_code => INT16
+

Response header version: 0

+ + + + + + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topicsEach topic that we wanted to delete records from.
nameThe topic name.
partitionsEach partition that we wanted to delete records from.
partition_indexThe partition index.
low_watermarkThe partition low water mark.
error_codeThe deletion error code, or 0 if the deletion succeeded.
+
+
DeleteRecords Response (Version: 2) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index low_watermark error_code _tagged_fields 
+      partition_index => INT32
+      low_watermark => INT64
+      error_code => INT16
+

Response header version: 1

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
throttle_time_msThe duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topicsEach topic that we wanted to delete records from.
nameThe topic name.
partitionsEach partition that we wanted to delete records from.
partition_indexThe partition index.
low_watermarkThe partition low water mark.
error_codeThe deletion error code, or 0 if the deletion succeeded.
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
_tagged_fieldsThe tagged fields
+
+
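DeleteRecords advances the partition low-water mark to the requested offset; the Admin client wraps it with `RecordsToDelete.beforeOffset`. A sketch (topic, partition, offset, and bootstrap address are placeholders):

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.DeletedRecords;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.TopicPartition;

public class DeleteRecordsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("events", 0);   // placeholder partition
            Map<TopicPartition, RecordsToDelete> request =
                Map.of(tp, RecordsToDelete.beforeOffset(1000L));   // placeholder offset
            DeletedRecords result = admin.deleteRecords(request).lowWatermarks().get(tp).get();
            System.out.println("new low watermark: " + result.lowWatermark());
        }
    }
}
```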
InitProducerId API (Key: 22):
Requests:
+
InitProducerId Request (Version: 0) => transactional_id transaction_timeout_ms 
+  transactional_id => NULLABLE_STRING
+  transaction_timeout_ms => INT32
+

Request header version: 1

| Field | Description |
|---|---|
| transactional_id | The transactional id, or null if the producer is not transactional. |
| transaction_timeout_ms | The time in ms to wait before aborting idle transactions sent by this producer. This is only relevant if a TransactionalId has been defined. |
+
+
InitProducerId Request (Version: 1) => transactional_id transaction_timeout_ms 
+  transactional_id => NULLABLE_STRING
+  transaction_timeout_ms => INT32
+

Request header version: 1

+ + + + + + + +
FieldDescription
transactional_idThe transactional id, or null if the producer is not transactional.
transaction_timeout_msThe time in ms to wait before aborting idle transactions sent by this producer. This is only relevant if a TransactionalId has been defined.
+
+
InitProducerId Request (Version: 2) => transactional_id transaction_timeout_ms _tagged_fields 
+  transactional_id => COMPACT_NULLABLE_STRING
+  transaction_timeout_ms => INT32
+

Request header version: 2

+ + + + + + + + + +
FieldDescription
transactional_idThe transactional id, or null if the producer is not transactional.
transaction_timeout_msThe time in ms to wait before aborting idle transactions sent by this producer. This is only relevant if a TransactionalId has been defined.
_tagged_fieldsThe tagged fields
+
+
InitProducerId Request (Version: 3) => transactional_id transaction_timeout_ms producer_id producer_epoch _tagged_fields 
+  transactional_id => COMPACT_NULLABLE_STRING
+  transaction_timeout_ms => INT32
+  producer_id => INT64
+  producer_epoch => INT16
+

Request header version: 2

+ + + + + + + + + + + + + +
FieldDescription
transactional_idThe transactional id, or null if the producer is not transactional.
transaction_timeout_msThe time in ms to wait before aborting idle transactions sent by this producer. This is only relevant if a TransactionalId has been defined.
producer_idThe producer id. This is used to disambiguate requests if a transactional id is reused following its expiration.
producer_epochThe producer's current epoch. This will be checked against the producer epoch on the broker, and the request will return an error if they do not match.
_tagged_fieldsThe tagged fields
+
+
InitProducerId Request (Version: 4) => transactional_id transaction_timeout_ms producer_id producer_epoch _tagged_fields 
+  transactional_id => COMPACT_NULLABLE_STRING
+  transaction_timeout_ms => INT32
+  producer_id => INT64
+  producer_epoch => INT16
+

Request header version: 2

Field | Description
--- | ---
transactional_id | The transactional id, or null if the producer is not transactional.
transaction_timeout_ms | The time in ms to wait before aborting idle transactions sent by this producer. This is only relevant if a TransactionalId has been defined.
producer_id | The producer id. This is used to disambiguate requests if a transactional id is reused following its expiration.
producer_epoch | The producer's current epoch. This will be checked against the producer epoch on the broker, and the request will return an error if they do not match.
_tagged_fields | The tagged fields
+
+
InitProducerId Request (Version: 5) => transactional_id transaction_timeout_ms producer_id producer_epoch _tagged_fields 
+  transactional_id => COMPACT_NULLABLE_STRING
+  transaction_timeout_ms => INT32
+  producer_id => INT64
+  producer_epoch => INT16
+

Request header version: 2

Field | Description
--- | ---
transactional_id | The transactional id, or null if the producer is not transactional.
transaction_timeout_ms | The time in ms to wait before aborting idle transactions sent by this producer. This is only relevant if a TransactionalId has been defined.
producer_id | The producer id. This is used to disambiguate requests if a transactional id is reused following its expiration.
producer_epoch | The producer's current epoch. This will be checked against the producer epoch on the broker, and the request will return an error if they do not match.
_tagged_fields | The tagged fields
+
+
InitProducerId Request (Version: 6) => transactional_id transaction_timeout_ms producer_id producer_epoch enable2_pc keep_prepared_txn _tagged_fields 
+  transactional_id => COMPACT_NULLABLE_STRING
+  transaction_timeout_ms => INT32
+  producer_id => INT64
+  producer_epoch => INT16
+  enable2_pc => BOOLEAN
+  keep_prepared_txn => BOOLEAN
+

This version of the request is unstable.

Request header version: 2

Field | Description
--- | ---
transactional_id | The transactional id, or null if the producer is not transactional.
transaction_timeout_ms | The time in ms to wait before aborting idle transactions sent by this producer. This is only relevant if a TransactionalId has been defined.
producer_id | The producer id. This is used to disambiguate requests if a transactional id is reused following its expiration.
producer_epoch | The producer's current epoch. This will be checked against the producer epoch on the broker, and the request will return an error if they do not match.
enable2_pc | True if the client wants to enable two-phase commit (2PC) protocol for transactions.
keep_prepared_txn | True if the client wants to keep the currently ongoing transaction instead of aborting it.
_tagged_fields | The tagged fields
+
+Responses:
+
InitProducerId Response (Version: 0) => throttle_time_ms error_code producer_id producer_epoch 
+  throttle_time_ms => INT32
+  error_code => INT16
+  producer_id => INT64
+  producer_epoch => INT16
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
producer_id | The current producer id.
producer_epoch | The current epoch associated with the producer id.
+
+
InitProducerId Response (Version: 1) => throttle_time_ms error_code producer_id producer_epoch 
+  throttle_time_ms => INT32
+  error_code => INT16
+  producer_id => INT64
+  producer_epoch => INT16
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
producer_id | The current producer id.
producer_epoch | The current epoch associated with the producer id.
+
+
InitProducerId Response (Version: 2) => throttle_time_ms error_code producer_id producer_epoch _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  producer_id => INT64
+  producer_epoch => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
producer_id | The current producer id.
producer_epoch | The current epoch associated with the producer id.
_tagged_fields | The tagged fields
+
+
InitProducerId Response (Version: 3) => throttle_time_ms error_code producer_id producer_epoch _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  producer_id => INT64
+  producer_epoch => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
producer_id | The current producer id.
producer_epoch | The current epoch associated with the producer id.
_tagged_fields | The tagged fields
+
+
InitProducerId Response (Version: 4) => throttle_time_ms error_code producer_id producer_epoch _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  producer_id => INT64
+  producer_epoch => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
producer_id | The current producer id.
producer_epoch | The current epoch associated with the producer id.
_tagged_fields | The tagged fields
+
+
InitProducerId Response (Version: 5) => throttle_time_ms error_code producer_id producer_epoch _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  producer_id => INT64
+  producer_epoch => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
producer_id | The current producer id.
producer_epoch | The current epoch associated with the producer id.
_tagged_fields | The tagged fields
+
+
InitProducerId Response (Version: 6) => throttle_time_ms error_code producer_id producer_epoch ongoing_txn_producer_id ongoing_txn_producer_epoch _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  producer_id => INT64
+  producer_epoch => INT16
+  ongoing_txn_producer_id => INT64
+  ongoing_txn_producer_epoch => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
producer_id | The current producer id.
producer_epoch | The current epoch associated with the producer id.
ongoing_txn_producer_id | The producer id for ongoing transaction when KeepPreparedTxn is used, -1 if there is no transaction ongoing.
ongoing_txn_producer_epoch | The epoch associated with the producer id for ongoing transaction when KeepPreparedTxn is used, -1 if there is no transaction ongoing.
_tagged_fields | The tagged fields
+
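In the Java client, this exchange is driven by `KafkaProducer.initTransactions()`: configuring a `transactional.id` and calling `initTransactions()` is what makes the producer obtain its producer id and epoch from the transaction coordinator. A minimal sketch; the broker address, transactional id, and topic name are placeholders:

```java
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class InitProducerIdExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");      // assumed local broker
        props.put("transactional.id", "demo-txn-id");           // enables the transactional path
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // initTransactions() is the client call that results in an InitProducerId
            // request to the transaction coordinator and fences older producers with
            // the same transactional.id.
            producer.initTransactions();

            producer.beginTransaction();
            producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
            producer.commitTransaction();
        }
    }
}
```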
+
OffsetForLeaderEpoch API (Key: 23):
+
+Requests:
+
OffsetForLeaderEpoch Request (Version: 2) => [topics] 
+  topics => topic [partitions] 
+    topic => STRING
+    partitions => partition current_leader_epoch leader_epoch 
+      partition => INT32
+      current_leader_epoch => INT32
+      leader_epoch => INT32
+

Request header version: 1

Field | Description
--- | ---
topics | Each topic to get offsets for.
topic | The topic name.
partitions | Each partition to get offsets for.
partition | The partition index.
current_leader_epoch | An epoch used to fence consumers/replicas with old metadata. If the epoch provided by the client is larger than the current epoch known to the broker, then the UNKNOWN_LEADER_EPOCH error code will be returned. If the provided epoch is smaller, then the FENCED_LEADER_EPOCH error code will be returned.
leader_epoch | The epoch to look up an offset for.
+
+
OffsetForLeaderEpoch Request (Version: 3) => replica_id [topics] 
+  replica_id => INT32
+  topics => topic [partitions] 
+    topic => STRING
+    partitions => partition current_leader_epoch leader_epoch 
+      partition => INT32
+      current_leader_epoch => INT32
+      leader_epoch => INT32
+

Request header version: 1

Field | Description
--- | ---
replica_id | The broker ID of the follower, or -1 if this request is from a consumer.
topics | Each topic to get offsets for.
topic | The topic name.
partitions | Each partition to get offsets for.
partition | The partition index.
current_leader_epoch | An epoch used to fence consumers/replicas with old metadata. If the epoch provided by the client is larger than the current epoch known to the broker, then the UNKNOWN_LEADER_EPOCH error code will be returned. If the provided epoch is smaller, then the FENCED_LEADER_EPOCH error code will be returned.
leader_epoch | The epoch to look up an offset for.
+
+
OffsetForLeaderEpoch Request (Version: 4) => replica_id [topics] _tagged_fields 
+  replica_id => INT32
+  topics => topic [partitions] _tagged_fields 
+    topic => COMPACT_STRING
+    partitions => partition current_leader_epoch leader_epoch _tagged_fields 
+      partition => INT32
+      current_leader_epoch => INT32
+      leader_epoch => INT32
+

Request header version: 2

Field | Description
--- | ---
replica_id | The broker ID of the follower, or -1 if this request is from a consumer.
topics | Each topic to get offsets for.
topic | The topic name.
partitions | Each partition to get offsets for.
partition | The partition index.
current_leader_epoch | An epoch used to fence consumers/replicas with old metadata. If the epoch provided by the client is larger than the current epoch known to the broker, then the UNKNOWN_LEADER_EPOCH error code will be returned. If the provided epoch is smaller, then the FENCED_LEADER_EPOCH error code will be returned.
leader_epoch | The epoch to look up an offset for.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+Responses:
+
OffsetForLeaderEpoch Response (Version: 2) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => topic [partitions] 
+    topic => STRING
+    partitions => error_code partition leader_epoch end_offset 
+      error_code => INT16
+      partition => INT32
+      leader_epoch => INT32
+      end_offset => INT64
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topics | Each topic we fetched offsets for.
topic | The topic name.
partitions | Each partition in the topic we fetched offsets for.
error_code | The error code, or 0 if there was no error.
partition | The partition index.
leader_epoch | The leader epoch of the partition.
end_offset | The end offset of the epoch.
+
+
OffsetForLeaderEpoch Response (Version: 3) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => topic [partitions] 
+    topic => STRING
+    partitions => error_code partition leader_epoch end_offset 
+      error_code => INT16
+      partition => INT32
+      leader_epoch => INT32
+      end_offset => INT64
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topics | Each topic we fetched offsets for.
topic | The topic name.
partitions | Each partition in the topic we fetched offsets for.
error_code | The error code, or 0 if there was no error.
partition | The partition index.
leader_epoch | The leader epoch of the partition.
end_offset | The end offset of the epoch.
+
+
OffsetForLeaderEpoch Response (Version: 4) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => topic [partitions] _tagged_fields 
+    topic => COMPACT_STRING
+    partitions => error_code partition leader_epoch end_offset _tagged_fields 
+      error_code => INT16
+      partition => INT32
+      leader_epoch => INT32
+      end_offset => INT64
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topics | Each topic we fetched offsets for.
topic | The topic name.
partitions | Each partition in the topic we fetched offsets for.
error_code | The error code, or 0 if there was no error.
partition | The partition index.
leader_epoch | The leader epoch of the partition.
end_offset | The end offset of the epoch.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
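Clients do not build OffsetForLeaderEpoch requests themselves; the Java consumer uses this API internally to validate its fetch position against the current leader epoch and detect log truncation after a leader change. A hedged sketch of how that surfaces to application code, assuming `auto.offset.reset=none` so truncation is reported instead of silently reset; the broker address, group id, and topic are placeholders:

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.LogTruncationException;
import org.apache.kafka.common.serialization.StringDeserializer;

public class TruncationAwareConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");  // assumed local broker
        props.put("group.id", "truncation-demo");           // hypothetical group, assumed to have committed offsets
        props.put("auto.offset.reset", "none");             // surface truncation instead of resetting
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("demo-topic"));
            while (true) {
                try {
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                    records.forEach(r -> System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset()));
                } catch (LogTruncationException e) {
                    // Raised when the consumer's internal offset-for-leader-epoch validation
                    // finds that its position no longer exists on the new leader.
                    System.err.println("Log truncation detected: " + e.getMessage());
                    return;
                }
            }
        }
    }
}
```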
+
AddPartitionsToTxn API (Key: 24):
+
+Requests:
+
AddPartitionsToTxn Request (Version: 0) => v3_and_below_transactional_id v3_and_below_producer_id v3_and_below_producer_epoch [v3_and_below_topics] 
+  v3_and_below_transactional_id => STRING
+  v3_and_below_producer_id => INT64
+  v3_and_below_producer_epoch => INT16
+  v3_and_below_topics => name [partitions] 
+    name => STRING
+    partitions => INT32
+

Request header version: 1

Field | Description
--- | ---
v3_and_below_transactional_id | The transactional id corresponding to the transaction.
v3_and_below_producer_id | Current producer id in use by the transactional id.
v3_and_below_producer_epoch | Current epoch associated with the producer id.
v3_and_below_topics | The partitions to add to the transaction.
name | The name of the topic.
partitions | The partition indexes to add to the transaction.
+
+
AddPartitionsToTxn Request (Version: 1) => v3_and_below_transactional_id v3_and_below_producer_id v3_and_below_producer_epoch [v3_and_below_topics] 
+  v3_and_below_transactional_id => STRING
+  v3_and_below_producer_id => INT64
+  v3_and_below_producer_epoch => INT16
+  v3_and_below_topics => name [partitions] 
+    name => STRING
+    partitions => INT32
+

Request header version: 1

Field | Description
--- | ---
v3_and_below_transactional_id | The transactional id corresponding to the transaction.
v3_and_below_producer_id | Current producer id in use by the transactional id.
v3_and_below_producer_epoch | Current epoch associated with the producer id.
v3_and_below_topics | The partitions to add to the transaction.
name | The name of the topic.
partitions | The partition indexes to add to the transaction.
+
+
AddPartitionsToTxn Request (Version: 2) => v3_and_below_transactional_id v3_and_below_producer_id v3_and_below_producer_epoch [v3_and_below_topics] 
+  v3_and_below_transactional_id => STRING
+  v3_and_below_producer_id => INT64
+  v3_and_below_producer_epoch => INT16
+  v3_and_below_topics => name [partitions] 
+    name => STRING
+    partitions => INT32
+

Request header version: 1

Field | Description
--- | ---
v3_and_below_transactional_id | The transactional id corresponding to the transaction.
v3_and_below_producer_id | Current producer id in use by the transactional id.
v3_and_below_producer_epoch | Current epoch associated with the producer id.
v3_and_below_topics | The partitions to add to the transaction.
name | The name of the topic.
partitions | The partition indexes to add to the transaction.
+
+
AddPartitionsToTxn Request (Version: 3) => v3_and_below_transactional_id v3_and_below_producer_id v3_and_below_producer_epoch [v3_and_below_topics] _tagged_fields 
+  v3_and_below_transactional_id => COMPACT_STRING
+  v3_and_below_producer_id => INT64
+  v3_and_below_producer_epoch => INT16
+  v3_and_below_topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => INT32
+

Request header version: 2

Field | Description
--- | ---
v3_and_below_transactional_id | The transactional id corresponding to the transaction.
v3_and_below_producer_id | Current producer id in use by the transactional id.
v3_and_below_producer_epoch | Current epoch associated with the producer id.
v3_and_below_topics | The partitions to add to the transaction.
name | The name of the topic.
partitions | The partition indexes to add to the transaction.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
AddPartitionsToTxn Request (Version: 4) => [transactions] _tagged_fields 
+  transactions => transactional_id producer_id producer_epoch verify_only [topics] _tagged_fields 
+    transactional_id => COMPACT_STRING
+    producer_id => INT64
+    producer_epoch => INT16
+    verify_only => BOOLEAN
+    topics => name [partitions] _tagged_fields 
+      name => COMPACT_STRING
+      partitions => INT32
+

Request header version: 2

Field | Description
--- | ---
transactions | List of transactions to add partitions to.
transactional_id | The transactional id corresponding to the transaction.
producer_id | Current producer id in use by the transactional id.
producer_epoch | Current epoch associated with the producer id.
verify_only | Boolean to signify if we want to check if the partition is in the transaction rather than add it.
topics | The partitions to add to the transaction.
name | The name of the topic.
partitions | The partition indexes to add to the transaction.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
AddPartitionsToTxn Request (Version: 5) => [transactions] _tagged_fields 
+  transactions => transactional_id producer_id producer_epoch verify_only [topics] _tagged_fields 
+    transactional_id => COMPACT_STRING
+    producer_id => INT64
+    producer_epoch => INT16
+    verify_only => BOOLEAN
+    topics => name [partitions] _tagged_fields 
+      name => COMPACT_STRING
+      partitions => INT32
+

Request header version: 2

Field | Description
--- | ---
transactions | List of transactions to add partitions to.
transactional_id | The transactional id corresponding to the transaction.
producer_id | Current producer id in use by the transactional id.
producer_epoch | Current epoch associated with the producer id.
verify_only | Boolean to signify if we want to check if the partition is in the transaction rather than add it.
topics | The partitions to add to the transaction.
name | The name of the topic.
partitions | The partition indexes to add to the transaction.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+Responses:
+
AddPartitionsToTxn Response (Version: 0) => throttle_time_ms [results_by_topic_v3_and_below] 
+  throttle_time_ms => INT32
+  results_by_topic_v3_and_below => name [results_by_partition] 
+    name => STRING
+    results_by_partition => partition_index partition_error_code 
+      partition_index => INT32
+      partition_error_code => INT16
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
results_by_topic_v3_and_below | The results for each topic.
name | The topic name.
results_by_partition | The results for each partition.
partition_index | The partition indexes.
partition_error_code | The response error code.
+
+
AddPartitionsToTxn Response (Version: 1) => throttle_time_ms [results_by_topic_v3_and_below] 
+  throttle_time_ms => INT32
+  results_by_topic_v3_and_below => name [results_by_partition] 
+    name => STRING
+    results_by_partition => partition_index partition_error_code 
+      partition_index => INT32
+      partition_error_code => INT16
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
results_by_topic_v3_and_below | The results for each topic.
name | The topic name.
results_by_partition | The results for each partition.
partition_index | The partition indexes.
partition_error_code | The response error code.
+
+
AddPartitionsToTxn Response (Version: 2) => throttle_time_ms [results_by_topic_v3_and_below] 
+  throttle_time_ms => INT32
+  results_by_topic_v3_and_below => name [results_by_partition] 
+    name => STRING
+    results_by_partition => partition_index partition_error_code 
+      partition_index => INT32
+      partition_error_code => INT16
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
results_by_topic_v3_and_below | The results for each topic.
name | The topic name.
results_by_partition | The results for each partition.
partition_index | The partition indexes.
partition_error_code | The response error code.
+
+
AddPartitionsToTxn Response (Version: 3) => throttle_time_ms [results_by_topic_v3_and_below] _tagged_fields 
+  throttle_time_ms => INT32
+  results_by_topic_v3_and_below => name [results_by_partition] _tagged_fields 
+    name => COMPACT_STRING
+    results_by_partition => partition_index partition_error_code _tagged_fields 
+      partition_index => INT32
+      partition_error_code => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
results_by_topic_v3_and_below | The results for each topic.
name | The topic name.
results_by_partition | The results for each partition.
partition_index | The partition indexes.
partition_error_code | The response error code.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
AddPartitionsToTxn Response (Version: 4) => throttle_time_ms error_code [results_by_transaction] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  results_by_transaction => transactional_id [topic_results] _tagged_fields 
+    transactional_id => COMPACT_STRING
+    topic_results => name [results_by_partition] _tagged_fields 
+      name => COMPACT_STRING
+      results_by_partition => partition_index partition_error_code _tagged_fields 
+        partition_index => INT32
+        partition_error_code => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The response top level error code.
results_by_transaction | Results categorized by transactional ID.
transactional_id | The transactional id corresponding to the transaction.
topic_results | The results for each topic.
name | The topic name.
results_by_partition | The results for each partition.
partition_index | The partition indexes.
partition_error_code | The response error code.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
AddPartitionsToTxn Response (Version: 5) => throttle_time_ms error_code [results_by_transaction] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  results_by_transaction => transactional_id [topic_results] _tagged_fields 
+    transactional_id => COMPACT_STRING
+    topic_results => name [results_by_partition] _tagged_fields 
+      name => COMPACT_STRING
+      results_by_partition => partition_index partition_error_code _tagged_fields 
+        partition_index => INT32
+        partition_error_code => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The response top level error code.
results_by_transaction | Results categorized by transactional ID.
transactional_id | The transactional id corresponding to the transaction.
topic_results | The results for each topic.
name | The topic name.
results_by_partition | The results for each partition.
partition_index | The partition indexes.
partition_error_code | The response error code.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
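From the client's point of view, partitions are registered implicitly: within an open transaction, the first `send()` to a given topic-partition is what causes the producer to add that partition to the transaction before any records are written; there is no separate public API for it. A short continuation of the earlier producer sketch, with placeholder topic names:

```java
// Continuation of the transactional producer sketch above (same `producer` instance).
producer.beginTransaction();
producer.send(new ProducerRecord<>("orders", "k1", "v1"));     // "orders" partition joins the transaction here
producer.send(new ProducerRecord<>("audit-log", "k1", "v1"));  // a second topic is registered the same way
producer.commitTransaction();                                   // both partitions are committed atomically
```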
+
AddOffsetsToTxn API (Key: 25):
+
+Requests:
+
AddOffsetsToTxn Request (Version: 0) => transactional_id producer_id producer_epoch group_id 
+  transactional_id => STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  group_id => STRING
+

Request header version: 1

Field | Description
--- | ---
transactional_id | The transactional id corresponding to the transaction.
producer_id | Current producer id in use by the transactional id.
producer_epoch | Current epoch associated with the producer id.
group_id | The unique group identifier.
+
+
AddOffsetsToTxn Request (Version: 1) => transactional_id producer_id producer_epoch group_id 
+  transactional_id => STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  group_id => STRING
+

Request header version: 1

Field | Description
--- | ---
transactional_id | The transactional id corresponding to the transaction.
producer_id | Current producer id in use by the transactional id.
producer_epoch | Current epoch associated with the producer id.
group_id | The unique group identifier.
+
+
AddOffsetsToTxn Request (Version: 2) => transactional_id producer_id producer_epoch group_id 
+  transactional_id => STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  group_id => STRING
+

Request header version: 1

Field | Description
--- | ---
transactional_id | The transactional id corresponding to the transaction.
producer_id | Current producer id in use by the transactional id.
producer_epoch | Current epoch associated with the producer id.
group_id | The unique group identifier.
+
+
AddOffsetsToTxn Request (Version: 3) => transactional_id producer_id producer_epoch group_id _tagged_fields 
+  transactional_id => COMPACT_STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  group_id => COMPACT_STRING
+

Request header version: 2

Field | Description
--- | ---
transactional_id | The transactional id corresponding to the transaction.
producer_id | Current producer id in use by the transactional id.
producer_epoch | Current epoch associated with the producer id.
group_id | The unique group identifier.
_tagged_fields | The tagged fields
+
+
AddOffsetsToTxn Request (Version: 4) => transactional_id producer_id producer_epoch group_id _tagged_fields 
+  transactional_id => COMPACT_STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  group_id => COMPACT_STRING
+

Request header version: 2

Field | Description
--- | ---
transactional_id | The transactional id corresponding to the transaction.
producer_id | Current producer id in use by the transactional id.
producer_epoch | Current epoch associated with the producer id.
group_id | The unique group identifier.
_tagged_fields | The tagged fields
+
+Responses:
+
AddOffsetsToTxn Response (Version: 0) => throttle_time_ms error_code 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The response error code, or 0 if there was no error.
+
+
AddOffsetsToTxn Response (Version: 1) => throttle_time_ms error_code 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The response error code, or 0 if there was no error.
+
+
AddOffsetsToTxn Response (Version: 2) => throttle_time_ms error_code 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The response error code, or 0 if there was no error.
+
+
AddOffsetsToTxn Response (Version: 3) => throttle_time_ms error_code _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The response error code, or 0 if there was no error.
_tagged_fields | The tagged fields
+
+
AddOffsetsToTxn Response (Version: 4) => throttle_time_ms error_code _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The response error code, or 0 if there was no error.
_tagged_fields | The tagged fields
+
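In the Java producer, this flow is driven by `sendOffsetsToTransaction()`, which registers the consumer group with the ongoing transaction (this API) and then commits the offsets through the group coordinator (the TxnOffsetCommit API described later). A sketch of the usual consume-transform-produce step, assuming `producer`, `consumer`, and a polled batch `records` from the earlier sketches; the output topic name is a placeholder:

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;

// Continuation sketch: `producer`, `consumer`, and `records` come from the earlier examples.
producer.beginTransaction();
Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
for (ConsumerRecord<String, String> record : records) {
    producer.send(new ProducerRecord<>("output-topic", record.key(), record.value()));
    // Commit the offset *after* the consumed record, so a restart resumes past it.
    offsets.put(new TopicPartition(record.topic(), record.partition()),
                new OffsetAndMetadata(record.offset() + 1));
}
// Registers the consumer group with the transaction and then commits the offsets
// as part of the same transaction.
producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
producer.commitTransaction();
```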
+
EndTxn API (Key: 26):
+
+Requests:
+
EndTxn Request (Version: 0) => transactional_id producer_id producer_epoch committed 
+  transactional_id => STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  committed => BOOLEAN
+

Request header version: 1

Field | Description
--- | ---
transactional_id | The ID of the transaction to end.
producer_id | The producer ID.
producer_epoch | The current epoch associated with the producer.
committed | True if the transaction was committed, false if it was aborted.
+
+
EndTxn Request (Version: 1) => transactional_id producer_id producer_epoch committed 
+  transactional_id => STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  committed => BOOLEAN
+

Request header version: 1

Field | Description
--- | ---
transactional_id | The ID of the transaction to end.
producer_id | The producer ID.
producer_epoch | The current epoch associated with the producer.
committed | True if the transaction was committed, false if it was aborted.
+
+
EndTxn Request (Version: 2) => transactional_id producer_id producer_epoch committed 
+  transactional_id => STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  committed => BOOLEAN
+

Request header version: 1

Field | Description
--- | ---
transactional_id | The ID of the transaction to end.
producer_id | The producer ID.
producer_epoch | The current epoch associated with the producer.
committed | True if the transaction was committed, false if it was aborted.
+
+
EndTxn Request (Version: 3) => transactional_id producer_id producer_epoch committed _tagged_fields 
+  transactional_id => COMPACT_STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  committed => BOOLEAN
+

Request header version: 2

Field | Description
--- | ---
transactional_id | The ID of the transaction to end.
producer_id | The producer ID.
producer_epoch | The current epoch associated with the producer.
committed | True if the transaction was committed, false if it was aborted.
_tagged_fields | The tagged fields
+
+
EndTxn Request (Version: 4) => transactional_id producer_id producer_epoch committed _tagged_fields 
+  transactional_id => COMPACT_STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  committed => BOOLEAN
+

Request header version: 2

Field | Description
--- | ---
transactional_id | The ID of the transaction to end.
producer_id | The producer ID.
producer_epoch | The current epoch associated with the producer.
committed | True if the transaction was committed, false if it was aborted.
_tagged_fields | The tagged fields
+
+
EndTxn Request (Version: 5) => transactional_id producer_id producer_epoch committed _tagged_fields 
+  transactional_id => COMPACT_STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  committed => BOOLEAN
+

Request header version: 2

Field | Description
--- | ---
transactional_id | The ID of the transaction to end.
producer_id | The producer ID.
producer_epoch | The current epoch associated with the producer.
committed | True if the transaction was committed, false if it was aborted.
_tagged_fields | The tagged fields
+
+Responses:
+
EndTxn Response (Version: 0) => throttle_time_ms error_code 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
+
+
EndTxn Response (Version: 1) => throttle_time_ms error_code 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
+
+
EndTxn Response (Version: 2) => throttle_time_ms error_code 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
+
+
EndTxn Response (Version: 3) => throttle_time_ms error_code _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
_tagged_fields | The tagged fields
+
+
EndTxn Response (Version: 4) => throttle_time_ms error_code _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
_tagged_fields | The tagged fields
+
+
EndTxn Response (Version: 5) => throttle_time_ms error_code producer_id producer_epoch _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  producer_id => INT64
+  producer_epoch => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
producer_id | The producer ID.
producer_epoch | The current epoch associated with the producer.
_tagged_fields | The tagged fields
+
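On the client side, `commitTransaction()` and `abortTransaction()` both end in an EndTxn request; they differ only in the value of the `committed` flag. A sketch of the usual error-handling pattern around a transaction, reusing the `producer` from the earlier sketches:

```java
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.ProducerFencedException;

try {
    producer.beginTransaction();
    producer.send(new ProducerRecord<>("output-topic", "key", "value"));
    producer.commitTransaction();        // EndTxn with committed = true
} catch (ProducerFencedException e) {
    // A newer producer with the same transactional.id bumped the epoch;
    // this instance can only close.
    producer.close();
} catch (KafkaException e) {
    producer.abortTransaction();         // EndTxn with committed = false
}
```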
+
WriteTxnMarkers API (Key: 27):
+
+Requests:
+
WriteTxnMarkers Request (Version: 1) => [markers] _tagged_fields 
+  markers => producer_id producer_epoch transaction_result [topics] coordinator_epoch _tagged_fields 
+    producer_id => INT64
+    producer_epoch => INT16
+    transaction_result => BOOLEAN
+    topics => name [partition_indexes] _tagged_fields 
+      name => COMPACT_STRING
+      partition_indexes => INT32
+    coordinator_epoch => INT32
+

Request header version: 2

Field | Description
--- | ---
markers | The transaction markers to be written.
producer_id | The current producer ID.
producer_epoch | The current epoch associated with the producer ID.
transaction_result | The result of the transaction to write to the partitions (false = ABORT, true = COMMIT).
topics | Each topic that we want to write transaction marker(s) for.
name | The topic name.
partition_indexes | The indexes of the partitions to write transaction markers for.
_tagged_fields | The tagged fields
coordinator_epoch | Epoch associated with the transaction state partition hosted by this transaction coordinator.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+Responses:
+
WriteTxnMarkers Response (Version: 1) => [markers] _tagged_fields 
+  markers => producer_id [topics] _tagged_fields 
+    producer_id => INT64
+    topics => name [partitions] _tagged_fields 
+      name => COMPACT_STRING
+      partitions => partition_index error_code _tagged_fields 
+        partition_index => INT32
+        error_code => INT16
+

Response header version: 1

Field | Description
--- | ---
markers | The results for writing markers.
producer_id | The current producer ID in use by the transactional ID.
topics | The results by topic.
name | The topic name.
partitions | The results by partition.
partition_index | The partition index.
error_code | The error code, or 0 if there was no error.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
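WriteTxnMarkers is broker-internal: the transaction coordinator asks the partition leaders to write COMMIT or ABORT markers into the log, and clients never send it directly. Those markers are what a consumer relies on when it is configured to read only committed data. A configuration sketch; the broker address and group id are placeholders:

```java
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");  // assumed local broker
props.put("group.id", "read-committed-demo");       // hypothetical group id
// Only return records up to the last stable offset, i.e. skip data from
// transactions that were aborted (as recorded by the ABORT markers).
props.put("isolation.level", "read_committed");
props.put("key.deserializer", StringDeserializer.class.getName());
props.put("value.deserializer", StringDeserializer.class.getName());
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
```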
+
TxnOffsetCommit API (Key: 28):
+
+Requests:
+
TxnOffsetCommit Request (Version: 0) => transactional_id group_id producer_id producer_epoch [topics] 
+  transactional_id => STRING
+  group_id => STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index committed_offset committed_metadata 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_metadata => NULLABLE_STRING
+

Request header version: 1

Field | Description
--- | ---
transactional_id | The ID of the transaction.
group_id | The ID of the group.
producer_id | The current producer ID in use by the transactional ID.
producer_epoch | The current epoch associated with the producer ID.
topics | Each topic that we want to commit offsets for.
name | The topic name.
partitions | The partitions inside the topic that we want to commit offsets for.
partition_index | The index of the partition within the topic.
committed_offset | The message offset to be committed.
committed_metadata | Any associated metadata the client wants to keep.
+
+
TxnOffsetCommit Request (Version: 1) => transactional_id group_id producer_id producer_epoch [topics] 
+  transactional_id => STRING
+  group_id => STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index committed_offset committed_metadata 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_metadata => NULLABLE_STRING
+

Request header version: 1

Field | Description
--- | ---
transactional_id | The ID of the transaction.
group_id | The ID of the group.
producer_id | The current producer ID in use by the transactional ID.
producer_epoch | The current epoch associated with the producer ID.
topics | Each topic that we want to commit offsets for.
name | The topic name.
partitions | The partitions inside the topic that we want to commit offsets for.
partition_index | The index of the partition within the topic.
committed_offset | The message offset to be committed.
committed_metadata | Any associated metadata the client wants to keep.
+
+
TxnOffsetCommit Request (Version: 2) => transactional_id group_id producer_id producer_epoch [topics] 
+  transactional_id => STRING
+  group_id => STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index committed_offset committed_leader_epoch committed_metadata 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_leader_epoch => INT32
+      committed_metadata => NULLABLE_STRING
+

Request header version: 1

Field | Description
--- | ---
transactional_id | The ID of the transaction.
group_id | The ID of the group.
producer_id | The current producer ID in use by the transactional ID.
producer_epoch | The current epoch associated with the producer ID.
topics | Each topic that we want to commit offsets for.
name | The topic name.
partitions | The partitions inside the topic that we want to commit offsets for.
partition_index | The index of the partition within the topic.
committed_offset | The message offset to be committed.
committed_leader_epoch | The leader epoch of the last consumed record.
committed_metadata | Any associated metadata the client wants to keep.
+
+
TxnOffsetCommit Request (Version: 3) => transactional_id group_id producer_id producer_epoch generation_id member_id group_instance_id [topics] _tagged_fields 
+  transactional_id => COMPACT_STRING
+  group_id => COMPACT_STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  generation_id => INT32
+  member_id => COMPACT_STRING
+  group_instance_id => COMPACT_NULLABLE_STRING
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index committed_offset committed_leader_epoch committed_metadata _tagged_fields 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_leader_epoch => INT32
+      committed_metadata => COMPACT_NULLABLE_STRING
+

Request header version: 2

Field | Description
--- | ---
transactional_id | The ID of the transaction.
group_id | The ID of the group.
producer_id | The current producer ID in use by the transactional ID.
producer_epoch | The current epoch associated with the producer ID.
generation_id | The generation of the consumer.
member_id | The member ID assigned by the group coordinator.
group_instance_id | The unique identifier of the consumer instance provided by end user.
topics | Each topic that we want to commit offsets for.
name | The topic name.
partitions | The partitions inside the topic that we want to commit offsets for.
partition_index | The index of the partition within the topic.
committed_offset | The message offset to be committed.
committed_leader_epoch | The leader epoch of the last consumed record.
committed_metadata | Any associated metadata the client wants to keep.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
TxnOffsetCommit Request (Version: 4) => transactional_id group_id producer_id producer_epoch generation_id member_id group_instance_id [topics] _tagged_fields 
+  transactional_id => COMPACT_STRING
+  group_id => COMPACT_STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  generation_id => INT32
+  member_id => COMPACT_STRING
+  group_instance_id => COMPACT_NULLABLE_STRING
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index committed_offset committed_leader_epoch committed_metadata _tagged_fields 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_leader_epoch => INT32
+      committed_metadata => COMPACT_NULLABLE_STRING
+

Request header version: 2

Field | Description
--- | ---
transactional_id | The ID of the transaction.
group_id | The ID of the group.
producer_id | The current producer ID in use by the transactional ID.
producer_epoch | The current epoch associated with the producer ID.
generation_id | The generation of the consumer.
member_id | The member ID assigned by the group coordinator.
group_instance_id | The unique identifier of the consumer instance provided by end user.
topics | Each topic that we want to commit offsets for.
name | The topic name.
partitions | The partitions inside the topic that we want to commit offsets for.
partition_index | The index of the partition within the topic.
committed_offset | The message offset to be committed.
committed_leader_epoch | The leader epoch of the last consumed record.
committed_metadata | Any associated metadata the client wants to keep.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
TxnOffsetCommit Request (Version: 5) => transactional_id group_id producer_id producer_epoch generation_id member_id group_instance_id [topics] _tagged_fields 
+  transactional_id => COMPACT_STRING
+  group_id => COMPACT_STRING
+  producer_id => INT64
+  producer_epoch => INT16
+  generation_id => INT32
+  member_id => COMPACT_STRING
+  group_instance_id => COMPACT_NULLABLE_STRING
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index committed_offset committed_leader_epoch committed_metadata _tagged_fields 
+      partition_index => INT32
+      committed_offset => INT64
+      committed_leader_epoch => INT32
+      committed_metadata => COMPACT_NULLABLE_STRING
+

Request header version: 2

Field | Description
--- | ---
transactional_id | The ID of the transaction.
group_id | The ID of the group.
producer_id | The current producer ID in use by the transactional ID.
producer_epoch | The current epoch associated with the producer ID.
generation_id | The generation of the consumer.
member_id | The member ID assigned by the group coordinator.
group_instance_id | The unique identifier of the consumer instance provided by end user.
topics | Each topic that we want to commit offsets for.
name | The topic name.
partitions | The partitions inside the topic that we want to commit offsets for.
partition_index | The index of the partition within the topic.
committed_offset | The message offset to be committed.
committed_leader_epoch | The leader epoch of the last consumed record.
committed_metadata | Any associated metadata the client wants to keep.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+Responses:
+
TxnOffsetCommit Response (Version: 0) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topics | The responses for each topic.
name | The topic name.
partitions | The responses for each partition in the topic.
partition_index | The partition index.
error_code | The error code, or 0 if there was no error.
+
+
TxnOffsetCommit Response (Version: 1) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topics | The responses for each topic.
name | The topic name.
partitions | The responses for each partition in the topic.
partition_index | The partition index.
error_code | The error code, or 0 if there was no error.
+
+
TxnOffsetCommit Response (Version: 2) => throttle_time_ms [topics] 
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topics | The responses for each topic.
name | The topic name.
partitions | The responses for each partition in the topic.
partition_index | The partition index.
error_code | The error code, or 0 if there was no error.
+
+
TxnOffsetCommit Response (Version: 3) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index error_code _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topics | The responses for each topic.
name | The topic name.
partitions | The responses for each partition in the topic.
partition_index | The partition index.
error_code | The error code, or 0 if there was no error.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
TxnOffsetCommit Response (Version: 4) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index error_code _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topics | The responses for each topic.
name | The topic name.
partitions | The responses for each partition in the topic.
partition_index | The partition index.
error_code | The error code, or 0 if there was no error.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
TxnOffsetCommit Response (Version: 5) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index error_code _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
topics | The responses for each topic.
name | The topic name.
partitions | The responses for each partition in the topic.
partition_index | The partition index.
error_code | The error code, or 0 if there was no error.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
DescribeAcls API (Key: 29):
+
+Requests:
+
DescribeAcls Request (Version: 1) => resource_type_filter resource_name_filter pattern_type_filter principal_filter host_filter operation permission_type 
+  resource_type_filter => INT8
+  resource_name_filter => NULLABLE_STRING
+  pattern_type_filter => INT8
+  principal_filter => NULLABLE_STRING
+  host_filter => NULLABLE_STRING
+  operation => INT8
+  permission_type => INT8
+

Request header version: 1

Field | Description
--- | ---
resource_type_filter | The resource type.
resource_name_filter | The resource name, or null to match any resource name.
pattern_type_filter | The resource pattern to match.
principal_filter | The principal to match, or null to match any principal.
host_filter | The host to match, or null to match any host.
operation | The operation to match.
permission_type | The permission type to match.
+
+
DescribeAcls Request (Version: 2) => resource_type_filter resource_name_filter pattern_type_filter principal_filter host_filter operation permission_type _tagged_fields 
+  resource_type_filter => INT8
+  resource_name_filter => COMPACT_NULLABLE_STRING
+  pattern_type_filter => INT8
+  principal_filter => COMPACT_NULLABLE_STRING
+  host_filter => COMPACT_NULLABLE_STRING
+  operation => INT8
+  permission_type => INT8
+

Request header version: 2

Field | Description
--- | ---
resource_type_filter | The resource type.
resource_name_filter | The resource name, or null to match any resource name.
pattern_type_filter | The resource pattern to match.
principal_filter | The principal to match, or null to match any principal.
host_filter | The host to match, or null to match any host.
operation | The operation to match.
permission_type | The permission type to match.
_tagged_fields | The tagged fields
+
+
DescribeAcls Request (Version: 3) => resource_type_filter resource_name_filter pattern_type_filter principal_filter host_filter operation permission_type _tagged_fields 
+  resource_type_filter => INT8
+  resource_name_filter => COMPACT_NULLABLE_STRING
+  pattern_type_filter => INT8
+  principal_filter => COMPACT_NULLABLE_STRING
+  host_filter => COMPACT_NULLABLE_STRING
+  operation => INT8
+  permission_type => INT8
+

Request header version: 2

Field | Description
--- | ---
resource_type_filter | The resource type.
resource_name_filter | The resource name, or null to match any resource name.
pattern_type_filter | The resource pattern to match.
principal_filter | The principal to match, or null to match any principal.
host_filter | The host to match, or null to match any host.
operation | The operation to match.
permission_type | The permission type to match.
_tagged_fields | The tagged fields
+
+Responses:
+
DescribeAcls Response (Version: 1) => throttle_time_ms error_code error_message [resources] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => NULLABLE_STRING
+  resources => resource_type resource_name pattern_type [acls] 
+    resource_type => INT8
+    resource_name => STRING
+    pattern_type => INT8
+    acls => principal host operation permission_type 
+      principal => STRING
+      host => STRING
+      operation => INT8
+      permission_type => INT8
+

Response header version: 0

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
error_message | The error message, or null if there was no error.
resources | Each Resource that is referenced in an ACL.
resource_type | The resource type.
resource_name | The resource name.
pattern_type | The resource pattern type.
acls | The ACLs.
principal | The ACL principal.
host | The ACL host.
operation | The ACL operation.
permission_type | The ACL permission type.
+
+
DescribeAcls Response (Version: 2) => throttle_time_ms error_code error_message [resources] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  resources => resource_type resource_name pattern_type [acls] _tagged_fields 
+    resource_type => INT8
+    resource_name => COMPACT_STRING
+    pattern_type => INT8
+    acls => principal host operation permission_type _tagged_fields 
+      principal => COMPACT_STRING
+      host => COMPACT_STRING
+      operation => INT8
+      permission_type => INT8
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
error_message | The error message, or null if there was no error.
resources | Each Resource that is referenced in an ACL.
resource_type | The resource type.
resource_name | The resource name.
pattern_type | The resource pattern type.
acls | The ACLs.
principal | The ACL principal.
host | The ACL host.
operation | The ACL operation.
permission_type | The ACL permission type.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
DescribeAcls Response (Version: 3) => throttle_time_ms error_code error_message [resources] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  resources => resource_type resource_name pattern_type [acls] _tagged_fields 
+    resource_type => INT8
+    resource_name => COMPACT_STRING
+    pattern_type => INT8
+    acls => principal host operation permission_type _tagged_fields 
+      principal => COMPACT_STRING
+      host => COMPACT_STRING
+      operation => INT8
+      permission_type => INT8
+

Response header version: 1

Field | Description
--- | ---
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The error code, or 0 if there was no error.
error_message | The error message, or null if there was no error.
resources | Each Resource that is referenced in an ACL.
resource_type | The resource type.
resource_name | The resource name.
pattern_type | The resource pattern type.
acls | The ACLs.
principal | The ACL principal.
host | The ACL host.
operation | The ACL operation.
permission_type | The ACL permission type.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
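For illustration, here is a minimal sketch of exercising the DescribeAcls API through the Java `Admin` client, which sends this request under the hood; the bootstrap address is a placeholder.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.acl.AclBindingFilter;

import java.util.Properties;

public class DescribeAclsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // AclBindingFilter.ANY matches every ACL; the broker answers with the
            // DescribeAcls response shown above (resources, acls, error fields).
            admin.describeAcls(AclBindingFilter.ANY).values().get()
                 .forEach(System.out::println);
        }
    }
}
```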
CreateAcls API (Key: 30):
+ +Requests:
+
CreateAcls Request (Version: 1) => [creations] 
+  creations => resource_type resource_name resource_pattern_type principal host operation permission_type 
+    resource_type => INT8
+    resource_name => STRING
+    resource_pattern_type => INT8
+    principal => STRING
+    host => STRING
+    operation => INT8
+    permission_type => INT8
+

Request header version: 1

| Field | Description |
| --- | --- |
| creations | The ACLs that we want to create. |
| resource_type | The type of the resource. |
| resource_name | The resource name for the ACL. |
| resource_pattern_type | The pattern type for the ACL. |
| principal | The principal for the ACL. |
| host | The host for the ACL. |
| operation | The operation type for the ACL (read, write, etc.). |
| permission_type | The permission type for the ACL (allow, deny, etc.). |
+
CreateAcls Request (Version: 2) => [creations] _tagged_fields 
+  creations => resource_type resource_name resource_pattern_type principal host operation permission_type _tagged_fields 
+    resource_type => INT8
+    resource_name => COMPACT_STRING
+    resource_pattern_type => INT8
+    principal => COMPACT_STRING
+    host => COMPACT_STRING
+    operation => INT8
+    permission_type => INT8
+

Request header version: 2

| Field | Description |
| --- | --- |
| creations | The ACLs that we want to create. |
| resource_type | The type of the resource. |
| resource_name | The resource name for the ACL. |
| resource_pattern_type | The pattern type for the ACL. |
| principal | The principal for the ACL. |
| host | The host for the ACL. |
| operation | The operation type for the ACL (read, write, etc.). |
| permission_type | The permission type for the ACL (allow, deny, etc.). |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
CreateAcls Request (Version: 3) => [creations] _tagged_fields 
+  creations => resource_type resource_name resource_pattern_type principal host operation permission_type _tagged_fields 
+    resource_type => INT8
+    resource_name => COMPACT_STRING
+    resource_pattern_type => INT8
+    principal => COMPACT_STRING
+    host => COMPACT_STRING
+    operation => INT8
+    permission_type => INT8
+

Request header version: 2

| Field | Description |
| --- | --- |
| creations | The ACLs that we want to create. |
| resource_type | The type of the resource. |
| resource_name | The resource name for the ACL. |
| resource_pattern_type | The pattern type for the ACL. |
| principal | The principal for the ACL. |
| host | The host for the ACL. |
| operation | The operation type for the ACL (read, write, etc.). |
| permission_type | The permission type for the ACL (allow, deny, etc.). |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+Responses:
+
CreateAcls Response (Version: 1) => throttle_time_ms [results] 
+  throttle_time_ms => INT32
+  results => error_code error_message 
+    error_code => INT16
+    error_message => NULLABLE_STRING
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The results for each ACL creation. |
| error_code | The result error, or zero if there was no error. |
| error_message | The result message, or null if there was no error. |
+
CreateAcls Response (Version: 2) => throttle_time_ms [results] _tagged_fields 
+  throttle_time_ms => INT32
+  results => error_code error_message _tagged_fields 
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The results for each ACL creation. |
| error_code | The result error, or zero if there was no error. |
| error_message | The result message, or null if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
CreateAcls Response (Version: 3) => throttle_time_ms [results] _tagged_fields 
+  throttle_time_ms => INT32
+  results => error_code error_message _tagged_fields 
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The results for each ACL creation. |
| error_code | The result error, or zero if there was no error. |
| error_message | The result message, or null if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
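For illustration, a minimal sketch of issuing a CreateAcls request via the Java `Admin` client; the topic name `orders` and principal `User:alice` are placeholders.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;

import java.util.List;
import java.util.Properties;

public class CreateAclsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // One "creations" entry: a resource pattern plus an access control entry,
            // mirroring the resource_*/principal/host/operation/permission_type fields above.
            AclBinding binding = new AclBinding(
                new ResourcePattern(ResourceType.TOPIC, "orders", PatternType.LITERAL),
                new AccessControlEntry("User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW));
            admin.createAcls(List.of(binding)).all().get();
        }
    }
}
```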
DeleteAcls API (Key: 31):
+ +Requests:
+
DeleteAcls Request (Version: 1) => [filters] 
+  filters => resource_type_filter resource_name_filter pattern_type_filter principal_filter host_filter operation permission_type 
+    resource_type_filter => INT8
+    resource_name_filter => NULLABLE_STRING
+    pattern_type_filter => INT8
+    principal_filter => NULLABLE_STRING
+    host_filter => NULLABLE_STRING
+    operation => INT8
+    permission_type => INT8
+

Request header version: 1

| Field | Description |
| --- | --- |
| filters | The filters to use when deleting ACLs. |
| resource_type_filter | The resource type. |
| resource_name_filter | The resource name. |
| pattern_type_filter | The pattern type. |
| principal_filter | The principal filter, or null to accept all principals. |
| host_filter | The host filter, or null to accept all hosts. |
| operation | The ACL operation. |
| permission_type | The permission type. |
+
DeleteAcls Request (Version: 2) => [filters] _tagged_fields 
+  filters => resource_type_filter resource_name_filter pattern_type_filter principal_filter host_filter operation permission_type _tagged_fields 
+    resource_type_filter => INT8
+    resource_name_filter => COMPACT_NULLABLE_STRING
+    pattern_type_filter => INT8
+    principal_filter => COMPACT_NULLABLE_STRING
+    host_filter => COMPACT_NULLABLE_STRING
+    operation => INT8
+    permission_type => INT8
+

Request header version: 2

| Field | Description |
| --- | --- |
| filters | The filters to use when deleting ACLs. |
| resource_type_filter | The resource type. |
| resource_name_filter | The resource name. |
| pattern_type_filter | The pattern type. |
| principal_filter | The principal filter, or null to accept all principals. |
| host_filter | The host filter, or null to accept all hosts. |
| operation | The ACL operation. |
| permission_type | The permission type. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
DeleteAcls Request (Version: 3) => [filters] _tagged_fields 
+  filters => resource_type_filter resource_name_filter pattern_type_filter principal_filter host_filter operation permission_type _tagged_fields 
+    resource_type_filter => INT8
+    resource_name_filter => COMPACT_NULLABLE_STRING
+    pattern_type_filter => INT8
+    principal_filter => COMPACT_NULLABLE_STRING
+    host_filter => COMPACT_NULLABLE_STRING
+    operation => INT8
+    permission_type => INT8
+

Request header version: 2

| Field | Description |
| --- | --- |
| filters | The filters to use when deleting ACLs. |
| resource_type_filter | The resource type. |
| resource_name_filter | The resource name. |
| pattern_type_filter | The pattern type. |
| principal_filter | The principal filter, or null to accept all principals. |
| host_filter | The host filter, or null to accept all hosts. |
| operation | The ACL operation. |
| permission_type | The permission type. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+Responses:
+
DeleteAcls Response (Version: 1) => throttle_time_ms [filter_results] 
+  throttle_time_ms => INT32
+  filter_results => error_code error_message [matching_acls] 
+    error_code => INT16
+    error_message => NULLABLE_STRING
+    matching_acls => error_code error_message resource_type resource_name pattern_type principal host operation permission_type 
+      error_code => INT16
+      error_message => NULLABLE_STRING
+      resource_type => INT8
+      resource_name => STRING
+      pattern_type => INT8
+      principal => STRING
+      host => STRING
+      operation => INT8
+      permission_type => INT8
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| filter_results | The results for each filter. |
| error_code | The error code, or 0 if the filter succeeded. |
| error_message | The error message, or null if the filter succeeded. |
| matching_acls | The ACLs which matched this filter. |
| error_code | The deletion error code, or 0 if the deletion succeeded. |
| error_message | The deletion error message, or null if the deletion succeeded. |
| resource_type | The ACL resource type. |
| resource_name | The ACL resource name. |
| pattern_type | The ACL resource pattern type. |
| principal | The ACL principal. |
| host | The ACL host. |
| operation | The ACL operation. |
| permission_type | The ACL permission type. |
+
DeleteAcls Response (Version: 2) => throttle_time_ms [filter_results] _tagged_fields 
+  throttle_time_ms => INT32
+  filter_results => error_code error_message [matching_acls] _tagged_fields 
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    matching_acls => error_code error_message resource_type resource_name pattern_type principal host operation permission_type _tagged_fields 
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+      resource_type => INT8
+      resource_name => COMPACT_STRING
+      pattern_type => INT8
+      principal => COMPACT_STRING
+      host => COMPACT_STRING
+      operation => INT8
+      permission_type => INT8
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| filter_results | The results for each filter. |
| error_code | The error code, or 0 if the filter succeeded. |
| error_message | The error message, or null if the filter succeeded. |
| matching_acls | The ACLs which matched this filter. |
| error_code | The deletion error code, or 0 if the deletion succeeded. |
| error_message | The deletion error message, or null if the deletion succeeded. |
| resource_type | The ACL resource type. |
| resource_name | The ACL resource name. |
| pattern_type | The ACL resource pattern type. |
| principal | The ACL principal. |
| host | The ACL host. |
| operation | The ACL operation. |
| permission_type | The ACL permission type. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
DeleteAcls Response (Version: 3) => throttle_time_ms [filter_results] _tagged_fields 
+  throttle_time_ms => INT32
+  filter_results => error_code error_message [matching_acls] _tagged_fields 
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    matching_acls => error_code error_message resource_type resource_name pattern_type principal host operation permission_type _tagged_fields 
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+      resource_type => INT8
+      resource_name => COMPACT_STRING
+      pattern_type => INT8
+      principal => COMPACT_STRING
+      host => COMPACT_STRING
+      operation => INT8
+      permission_type => INT8
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| filter_results | The results for each filter. |
| error_code | The error code, or 0 if the filter succeeded. |
| error_message | The error message, or null if the filter succeeded. |
| matching_acls | The ACLs which matched this filter. |
| error_code | The deletion error code, or 0 if the deletion succeeded. |
| error_message | The deletion error message, or null if the deletion succeeded. |
| resource_type | The ACL resource type. |
| resource_name | The ACL resource name. |
| pattern_type | The ACL resource pattern type. |
| principal | The ACL principal. |
| host | The ACL host. |
| operation | The ACL operation. |
| permission_type | The ACL permission type. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
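For illustration, a minimal sketch of a DeleteAcls request via the Java `Admin` client; the filter below matches any principal/host for the placeholder topic `orders`.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.acl.AccessControlEntryFilter;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePatternFilter;
import org.apache.kafka.common.resource.ResourceType;

import java.util.List;
import java.util.Properties;

public class DeleteAclsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // A filter per the *_filter fields above; null principal/host accept everything.
            AclBindingFilter filter = new AclBindingFilter(
                new ResourcePatternFilter(ResourceType.TOPIC, "orders", PatternType.LITERAL),
                new AccessControlEntryFilter(null, null, AclOperation.ANY, AclPermissionType.ANY));
            // The result carries the matching_acls that were removed.
            admin.deleteAcls(List.of(filter)).all().get().forEach(System.out::println);
        }
    }
}
```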
DescribeConfigs API (Key: 32):
+ +Requests:
+
DescribeConfigs Request (Version: 1) => [resources] include_synonyms 
+  resources => resource_type resource_name [configuration_keys] 
+    resource_type => INT8
+    resource_name => STRING
+    configuration_keys => STRING
+  include_synonyms => BOOLEAN
+

Request header version: 1

| Field | Description |
| --- | --- |
| resources | The resources whose configurations we want to describe. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| configuration_keys | The configuration keys to list, or null to list all configuration keys. |
| include_synonyms | True if we should include all synonyms. |
+
DescribeConfigs Request (Version: 2) => [resources] include_synonyms 
+  resources => resource_type resource_name [configuration_keys] 
+    resource_type => INT8
+    resource_name => STRING
+    configuration_keys => STRING
+  include_synonyms => BOOLEAN
+

Request header version: 1

| Field | Description |
| --- | --- |
| resources | The resources whose configurations we want to describe. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| configuration_keys | The configuration keys to list, or null to list all configuration keys. |
| include_synonyms | True if we should include all synonyms. |
+
DescribeConfigs Request (Version: 3) => [resources] include_synonyms include_documentation 
+  resources => resource_type resource_name [configuration_keys] 
+    resource_type => INT8
+    resource_name => STRING
+    configuration_keys => STRING
+  include_synonyms => BOOLEAN
+  include_documentation => BOOLEAN
+

Request header version: 1

| Field | Description |
| --- | --- |
| resources | The resources whose configurations we want to describe. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| configuration_keys | The configuration keys to list, or null to list all configuration keys. |
| include_synonyms | True if we should include all synonyms. |
| include_documentation | True if we should include configuration documentation. |
+
DescribeConfigs Request (Version: 4) => [resources] include_synonyms include_documentation _tagged_fields 
+  resources => resource_type resource_name [configuration_keys] _tagged_fields 
+    resource_type => INT8
+    resource_name => COMPACT_STRING
+    configuration_keys => COMPACT_STRING
+  include_synonyms => BOOLEAN
+  include_documentation => BOOLEAN
+

Request header version: 2

| Field | Description |
| --- | --- |
| resources | The resources whose configurations we want to describe. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| configuration_keys | The configuration keys to list, or null to list all configuration keys. |
| _tagged_fields | The tagged fields |
| include_synonyms | True if we should include all synonyms. |
| include_documentation | True if we should include configuration documentation. |
| _tagged_fields | The tagged fields |
+Responses:
+
DescribeConfigs Response (Version: 1) => throttle_time_ms [results] 
+  throttle_time_ms => INT32
+  results => error_code error_message resource_type resource_name [configs] 
+    error_code => INT16
+    error_message => NULLABLE_STRING
+    resource_type => INT8
+    resource_name => STRING
+    configs => name value read_only config_source is_sensitive [synonyms] 
+      name => STRING
+      value => NULLABLE_STRING
+      read_only => BOOLEAN
+      config_source => INT8
+      is_sensitive => BOOLEAN
+      synonyms => name value source 
+        name => STRING
+        value => NULLABLE_STRING
+        source => INT8
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The results for each resource. |
| error_code | The error code, or 0 if we were able to successfully describe the configurations. |
| error_message | The error message, or null if we were able to successfully describe the configurations. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| configs | Each listed configuration. |
| name | The configuration name. |
| value | The configuration value. |
| read_only | True if the configuration is read-only. |
| config_source | The configuration source. |
| is_sensitive | True if this configuration is sensitive. |
| synonyms | The synonyms for this configuration key. |
| name | The synonym name. |
| value | The synonym value. |
| source | The synonym source. |
+
DescribeConfigs Response (Version: 2) => throttle_time_ms [results] 
+  throttle_time_ms => INT32
+  results => error_code error_message resource_type resource_name [configs] 
+    error_code => INT16
+    error_message => NULLABLE_STRING
+    resource_type => INT8
+    resource_name => STRING
+    configs => name value read_only config_source is_sensitive [synonyms] 
+      name => STRING
+      value => NULLABLE_STRING
+      read_only => BOOLEAN
+      config_source => INT8
+      is_sensitive => BOOLEAN
+      synonyms => name value source 
+        name => STRING
+        value => NULLABLE_STRING
+        source => INT8
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The results for each resource. |
| error_code | The error code, or 0 if we were able to successfully describe the configurations. |
| error_message | The error message, or null if we were able to successfully describe the configurations. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| configs | Each listed configuration. |
| name | The configuration name. |
| value | The configuration value. |
| read_only | True if the configuration is read-only. |
| config_source | The configuration source. |
| is_sensitive | True if this configuration is sensitive. |
| synonyms | The synonyms for this configuration key. |
| name | The synonym name. |
| value | The synonym value. |
| source | The synonym source. |
+
DescribeConfigs Response (Version: 3) => throttle_time_ms [results] 
+  throttle_time_ms => INT32
+  results => error_code error_message resource_type resource_name [configs] 
+    error_code => INT16
+    error_message => NULLABLE_STRING
+    resource_type => INT8
+    resource_name => STRING
+    configs => name value read_only config_source is_sensitive [synonyms] config_type documentation 
+      name => STRING
+      value => NULLABLE_STRING
+      read_only => BOOLEAN
+      config_source => INT8
+      is_sensitive => BOOLEAN
+      synonyms => name value source 
+        name => STRING
+        value => NULLABLE_STRING
+        source => INT8
+      config_type => INT8
+      documentation => NULLABLE_STRING
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The results for each resource. |
| error_code | The error code, or 0 if we were able to successfully describe the configurations. |
| error_message | The error message, or null if we were able to successfully describe the configurations. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| configs | Each listed configuration. |
| name | The configuration name. |
| value | The configuration value. |
| read_only | True if the configuration is read-only. |
| config_source | The configuration source. |
| is_sensitive | True if this configuration is sensitive. |
| synonyms | The synonyms for this configuration key. |
| name | The synonym name. |
| value | The synonym value. |
| source | The synonym source. |
| config_type | The configuration data type. Type can be one of the following values - BOOLEAN, STRING, INT, SHORT, LONG, DOUBLE, LIST, CLASS, PASSWORD. |
| documentation | The configuration documentation. |
+
DescribeConfigs Response (Version: 4) => throttle_time_ms [results] _tagged_fields 
+  throttle_time_ms => INT32
+  results => error_code error_message resource_type resource_name [configs] _tagged_fields 
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    resource_type => INT8
+    resource_name => COMPACT_STRING
+    configs => name value read_only config_source is_sensitive [synonyms] config_type documentation _tagged_fields 
+      name => COMPACT_STRING
+      value => COMPACT_NULLABLE_STRING
+      read_only => BOOLEAN
+      config_source => INT8
+      is_sensitive => BOOLEAN
+      synonyms => name value source _tagged_fields 
+        name => COMPACT_STRING
+        value => COMPACT_NULLABLE_STRING
+        source => INT8
+      config_type => INT8
+      documentation => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The results for each resource. |
| error_code | The error code, or 0 if we were able to successfully describe the configurations. |
| error_message | The error message, or null if we were able to successfully describe the configurations. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| configs | Each listed configuration. |
| name | The configuration name. |
| value | The configuration value. |
| read_only | True if the configuration is read-only. |
| config_source | The configuration source. |
| is_sensitive | True if this configuration is sensitive. |
| synonyms | The synonyms for this configuration key. |
| name | The synonym name. |
| value | The synonym value. |
| source | The synonym source. |
| _tagged_fields | The tagged fields |
| config_type | The configuration data type. Type can be one of the following values - BOOLEAN, STRING, INT, SHORT, LONG, DOUBLE, LIST, CLASS, PASSWORD. |
| documentation | The configuration documentation. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
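For illustration, a minimal sketch of a DescribeConfigs request via the Java `Admin` client; the topic name `orders` is a placeholder.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.config.ConfigResource;

import java.util.List;
import java.util.Map;
import java.util.Properties;

public class DescribeConfigsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "orders");
            // Each map entry corresponds to one "results" element above (configs, synonyms, ...).
            Map<ConfigResource, Config> configs =
                admin.describeConfigs(List.of(topic)).all().get();
            configs.get(topic).entries()
                   .forEach(e -> System.out.println(e.name() + " = " + e.value()));
        }
    }
}
```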
AlterConfigs API (Key: 33):
+ +Requests:
+
AlterConfigs Request (Version: 0) => [resources] validate_only 
+  resources => resource_type resource_name [configs] 
+    resource_type => INT8
+    resource_name => STRING
+    configs => name value 
+      name => STRING
+      value => NULLABLE_STRING
+  validate_only => BOOLEAN
+

Request header version: 1

| Field | Description |
| --- | --- |
| resources | The updates for each resource. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| configs | The configurations. |
| name | The configuration key name. |
| value | The value to set for the configuration key. |
| validate_only | True if we should validate the request, but not change the configurations. |
+
AlterConfigs Request (Version: 1) => [resources] validate_only 
+  resources => resource_type resource_name [configs] 
+    resource_type => INT8
+    resource_name => STRING
+    configs => name value 
+      name => STRING
+      value => NULLABLE_STRING
+  validate_only => BOOLEAN
+

Request header version: 1

| Field | Description |
| --- | --- |
| resources | The updates for each resource. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| configs | The configurations. |
| name | The configuration key name. |
| value | The value to set for the configuration key. |
| validate_only | True if we should validate the request, but not change the configurations. |
+
AlterConfigs Request (Version: 2) => [resources] validate_only _tagged_fields 
+  resources => resource_type resource_name [configs] _tagged_fields 
+    resource_type => INT8
+    resource_name => COMPACT_STRING
+    configs => name value _tagged_fields 
+      name => COMPACT_STRING
+      value => COMPACT_NULLABLE_STRING
+  validate_only => BOOLEAN
+

Request header version: 2

| Field | Description |
| --- | --- |
| resources | The updates for each resource. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| configs | The configurations. |
| name | The configuration key name. |
| value | The value to set for the configuration key. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| validate_only | True if we should validate the request, but not change the configurations. |
| _tagged_fields | The tagged fields |
+Responses:
+
AlterConfigs Response (Version: 0) => throttle_time_ms [responses] 
+  throttle_time_ms => INT32
+  responses => error_code error_message resource_type resource_name 
+    error_code => INT16
+    error_message => NULLABLE_STRING
+    resource_type => INT8
+    resource_name => STRING
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| responses | The responses for each resource. |
| error_code | The resource error code. |
| error_message | The resource error message, or null if there was no error. |
| resource_type | The resource type. |
| resource_name | The resource name. |
+
AlterConfigs Response (Version: 1) => throttle_time_ms [responses] 
+  throttle_time_ms => INT32
+  responses => error_code error_message resource_type resource_name 
+    error_code => INT16
+    error_message => NULLABLE_STRING
+    resource_type => INT8
+    resource_name => STRING
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| responses | The responses for each resource. |
| error_code | The resource error code. |
| error_message | The resource error message, or null if there was no error. |
| resource_type | The resource type. |
| resource_name | The resource name. |
+
AlterConfigs Response (Version: 2) => throttle_time_ms [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  responses => error_code error_message resource_type resource_name _tagged_fields 
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    resource_type => INT8
+    resource_name => COMPACT_STRING
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| responses | The responses for each resource. |
| error_code | The resource error code. |
| error_message | The resource error message, or null if there was no error. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
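For illustration, a minimal sketch of an AlterConfigs request via the Java `Admin` client. Note that `Admin.alterConfigs` (which maps to this API) is deprecated in favor of `incrementalAlterConfigs`, which uses the separate IncrementalAlterConfigs API; the topic name and retention value below are placeholders.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

import java.util.List;
import java.util.Map;
import java.util.Properties;

public class AlterConfigsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "orders");
            // Sends one "resources" entry with its name/value config pairs, as described above.
            Config update = new Config(List.of(new ConfigEntry("retention.ms", "86400000")));
            admin.alterConfigs(Map.of(topic, update)).all().get();
        }
    }
}
```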
AlterReplicaLogDirs API (Key: 34):
+ +Requests:
+
AlterReplicaLogDirs Request (Version: 1) => [dirs] 
+  dirs => path [topics] 
+    path => STRING
+    topics => name [partitions] 
+      name => STRING
+      partitions => INT32
+

Request header version: 1

| Field | Description |
| --- | --- |
| dirs | The alterations to make for each directory. |
| path | The absolute directory path. |
| topics | The topics to add to the directory. |
| name | The topic name. |
| partitions | The partition indexes. |
+
AlterReplicaLogDirs Request (Version: 2) => [dirs] _tagged_fields 
+  dirs => path [topics] _tagged_fields 
+    path => COMPACT_STRING
+    topics => name [partitions] _tagged_fields 
+      name => COMPACT_STRING
+      partitions => INT32
+

Request header version: 2

| Field | Description |
| --- | --- |
| dirs | The alterations to make for each directory. |
| path | The absolute directory path. |
| topics | The topics to add to the directory. |
| name | The topic name. |
| partitions | The partition indexes. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+Responses:
+
AlterReplicaLogDirs Response (Version: 1) => throttle_time_ms [results] 
+  throttle_time_ms => INT32
+  results => topic_name [partitions] 
+    topic_name => STRING
+    partitions => partition_index error_code 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The results for each topic. |
| topic_name | The name of the topic. |
| partitions | The results for each partition. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
+
AlterReplicaLogDirs Response (Version: 2) => throttle_time_ms [results] _tagged_fields 
+  throttle_time_ms => INT32
+  results => topic_name [partitions] _tagged_fields 
+    topic_name => COMPACT_STRING
+    partitions => partition_index error_code _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The results for each topic. |
| topic_name | The name of the topic. |
| partitions | The results for each partition. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
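For illustration, a minimal sketch of an AlterReplicaLogDirs request via the Java `Admin` client; the topic, broker id, and target log directory are placeholders (the path must be one of that broker's configured `log.dirs`).

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.TopicPartitionReplica;

import java.util.Map;
import java.util.Properties;

public class AlterReplicaLogDirsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // Ask broker 1 to move its replica of orders-0 to another log directory,
            // i.e. one "dirs" entry with a single topic/partition, as described above.
            TopicPartitionReplica replica = new TopicPartitionReplica("orders", 0, 1);
            admin.alterReplicaLogDirs(Map.of(replica, "/var/kafka-logs-2")).all().get();
        }
    }
}
```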
DescribeLogDirs API (Key: 35):
+ +Requests:
+
DescribeLogDirs Request (Version: 1) => [topics] 
+  topics => topic [partitions] 
+    topic => STRING
+    partitions => INT32
+

Request header version: 1

| Field | Description |
| --- | --- |
| topics | Each topic that we want to describe log directories for, or null for all topics. |
| topic | The topic name. |
| partitions | The partition indexes. |
+
DescribeLogDirs Request (Version: 2) => [topics] _tagged_fields 
+  topics => topic [partitions] _tagged_fields 
+    topic => COMPACT_STRING
+    partitions => INT32
+

Request header version: 2

| Field | Description |
| --- | --- |
| topics | Each topic that we want to describe log directories for, or null for all topics. |
| topic | The topic name. |
| partitions | The partition indexes. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
DescribeLogDirs Request (Version: 3) => [topics] _tagged_fields 
+  topics => topic [partitions] _tagged_fields 
+    topic => COMPACT_STRING
+    partitions => INT32
+

Request header version: 2

| Field | Description |
| --- | --- |
| topics | Each topic that we want to describe log directories for, or null for all topics. |
| topic | The topic name. |
| partitions | The partition indexes. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
DescribeLogDirs Request (Version: 4) => [topics] _tagged_fields 
+  topics => topic [partitions] _tagged_fields 
+    topic => COMPACT_STRING
+    partitions => INT32
+

Request header version: 2

| Field | Description |
| --- | --- |
| topics | Each topic that we want to describe log directories for, or null for all topics. |
| topic | The topic name. |
| partitions | The partition indexes. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+Responses:
+
DescribeLogDirs Response (Version: 1) => throttle_time_ms [results] 
+  throttle_time_ms => INT32
+  results => error_code log_dir [topics] 
+    error_code => INT16
+    log_dir => STRING
+    topics => name [partitions] 
+      name => STRING
+      partitions => partition_index partition_size offset_lag is_future_key 
+        partition_index => INT32
+        partition_size => INT64
+        offset_lag => INT64
+        is_future_key => BOOLEAN
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The log directories. |
| error_code | The error code, or 0 if there was no error. |
| log_dir | The absolute log directory path. |
| topics | The topics. |
| name | The topic name. |
| partitions | The partitions. |
| partition_index | The partition index. |
| partition_size | The size of the log segments in this partition in bytes. |
| offset_lag | The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or current replica's LEO (if it is the future log for the partition). |
| is_future_key | True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of the replica in the future. |
+
DescribeLogDirs Response (Version: 2) => throttle_time_ms [results] _tagged_fields 
+  throttle_time_ms => INT32
+  results => error_code log_dir [topics] _tagged_fields 
+    error_code => INT16
+    log_dir => COMPACT_STRING
+    topics => name [partitions] _tagged_fields 
+      name => COMPACT_STRING
+      partitions => partition_index partition_size offset_lag is_future_key _tagged_fields 
+        partition_index => INT32
+        partition_size => INT64
+        offset_lag => INT64
+        is_future_key => BOOLEAN
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The log directories. |
| error_code | The error code, or 0 if there was no error. |
| log_dir | The absolute log directory path. |
| topics | The topics. |
| name | The topic name. |
| partitions | The partitions. |
| partition_index | The partition index. |
| partition_size | The size of the log segments in this partition in bytes. |
| offset_lag | The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or current replica's LEO (if it is the future log for the partition). |
| is_future_key | True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of the replica in the future. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
DescribeLogDirs Response (Version: 3) => throttle_time_ms error_code [results] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  results => error_code log_dir [topics] _tagged_fields 
+    error_code => INT16
+    log_dir => COMPACT_STRING
+    topics => name [partitions] _tagged_fields 
+      name => COMPACT_STRING
+      partitions => partition_index partition_size offset_lag is_future_key _tagged_fields 
+        partition_index => INT32
+        partition_size => INT64
+        offset_lag => INT64
+        is_future_key => BOOLEAN
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| results | The log directories. |
| error_code | The error code, or 0 if there was no error. |
| log_dir | The absolute log directory path. |
| topics | The topics. |
| name | The topic name. |
| partitions | The partitions. |
| partition_index | The partition index. |
| partition_size | The size of the log segments in this partition in bytes. |
| offset_lag | The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or current replica's LEO (if it is the future log for the partition). |
| is_future_key | True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of the replica in the future. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
DescribeLogDirs Response (Version: 4) => throttle_time_ms error_code [results] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  results => error_code log_dir [topics] total_bytes usable_bytes _tagged_fields 
+    error_code => INT16
+    log_dir => COMPACT_STRING
+    topics => name [partitions] _tagged_fields 
+      name => COMPACT_STRING
+      partitions => partition_index partition_size offset_lag is_future_key _tagged_fields 
+        partition_index => INT32
+        partition_size => INT64
+        offset_lag => INT64
+        is_future_key => BOOLEAN
+    total_bytes => INT64
+    usable_bytes => INT64
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| results | The log directories. |
| error_code | The error code, or 0 if there was no error. |
| log_dir | The absolute log directory path. |
| topics | The topics. |
| name | The topic name. |
| partitions | The partitions. |
| partition_index | The partition index. |
| partition_size | The size of the log segments in this partition in bytes. |
| offset_lag | The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or current replica's LEO (if it is the future log for the partition). |
| is_future_key | True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of the replica in the future. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| total_bytes | The total size in bytes of the volume the log directory is in. |
| usable_bytes | The usable size in bytes of the volume the log directory is in. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
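For illustration, a minimal sketch of a DescribeLogDirs request via the Java `Admin` client; the broker ids are placeholders (`allDescriptions()` requires a recent client release).

```java
import org.apache.kafka.clients.admin.Admin;

import java.util.List;
import java.util.Properties;

public class DescribeLogDirsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // One map entry per broker id; each value maps log_dir -> description
            // (per-partition size and offset_lag, plus total/usable bytes in v4).
            admin.describeLogDirs(List.of(0, 1)).allDescriptions().get()
                 .forEach((broker, dirs) -> System.out.println(broker + " -> " + dirs.keySet()));
        }
    }
}
```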
SaslAuthenticate API (Key: 36):
+ +Requests:
+
SaslAuthenticate Request (Version: 0) => auth_bytes 
+  auth_bytes => BYTES
+

Request header version: 1

| Field | Description |
| --- | --- |
| auth_bytes | The SASL authentication bytes from the client, as defined by the SASL mechanism. |
+
SaslAuthenticate Request (Version: 1) => auth_bytes 
+  auth_bytes => BYTES
+

Request header version: 1

| Field | Description |
| --- | --- |
| auth_bytes | The SASL authentication bytes from the client, as defined by the SASL mechanism. |
+
SaslAuthenticate Request (Version: 2) => auth_bytes _tagged_fields 
+  auth_bytes => COMPACT_BYTES
+

Request header version: 2

| Field | Description |
| --- | --- |
| auth_bytes | The SASL authentication bytes from the client, as defined by the SASL mechanism. |
| _tagged_fields | The tagged fields |
+Responses:
+
SaslAuthenticate Response (Version: 0) => error_code error_message auth_bytes 
+  error_code => INT16
+  error_message => NULLABLE_STRING
+  auth_bytes => BYTES
+

Response header version: 0

| Field | Description |
| --- | --- |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| auth_bytes | The SASL authentication bytes from the server, as defined by the SASL mechanism. |
+
SaslAuthenticate Response (Version: 1) => error_code error_message auth_bytes session_lifetime_ms 
+  error_code => INT16
+  error_message => NULLABLE_STRING
+  auth_bytes => BYTES
+  session_lifetime_ms => INT64
+

Response header version: 0

| Field | Description |
| --- | --- |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| auth_bytes | The SASL authentication bytes from the server, as defined by the SASL mechanism. |
| session_lifetime_ms | Number of milliseconds after which only re-authentication over the existing connection to create a new session can occur. |
+
SaslAuthenticate Response (Version: 2) => error_code error_message auth_bytes session_lifetime_ms _tagged_fields 
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  auth_bytes => COMPACT_BYTES
+  session_lifetime_ms => INT64
+

Response header version: 1

| Field | Description |
| --- | --- |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| auth_bytes | The SASL authentication bytes from the server, as defined by the SASL mechanism. |
| session_lifetime_ms | Number of milliseconds after which only re-authentication over the existing connection to create a new session can occur. |
| _tagged_fields | The tagged fields |
+
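Clients do not send SaslAuthenticate directly; the exchange is driven by the client's security configuration when a connection is opened. A minimal sketch with placeholder credentials and a SCRAM mechanism is shown below.

```java
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.config.SaslConfigs;

import java.util.Properties;

public class SaslClientConfigExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9093"); // placeholder address
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
        props.put(SaslConfigs.SASL_MECHANISM, "SCRAM-SHA-256");
        props.put(SaslConfigs.SASL_JAAS_CONFIG,
            "org.apache.kafka.common.security.scram.ScramLoginModule required "
                + "username=\"alice\" password=\"alice-secret\";"); // placeholder credentials
        // Opening any client connection with these settings triggers the
        // SaslHandshake/SaslAuthenticate exchange before regular requests are sent.
        try (Admin admin = Admin.create(props)) {
            System.out.println(admin.describeCluster().clusterId().get());
        }
    }
}
```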
CreatePartitions API (Key: 37):
+ +Requests:
+
CreatePartitions Request (Version: 0) => [topics] timeout_ms validate_only 
+  topics => name count [assignments] 
+    name => STRING
+    count => INT32
+    assignments => [broker_ids] 
+      broker_ids => INT32
+  timeout_ms => INT32
+  validate_only => BOOLEAN
+

Request header version: 1

| Field | Description |
| --- | --- |
| topics | Each topic that we want to create new partitions inside. |
| name | The topic name. |
| count | The new partition count. |
| assignments | The new partition assignments. |
| broker_ids | The assigned broker IDs. |
| timeout_ms | The time in ms to wait for the partitions to be created. |
| validate_only | If true, then validate the request, but don't actually increase the number of partitions. |
+
CreatePartitions Request (Version: 1) => [topics] timeout_ms validate_only 
+  topics => name count [assignments] 
+    name => STRING
+    count => INT32
+    assignments => [broker_ids] 
+      broker_ids => INT32
+  timeout_ms => INT32
+  validate_only => BOOLEAN
+

Request header version: 1

| Field | Description |
| --- | --- |
| topics | Each topic that we want to create new partitions inside. |
| name | The topic name. |
| count | The new partition count. |
| assignments | The new partition assignments. |
| broker_ids | The assigned broker IDs. |
| timeout_ms | The time in ms to wait for the partitions to be created. |
| validate_only | If true, then validate the request, but don't actually increase the number of partitions. |
+
CreatePartitions Request (Version: 2) => [topics] timeout_ms validate_only _tagged_fields 
+  topics => name count [assignments] _tagged_fields 
+    name => COMPACT_STRING
+    count => INT32
+    assignments => [broker_ids] _tagged_fields 
+      broker_ids => INT32
+  timeout_ms => INT32
+  validate_only => BOOLEAN
+

Request header version: 2

| Field | Description |
| --- | --- |
| topics | Each topic that we want to create new partitions inside. |
| name | The topic name. |
| count | The new partition count. |
| assignments | The new partition assignments. |
| broker_ids | The assigned broker IDs. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| timeout_ms | The time in ms to wait for the partitions to be created. |
| validate_only | If true, then validate the request, but don't actually increase the number of partitions. |
| _tagged_fields | The tagged fields |
+
CreatePartitions Request (Version: 3) => [topics] timeout_ms validate_only _tagged_fields 
+  topics => name count [assignments] _tagged_fields 
+    name => COMPACT_STRING
+    count => INT32
+    assignments => [broker_ids] _tagged_fields 
+      broker_ids => INT32
+  timeout_ms => INT32
+  validate_only => BOOLEAN
+

Request header version: 2

| Field | Description |
| --- | --- |
| topics | Each topic that we want to create new partitions inside. |
| name | The topic name. |
| count | The new partition count. |
| assignments | The new partition assignments. |
| broker_ids | The assigned broker IDs. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| timeout_ms | The time in ms to wait for the partitions to be created. |
| validate_only | If true, then validate the request, but don't actually increase the number of partitions. |
| _tagged_fields | The tagged fields |
+Responses:
+
CreatePartitions Response (Version: 0) => throttle_time_ms [results] 
+  throttle_time_ms => INT32
+  results => name error_code error_message 
+    name => STRING
+    error_code => INT16
+    error_message => NULLABLE_STRING
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The partition creation results for each topic. |
| name | The topic name. |
| error_code | The result error, or zero if there was no error. |
| error_message | The result message, or null if there was no error. |
+
CreatePartitions Response (Version: 1) => throttle_time_ms [results] 
+  throttle_time_ms => INT32
+  results => name error_code error_message 
+    name => STRING
+    error_code => INT16
+    error_message => NULLABLE_STRING
+

Response header version: 0

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The partition creation results for each topic. |
| name | The topic name. |
| error_code | The result error, or zero if there was no error. |
| error_message | The result message, or null if there was no error. |
+
CreatePartitions Response (Version: 2) => throttle_time_ms [results] _tagged_fields 
+  throttle_time_ms => INT32
+  results => name error_code error_message _tagged_fields 
+    name => COMPACT_STRING
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The partition creation results for each topic. |
| name | The topic name. |
| error_code | The result error, or zero if there was no error. |
| error_message | The result message, or null if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
CreatePartitions Response (Version: 3) => throttle_time_ms [results] _tagged_fields 
+  throttle_time_ms => INT32
+  results => name error_code error_message _tagged_fields 
+    name => COMPACT_STRING
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
| --- | --- |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The partition creation results for each topic. |
| name | The topic name. |
| error_code | The result error, or zero if there was no error. |
| error_message | The result message, or null if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
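For illustration, a minimal sketch of a CreatePartitions request via the Java `Admin` client; the topic name and target count are placeholders.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.NewPartitions;

import java.util.Map;
import java.util.Properties;

public class CreatePartitionsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // Raise the partition count of "orders" to 6; the broker chooses the
            // assignments unless they are supplied via increaseTo(count, assignments).
            admin.createPartitions(Map.of("orders", NewPartitions.increaseTo(6))).all().get();
        }
    }
}
```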
CreateDelegationToken API (Key: 38):
+ +Requests:
+
CreateDelegationToken Request (Version: 1) => [renewers] max_lifetime_ms 
+  renewers => principal_type principal_name 
+    principal_type => STRING
+    principal_name => STRING
+  max_lifetime_ms => INT64
+

Request header version: 1

| Field | Description |
| --- | --- |
| renewers | A list of those who are allowed to renew this token before it expires. |
| principal_type | The type of the Kafka principal. |
| principal_name | The name of the Kafka principal. |
| max_lifetime_ms | The maximum lifetime of the token in milliseconds, or -1 to use the server side default. |
+
CreateDelegationToken Request (Version: 2) => [renewers] max_lifetime_ms _tagged_fields 
+  renewers => principal_type principal_name _tagged_fields 
+    principal_type => COMPACT_STRING
+    principal_name => COMPACT_STRING
+  max_lifetime_ms => INT64
+

Request header version: 2

| Field | Description |
| --- | --- |
| renewers | A list of those who are allowed to renew this token before it expires. |
| principal_type | The type of the Kafka principal. |
| principal_name | The name of the Kafka principal. |
| _tagged_fields | The tagged fields |
| max_lifetime_ms | The maximum lifetime of the token in milliseconds, or -1 to use the server side default. |
| _tagged_fields | The tagged fields |
+
CreateDelegationToken Request (Version: 3) => owner_principal_type owner_principal_name [renewers] max_lifetime_ms _tagged_fields 
+  owner_principal_type => COMPACT_NULLABLE_STRING
+  owner_principal_name => COMPACT_NULLABLE_STRING
+  renewers => principal_type principal_name _tagged_fields 
+    principal_type => COMPACT_STRING
+    principal_name => COMPACT_STRING
+  max_lifetime_ms => INT64
+

Request header version: 2

| Field | Description |
| --- | --- |
| owner_principal_type | The principal type of the owner of the token. If it's null it defaults to the token request principal. |
| owner_principal_name | The principal name of the owner of the token. If it's null it defaults to the token request principal. |
| renewers | A list of those who are allowed to renew this token before it expires. |
| principal_type | The type of the Kafka principal. |
| principal_name | The name of the Kafka principal. |
| _tagged_fields | The tagged fields |
| max_lifetime_ms | The maximum lifetime of the token in milliseconds, or -1 to use the server side default. |
| _tagged_fields | The tagged fields |
+Responses:
+
CreateDelegationToken Response (Version: 1) => error_code principal_type principal_name issue_timestamp_ms expiry_timestamp_ms max_timestamp_ms token_id hmac throttle_time_ms 
+  error_code => INT16
+  principal_type => STRING
+  principal_name => STRING
+  issue_timestamp_ms => INT64
+  expiry_timestamp_ms => INT64
+  max_timestamp_ms => INT64
+  token_id => STRING
+  hmac => BYTES
+  throttle_time_ms => INT32
+

Response header version: 0

| Field | Description |
| --- | --- |
| error_code | The top-level error, or zero if there was no error. |
| principal_type | The principal type of the token owner. |
| principal_name | The name of the token owner. |
| issue_timestamp_ms | When this token was generated. |
| expiry_timestamp_ms | When this token expires. |
| max_timestamp_ms | The maximum lifetime of this token. |
| token_id | The token UUID. |
| hmac | HMAC of the delegation token. |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
+
CreateDelegationToken Response (Version: 2) => error_code principal_type principal_name issue_timestamp_ms expiry_timestamp_ms max_timestamp_ms token_id hmac throttle_time_ms _tagged_fields 
+  error_code => INT16
+  principal_type => COMPACT_STRING
+  principal_name => COMPACT_STRING
+  issue_timestamp_ms => INT64
+  expiry_timestamp_ms => INT64
+  max_timestamp_ms => INT64
+  token_id => COMPACT_STRING
+  hmac => COMPACT_BYTES
+  throttle_time_ms => INT32
+

Response header version: 1

| Field | Description |
| --- | --- |
| error_code | The top-level error, or zero if there was no error. |
| principal_type | The principal type of the token owner. |
| principal_name | The name of the token owner. |
| issue_timestamp_ms | When this token was generated. |
| expiry_timestamp_ms | When this token expires. |
| max_timestamp_ms | The maximum lifetime of this token. |
| token_id | The token UUID. |
| hmac | HMAC of the delegation token. |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| _tagged_fields | The tagged fields |
+
CreateDelegationToken Response (Version: 3) => error_code principal_type principal_name token_requester_principal_type token_requester_principal_name issue_timestamp_ms expiry_timestamp_ms max_timestamp_ms token_id hmac throttle_time_ms _tagged_fields 
+  error_code => INT16
+  principal_type => COMPACT_STRING
+  principal_name => COMPACT_STRING
+  token_requester_principal_type => COMPACT_STRING
+  token_requester_principal_name => COMPACT_STRING
+  issue_timestamp_ms => INT64
+  expiry_timestamp_ms => INT64
+  max_timestamp_ms => INT64
+  token_id => COMPACT_STRING
+  hmac => COMPACT_BYTES
+  throttle_time_ms => INT32
+

Response header version: 1

| Field | Description |
| --- | --- |
| error_code | The top-level error, or zero if there was no error. |
| principal_type | The principal type of the token owner. |
| principal_name | The name of the token owner. |
| token_requester_principal_type | The principal type of the requester of the token. |
| token_requester_principal_name | The principal name of the requester of the token. |
| issue_timestamp_ms | When this token was generated. |
| expiry_timestamp_ms | When this token expires. |
| max_timestamp_ms | The maximum lifetime of this token. |
| token_id | The token UUID. |
| hmac | HMAC of the delegation token. |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| _tagged_fields | The tagged fields |
+
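For illustration, a minimal sketch of a CreateDelegationToken request via the Java `Admin` client; the bootstrap address is a placeholder, and the connection must be authenticated (e.g. via SASL) for the broker to accept the request.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.security.token.delegation.DelegationToken;

import java.util.Properties;

public class CreateDelegationTokenExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9093"); // placeholder; add SASL settings as needed
        try (Admin admin = Admin.create(props)) {
            DelegationToken token = admin.createDelegationToken().delegationToken().get();
            // tokenInfo() carries the token_id and timestamps; hmac() is the secret
            // later used to renew or expire the token.
            System.out.println(token.tokenInfo().tokenId());
        }
    }
}
```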
RenewDelegationToken API (Key: 39):
+ +Requests:
+
RenewDelegationToken Request (Version: 1) => hmac renew_period_ms 
+  hmac => BYTES
+  renew_period_ms => INT64
+

Request header version: 1

| Field | Description |
| --- | --- |
| hmac | The HMAC of the delegation token to be renewed. |
| renew_period_ms | The renewal time period in milliseconds. |
+
RenewDelegationToken Request (Version: 2) => hmac renew_period_ms _tagged_fields 
+  hmac => COMPACT_BYTES
+  renew_period_ms => INT64
+

Request header version: 2

| Field | Description |
| --- | --- |
| hmac | The HMAC of the delegation token to be renewed. |
| renew_period_ms | The renewal time period in milliseconds. |
| _tagged_fields | The tagged fields |
+Responses:
+
RenewDelegationToken Response (Version: 1) => error_code expiry_timestamp_ms throttle_time_ms 
+  error_code => INT16
+  expiry_timestamp_ms => INT64
+  throttle_time_ms => INT32
+

Response header version: 0

| Field | Description |
| --- | --- |
| error_code | The error code, or 0 if there was no error. |
| expiry_timestamp_ms | The timestamp in milliseconds at which this token expires. |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
+
RenewDelegationToken Response (Version: 2) => error_code expiry_timestamp_ms throttle_time_ms _tagged_fields 
+  error_code => INT16
+  expiry_timestamp_ms => INT64
+  throttle_time_ms => INT32
+

Response header version: 1

| Field | Description |
| --- | --- |
| error_code | The error code, or 0 if there was no error. |
| expiry_timestamp_ms | The timestamp in milliseconds at which this token expires. |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| _tagged_fields | The tagged fields |
+
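For illustration, a minimal sketch of a RenewDelegationToken request via the Java `Admin` client; the token is created in the same snippet purely so there is an HMAC to renew.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.security.token.delegation.DelegationToken;

import java.util.Properties;

public class RenewDelegationTokenExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9093"); // placeholder; add SASL settings as needed
        try (Admin admin = Admin.create(props)) {
            DelegationToken token = admin.createDelegationToken().delegationToken().get();
            // Renewal is keyed by the token's HMAC; the result is the new expiry timestamp.
            long expiry = admin.renewDelegationToken(token.hmac()).expiryTimestamp().get();
            System.out.println("renewed until " + expiry);
        }
    }
}
```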
ExpireDelegationToken API (Key: 40):
+ +Requests:
+
ExpireDelegationToken Request (Version: 1) => hmac expiry_time_period_ms 
+  hmac => BYTES
+  expiry_time_period_ms => INT64
+

Request header version: 1

| Field | Description |
| --- | --- |
| hmac | The HMAC of the delegation token to be expired. |
| expiry_time_period_ms | The expiry time period in milliseconds. |
+
ExpireDelegationToken Request (Version: 2) => hmac expiry_time_period_ms _tagged_fields 
+  hmac => COMPACT_BYTES
+  expiry_time_period_ms => INT64
+

Request header version: 2

| Field | Description |
| --- | --- |
| hmac | The HMAC of the delegation token to be expired. |
| expiry_time_period_ms | The expiry time period in milliseconds. |
| _tagged_fields | The tagged fields |
+Responses:
+
ExpireDelegationToken Response (Version: 1) => error_code expiry_timestamp_ms throttle_time_ms 
+  error_code => INT16
+  expiry_timestamp_ms => INT64
+  throttle_time_ms => INT32
+

Response header version: 0

| Field | Description |
| --- | --- |
| error_code | The error code, or 0 if there was no error. |
| expiry_timestamp_ms | The timestamp in milliseconds at which this token expires. |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
+
ExpireDelegationToken Response (Version: 2) => error_code expiry_timestamp_ms throttle_time_ms _tagged_fields 
+  error_code => INT16
+  expiry_timestamp_ms => INT64
+  throttle_time_ms => INT32
+

Response header version: 1

| Field | Description |
| --- | --- |
| error_code | The error code, or 0 if there was no error. |
| expiry_timestamp_ms | The timestamp in milliseconds at which this token expires. |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| _tagged_fields | The tagged fields |
+
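For illustration, a minimal sketch of an ExpireDelegationToken request via the Java `Admin` client; as above, the token is created in the same snippet only to obtain an HMAC, and the -1 expiry period cancels it immediately.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ExpireDelegationTokenOptions;
import org.apache.kafka.common.security.token.delegation.DelegationToken;

import java.util.Properties;

public class ExpireDelegationTokenExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9093"); // placeholder; add SASL settings as needed
        try (Admin admin = Admin.create(props)) {
            DelegationToken token = admin.createDelegationToken().delegationToken().get();
            // An expiry time period of -1 invalidates the token immediately.
            admin.expireDelegationToken(token.hmac(),
                new ExpireDelegationTokenOptions().expiryTimePeriodMs(-1L)).expiryTimestamp().get();
        }
    }
}
```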
DescribeDelegationToken API (Key: 41):
+ +Requests:
+
DescribeDelegationToken Request (Version: 1) => [owners] 
+  owners => principal_type principal_name 
+    principal_type => STRING
+    principal_name => STRING
+

Request header version: 1

| Field | Description |
| --- | --- |
| owners | Each owner that we want to describe delegation tokens for, or null to describe all tokens. |
| principal_type | The owner principal type. |
| principal_name | The owner principal name. |
+
DescribeDelegationToken Request (Version: 2) => [owners] _tagged_fields 
+  owners => principal_type principal_name _tagged_fields 
+    principal_type => COMPACT_STRING
+    principal_name => COMPACT_STRING
+

Request header version: 2

| Field | Description |
|---|---|
| owners | Each owner that we want to describe delegation tokens for, or null to describe all tokens. |
| principal_type | The owner principal type. |
| principal_name | The owner principal name. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
DescribeDelegationToken Request (Version: 3) => [owners] _tagged_fields 
+  owners => principal_type principal_name _tagged_fields 
+    principal_type => COMPACT_STRING
+    principal_name => COMPACT_STRING
+

Request header version: 2

| Field | Description |
|---|---|
| owners | Each owner that we want to describe delegation tokens for, or null to describe all tokens. |
| principal_type | The owner principal type. |
| principal_name | The owner principal name. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+Responses:
+
DescribeDelegationToken Response (Version: 1) => error_code [tokens] throttle_time_ms 
+  error_code => INT16
+  tokens => principal_type principal_name issue_timestamp expiry_timestamp max_timestamp token_id hmac [renewers] 
+    principal_type => STRING
+    principal_name => STRING
+    issue_timestamp => INT64
+    expiry_timestamp => INT64
+    max_timestamp => INT64
+    token_id => STRING
+    hmac => BYTES
+    renewers => principal_type principal_name 
+      principal_type => STRING
+      principal_name => STRING
+  throttle_time_ms => INT32
+

Response header version: 0

| Field | Description |
|---|---|
| error_code | The error code, or 0 if there was no error. |
| tokens | The tokens. |
| principal_type | The token principal type. |
| principal_name | The token principal name. |
| issue_timestamp | The token issue timestamp in milliseconds. |
| expiry_timestamp | The token expiry timestamp in milliseconds. |
| max_timestamp | The token maximum timestamp length in milliseconds. |
| token_id | The token ID. |
| hmac | The token HMAC. |
| renewers | Those who are able to renew this token before it expires. |
| principal_type | The renewer principal type. |
| principal_name | The renewer principal name. |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
+
+
DescribeDelegationToken Response (Version: 2) => error_code [tokens] throttle_time_ms _tagged_fields 
+  error_code => INT16
+  tokens => principal_type principal_name issue_timestamp expiry_timestamp max_timestamp token_id hmac [renewers] _tagged_fields 
+    principal_type => COMPACT_STRING
+    principal_name => COMPACT_STRING
+    issue_timestamp => INT64
+    expiry_timestamp => INT64
+    max_timestamp => INT64
+    token_id => COMPACT_STRING
+    hmac => COMPACT_BYTES
+    renewers => principal_type principal_name _tagged_fields 
+      principal_type => COMPACT_STRING
+      principal_name => COMPACT_STRING
+  throttle_time_ms => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| error_code | The error code, or 0 if there was no error. |
| tokens | The tokens. |
| principal_type | The token principal type. |
| principal_name | The token principal name. |
| issue_timestamp | The token issue timestamp in milliseconds. |
| expiry_timestamp | The token expiry timestamp in milliseconds. |
| max_timestamp | The token maximum timestamp length in milliseconds. |
| token_id | The token ID. |
| hmac | The token HMAC. |
| renewers | Those who are able to renew this token before it expires. |
| principal_type | The renewer principal type. |
| principal_name | The renewer principal name. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| _tagged_fields | The tagged fields |
+
+
DescribeDelegationToken Response (Version: 3) => error_code [tokens] throttle_time_ms _tagged_fields 
+  error_code => INT16
+  tokens => principal_type principal_name token_requester_principal_type token_requester_principal_name issue_timestamp expiry_timestamp max_timestamp token_id hmac [renewers] _tagged_fields 
+    principal_type => COMPACT_STRING
+    principal_name => COMPACT_STRING
+    token_requester_principal_type => COMPACT_STRING
+    token_requester_principal_name => COMPACT_STRING
+    issue_timestamp => INT64
+    expiry_timestamp => INT64
+    max_timestamp => INT64
+    token_id => COMPACT_STRING
+    hmac => COMPACT_BYTES
+    renewers => principal_type principal_name _tagged_fields 
+      principal_type => COMPACT_STRING
+      principal_name => COMPACT_STRING
+  throttle_time_ms => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| error_code | The error code, or 0 if there was no error. |
| tokens | The tokens. |
| principal_type | The token principal type. |
| principal_name | The token principal name. |
| token_requester_principal_type | The principal type of the requester of the token. |
| token_requester_principal_name | The principal name of the requester of the token. |
| issue_timestamp | The token issue timestamp in milliseconds. |
| expiry_timestamp | The token expiry timestamp in milliseconds. |
| max_timestamp | The token maximum timestamp length in milliseconds. |
| token_id | The token ID. |
| hmac | The token HMAC. |
| renewers | Those who are able to renew this token before it expires. |
| principal_type | The renewer principal type. |
| principal_name | The renewer principal name. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| _tagged_fields | The tagged fields |
+
+
DeleteGroups API (Key: 42):
+ +Requests:
+
DeleteGroups Request (Version: 0) => [groups_names] 
+  groups_names => STRING
+

Request header version: 1

| Field | Description |
|---|---|
| groups_names | The group names to delete. |
+
+
DeleteGroups Request (Version: 1) => [groups_names] 
+  groups_names => STRING
+

Request header version: 1

| Field | Description |
|---|---|
| groups_names | The group names to delete. |
+
+
DeleteGroups Request (Version: 2) => [groups_names] _tagged_fields 
+  groups_names => COMPACT_STRING
+

Request header version: 2

| Field | Description |
|---|---|
| groups_names | The group names to delete. |
| _tagged_fields | The tagged fields |
+
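In practice most applications do not hand-roll this request; the Java Admin client, for example, issues DeleteGroups on your behalf and negotiates the request version. A hedged sketch (broker address and group name are placeholders):

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class DeleteGroupsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        try (Admin admin = Admin.create(props)) {
            // The collection below corresponds to the groups_names array in the request schema.
            admin.deleteConsumerGroups(List.of("my-group")).all().get();
        }
    }
}
```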
+Responses:
+
DeleteGroups Response (Version: 0) => throttle_time_ms [results] 
+  throttle_time_ms => INT32
+  results => group_id error_code 
+    group_id => STRING
+    error_code => INT16
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The deletion results. |
| group_id | The group id. |
| error_code | The deletion error, or 0 if the deletion succeeded. |
+
+
DeleteGroups Response (Version: 1) => throttle_time_ms [results] 
+  throttle_time_ms => INT32
+  results => group_id error_code 
+    group_id => STRING
+    error_code => INT16
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The deletion results. |
| group_id | The group id. |
| error_code | The deletion error, or 0 if the deletion succeeded. |
+
+
DeleteGroups Response (Version: 2) => throttle_time_ms [results] _tagged_fields 
+  throttle_time_ms => INT32
+  results => group_id error_code _tagged_fields 
+    group_id => COMPACT_STRING
+    error_code => INT16
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The deletion results. |
| group_id | The group id. |
| error_code | The deletion error, or 0 if the deletion succeeded. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ElectLeaders API (Key: 43):
+ +Requests:
+
ElectLeaders Request (Version: 0) => [topic_partitions] timeout_ms 
+  topic_partitions => topic [partitions] 
+    topic => STRING
+    partitions => INT32
+  timeout_ms => INT32
+

Request header version: 1

| Field | Description |
|---|---|
| topic_partitions | The topic partitions to elect leaders. |
| topic | The name of a topic. |
| partitions | The partitions of this topic whose leader should be elected. |
| timeout_ms | The time in ms to wait for the election to complete. |
+
+
ElectLeaders Request (Version: 1) => election_type [topic_partitions] timeout_ms 
+  election_type => INT8
+  topic_partitions => topic [partitions] 
+    topic => STRING
+    partitions => INT32
+  timeout_ms => INT32
+

Request header version: 1

| Field | Description |
|---|---|
| election_type | Type of elections to conduct for the partition. A value of '0' elects the preferred replica. A value of '1' elects the first live replica if there are no in-sync replica. |
| topic_partitions | The topic partitions to elect leaders. |
| topic | The name of a topic. |
| partitions | The partitions of this topic whose leader should be elected. |
| timeout_ms | The time in ms to wait for the election to complete. |
+
+
ElectLeaders Request (Version: 2) => election_type [topic_partitions] timeout_ms _tagged_fields 
+  election_type => INT8
+  topic_partitions => topic [partitions] _tagged_fields 
+    topic => COMPACT_STRING
+    partitions => INT32
+  timeout_ms => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| election_type | Type of elections to conduct for the partition. A value of '0' elects the preferred replica. A value of '1' elects the first live replica if there are no in-sync replica. |
| topic_partitions | The topic partitions to elect leaders. |
| topic | The name of a topic. |
| partitions | The partitions of this topic whose leader should be elected. |
| _tagged_fields | The tagged fields |
| timeout_ms | The time in ms to wait for the election to complete. |
| _tagged_fields | The tagged fields |
+
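As a hedged illustration of how the election_type values map onto a client API, the Java Admin client exposes preferred (0) and unclean (1) elections through an enum. Broker address, topic and partition below are placeholders:

```java
import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.TopicPartition;

public class ElectLeadersExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        try (Admin admin = Admin.create(props)) {
            // ElectionType.PREFERRED corresponds to election_type = 0 above,
            // ElectionType.UNCLEAN to election_type = 1.
            admin.electLeaders(ElectionType.PREFERRED,
                    Set.of(new TopicPartition("my-topic", 0)))
                 .partitions().get();
        }
    }
}
```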
+Responses:
+
ElectLeaders Response (Version: 0) => throttle_time_ms [replica_election_results] 
+  throttle_time_ms => INT32
+  replica_election_results => topic [partition_result] 
+    topic => STRING
+    partition_result => partition_id error_code error_message 
+      partition_id => INT32
+      error_code => INT16
+      error_message => NULLABLE_STRING
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| replica_election_results | The election results, or an empty array if the requester did not have permission and the request asks for all partitions. |
| topic | The topic name. |
| partition_result | The results for each partition. |
| partition_id | The partition id. |
| error_code | The result error, or zero if there was no error. |
| error_message | The result message, or null if there was no error. |
+
+
ElectLeaders Response (Version: 1) => throttle_time_ms error_code [replica_election_results] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  replica_election_results => topic [partition_result] 
+    topic => STRING
+    partition_result => partition_id error_code error_message 
+      partition_id => INT32
+      error_code => INT16
+      error_message => NULLABLE_STRING
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| replica_election_results | The election results, or an empty array if the requester did not have permission and the request asks for all partitions. |
| topic | The topic name. |
| partition_result | The results for each partition. |
| partition_id | The partition id. |
| error_code | The result error, or zero if there was no error. |
| error_message | The result message, or null if there was no error. |
+
+
ElectLeaders Response (Version: 2) => throttle_time_ms error_code [replica_election_results] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  replica_election_results => topic [partition_result] _tagged_fields 
+    topic => COMPACT_STRING
+    partition_result => partition_id error_code error_message _tagged_fields 
+      partition_id => INT32
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| replica_election_results | The election results, or an empty array if the requester did not have permission and the request asks for all partitions. |
| topic | The topic name. |
| partition_result | The results for each partition. |
| partition_id | The partition id. |
| error_code | The result error, or zero if there was no error. |
| error_message | The result message, or null if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
IncrementalAlterConfigs API (Key: 44):
+ +Requests:
+
IncrementalAlterConfigs Request (Version: 0) => [resources] validate_only 
+  resources => resource_type resource_name [configs] 
+    resource_type => INT8
+    resource_name => STRING
+    configs => name config_operation value 
+      name => STRING
+      config_operation => INT8
+      value => NULLABLE_STRING
+  validate_only => BOOLEAN
+

Request header version: 1

| Field | Description |
|---|---|
| resources | The incremental updates for each resource. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| configs | The configurations. |
| name | The configuration key name. |
| config_operation | The type (Set, Delete, Append, Subtract) of operation. |
| value | The value to set for the configuration key. |
| validate_only | True if we should validate the request, but not change the configurations. |
+
+
IncrementalAlterConfigs Request (Version: 1) => [resources] validate_only _tagged_fields 
+  resources => resource_type resource_name [configs] _tagged_fields 
+    resource_type => INT8
+    resource_name => COMPACT_STRING
+    configs => name config_operation value _tagged_fields 
+      name => COMPACT_STRING
+      config_operation => INT8
+      value => COMPACT_NULLABLE_STRING
+  validate_only => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| resources | The incremental updates for each resource. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| configs | The configurations. |
| name | The configuration key name. |
| config_operation | The type (Set, Delete, Append, Subtract) of operation. |
| value | The value to set for the configuration key. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| validate_only | True if we should validate the request, but not change the configurations. |
| _tagged_fields | The tagged fields |
+
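A hedged sketch of how the config_operation values surface in the Java Admin client, whose AlterConfigOp.OpType SET/DELETE/APPEND/SUBTRACT values correspond to the operation codes above. Broker, topic and config values are placeholders:

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class IncrementalAlterConfigsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        try (Admin admin = Admin.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic");
            // SET one configuration key incrementally, leaving all other keys untouched.
            AlterConfigOp op = new AlterConfigOp(
                    new ConfigEntry("retention.ms", "86400000"), AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(Map.of(topic, List.of(op))).all().get();
        }
    }
}
```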
+Responses:
+
IncrementalAlterConfigs Response (Version: 0) => throttle_time_ms [responses] 
+  throttle_time_ms => INT32
+  responses => error_code error_message resource_type resource_name 
+    error_code => INT16
+    error_message => NULLABLE_STRING
+    resource_type => INT8
+    resource_name => STRING
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| responses | The responses for each resource. |
| error_code | The resource error code. |
| error_message | The resource error message, or null if there was no error. |
| resource_type | The resource type. |
| resource_name | The resource name. |
+
+
IncrementalAlterConfigs Response (Version: 1) => throttle_time_ms [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  responses => error_code error_message resource_type resource_name _tagged_fields 
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    resource_type => INT8
+    resource_name => COMPACT_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| responses | The responses for each resource. |
| error_code | The resource error code. |
| error_message | The resource error message, or null if there was no error. |
| resource_type | The resource type. |
| resource_name | The resource name. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
AlterPartitionReassignments API (Key: 45):
+ +Requests:
+
AlterPartitionReassignments Request (Version: 0) => timeout_ms [topics] _tagged_fields 
+  timeout_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index [replicas] _tagged_fields 
+      partition_index => INT32
+      replicas => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| timeout_ms | The time in ms to wait for the request to complete. |
| topics | The topics to reassign. |
| name | The topic name. |
| partitions | The partitions to reassign. |
| partition_index | The partition index. |
| replicas | The replicas to place the partitions on, or null to cancel a pending reassignment for this partition. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
AlterPartitionReassignments Request (Version: 1) => timeout_ms allow_replication_factor_change [topics] _tagged_fields 
+  timeout_ms => INT32
+  allow_replication_factor_change => BOOLEAN
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index [replicas] _tagged_fields 
+      partition_index => INT32
+      replicas => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| timeout_ms | The time in ms to wait for the request to complete. |
| allow_replication_factor_change | The option indicating whether changing the replication factor of any given partition as part of this request is a valid move. |
| topics | The topics to reassign. |
| name | The topic name. |
| partitions | The partitions to reassign. |
| partition_index | The partition index. |
| replicas | The replicas to place the partitions on, or null to cancel a pending reassignment for this partition. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
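A hedged sketch of driving this request from the Java Admin client: a present reassignment sets the target replica list, while an empty Optional cancels a pending reassignment, mirroring the nullable replicas array above. Broker, topic and replica IDs are placeholders:

```java
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewPartitionReassignment;
import org.apache.kafka.common.TopicPartition;

public class ReassignPartitionExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("my-topic", 0);
            // Move my-topic partition 0 onto brokers 1, 2 and 3.
            admin.alterPartitionReassignments(
                    Map.of(tp, Optional.of(new NewPartitionReassignment(List.of(1, 2, 3))))
            ).all().get();
        }
    }
}
```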
+Responses:
+
AlterPartitionReassignments Response (Version: 0) => throttle_time_ms error_code error_message [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  responses => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index error_code error_message _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top-level error code, or 0 if there was no error. |
| error_message | The top-level error message, or null if there was no error. |
| responses | The responses to topics to reassign. |
| name | The topic name. |
| partitions | The responses to partitions to reassign. |
| partition_index | The partition index. |
| error_code | The error code for this partition, or 0 if there was no error. |
| error_message | The error message for this partition, or null if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
AlterPartitionReassignments Response (Version: 1) => throttle_time_ms allow_replication_factor_change error_code error_message [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  allow_replication_factor_change => BOOLEAN
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  responses => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index error_code error_message _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| allow_replication_factor_change | The option indicating whether changing the replication factor of any given partition as part of the request was allowed. |
| error_code | The top-level error code, or 0 if there was no error. |
| error_message | The top-level error message, or null if there was no error. |
| responses | The responses to topics to reassign. |
| name | The topic name. |
| partitions | The responses to partitions to reassign. |
| partition_index | The partition index. |
| error_code | The error code for this partition, or 0 if there was no error. |
| error_message | The error message for this partition, or null if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ListPartitionReassignments API (Key: 46):
+ +Requests:
+
ListPartitionReassignments Request (Version: 0) => timeout_ms [topics] _tagged_fields 
+  timeout_ms => INT32
+  topics => name [partition_indexes] _tagged_fields 
+    name => COMPACT_STRING
+    partition_indexes => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| timeout_ms | The time in ms to wait for the request to complete. |
| topics | The topics to list partition reassignments for, or null to list everything. |
| name | The topic name. |
| partition_indexes | The partitions to list partition reassignments for. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+Responses:
+
ListPartitionReassignments Response (Version: 0) => throttle_time_ms error_code error_message [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index [replicas] [adding_replicas] [removing_replicas] _tagged_fields 
+      partition_index => INT32
+      replicas => INT32
+      adding_replicas => INT32
+      removing_replicas => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top-level error code, or 0 if there was no error. |
| error_message | The top-level error message, or null if there was no error. |
| topics | The ongoing reassignments for each topic. |
| name | The topic name. |
| partitions | The ongoing reassignments for each partition. |
| partition_index | The index of the partition. |
| replicas | The current replica set. |
| adding_replicas | The set of replicas we are currently adding. |
| removing_replicas | The set of replicas we are currently removing. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
OffsetDelete API (Key: 47):
+ +Requests:
+
OffsetDelete Request (Version: 0) => group_id [topics] 
+  group_id => STRING
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index 
+      partition_index => INT32
+

Request header version: 1

| Field | Description |
|---|---|
| group_id | The unique group identifier. |
| topics | The topics to delete offsets for. |
| name | The topic name. |
| partitions | Each partition to delete offsets for. |
| partition_index | The partition index. |
+
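As a hedged usage sketch, the Java Admin client sends this request when deleting committed offsets for a group; the group ID and partition set below correspond to the group_id, topics and partitions fields above (broker, group and topic names are placeholders):

```java
import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.TopicPartition;

public class OffsetDeleteExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        try (Admin admin = Admin.create(props)) {
            // Remove the committed offset of my-topic-0 for consumer group "my-group".
            admin.deleteConsumerGroupOffsets("my-group",
                    Set.of(new TopicPartition("my-topic", 0))).all().get();
        }
    }
}
```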
+Responses:
+
OffsetDelete Response (Version: 0) => error_code throttle_time_ms [topics] 
+  error_code => INT16
+  throttle_time_ms => INT32
+  topics => name [partitions] 
+    name => STRING
+    partitions => partition_index error_code 
+      partition_index => INT32
+      error_code => INT16
+

Response header version: 0

| Field | Description |
|---|---|
| error_code | The top-level error code, or 0 if there was no error. |
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | The responses for each topic. |
| name | The topic name. |
| partitions | The responses for each partition in the topic. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
+
+
DescribeClientQuotas API (Key: 48):
+ +Requests:
+
DescribeClientQuotas Request (Version: 0) => [components] strict 
+  components => entity_type match_type match 
+    entity_type => STRING
+    match_type => INT8
+    match => NULLABLE_STRING
+  strict => BOOLEAN
+

Request header version: 1

| Field | Description |
|---|---|
| components | Filter components to apply to quota entities. |
| entity_type | The entity type that the filter component applies to. |
| match_type | How to match the entity {0 = exact name, 1 = default name, 2 = any specified name}. |
| match | The string to match against, or null if unused for the match type. |
| strict | Whether the match is strict, i.e. should exclude entities with unspecified entity types. |
+
+
DescribeClientQuotas Request (Version: 1) => [components] strict _tagged_fields 
+  components => entity_type match_type match _tagged_fields 
+    entity_type => COMPACT_STRING
+    match_type => INT8
+    match => COMPACT_NULLABLE_STRING
+  strict => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| components | Filter components to apply to quota entities. |
| entity_type | The entity type that the filter component applies to. |
| match_type | How to match the entity {0 = exact name, 1 = default name, 2 = any specified name}. |
| match | The string to match against, or null if unused for the match type. |
| _tagged_fields | The tagged fields |
| strict | Whether the match is strict, i.e. should exclude entities with unspecified entity types. |
| _tagged_fields | The tagged fields |
+
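A hedged sketch of building these filter components with the Java Admin client, where the three factory methods correspond to the match_type values above (exact name, default name, any specified name). Broker address and user name are placeholders:

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.quota.ClientQuotaEntity;
import org.apache.kafka.common.quota.ClientQuotaFilter;
import org.apache.kafka.common.quota.ClientQuotaFilterComponent;

public class DescribeClientQuotasExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        try (Admin admin = Admin.create(props)) {
            // ofEntity ~ match_type 0, ofDefaultEntity ~ match_type 1, ofEntityType ~ match_type 2.
            ClientQuotaFilterComponent byUser =
                    ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, "alice");
            admin.describeClientQuotas(ClientQuotaFilter.contains(List.of(byUser)))
                 .entities().get()
                 .forEach((entity, quotas) -> System.out.println(entity + " -> " + quotas));
        }
    }
}
```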
+Responses:
+
DescribeClientQuotas Response (Version: 0) => throttle_time_ms error_code error_message [entries] 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => NULLABLE_STRING
+  entries => [entity] [values] 
+    entity => entity_type entity_name 
+      entity_type => STRING
+      entity_name => NULLABLE_STRING
+    values => key value 
+      key => STRING
+      value => FLOAT64
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or `0` if the quota description succeeded. |
| error_message | The error message, or `null` if the quota description succeeded. |
| entries | A result entry. |
| entity | The quota entity description. |
| entity_type | The entity type. |
| entity_name | The entity name, or null if the default. |
| values | The quota values for the entity. |
| key | The quota configuration key. |
| value | The quota configuration value. |
+
+
DescribeClientQuotas Response (Version: 1) => throttle_time_ms error_code error_message [entries] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  entries => [entity] [values] _tagged_fields 
+    entity => entity_type entity_name _tagged_fields 
+      entity_type => COMPACT_STRING
+      entity_name => COMPACT_NULLABLE_STRING
+    values => key value _tagged_fields 
+      key => COMPACT_STRING
+      value => FLOAT64
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or `0` if the quota description succeeded. |
| error_message | The error message, or `null` if the quota description succeeded. |
| entries | A result entry. |
| entity | The quota entity description. |
| entity_type | The entity type. |
| entity_name | The entity name, or null if the default. |
| _tagged_fields | The tagged fields |
| values | The quota values for the entity. |
| key | The quota configuration key. |
| value | The quota configuration value. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
AlterClientQuotas API (Key: 49):
+ +Requests:
+
AlterClientQuotas Request (Version: 0) => [entries] validate_only 
+  entries => [entity] [ops] 
+    entity => entity_type entity_name 
+      entity_type => STRING
+      entity_name => NULLABLE_STRING
+    ops => key value remove 
+      key => STRING
+      value => FLOAT64
+      remove => BOOLEAN
+  validate_only => BOOLEAN
+

Request header version: 1

| Field | Description |
|---|---|
| entries | The quota configuration entries to alter. |
| entity | The quota entity to alter. |
| entity_type | The entity type. |
| entity_name | The name of the entity, or null if the default. |
| ops | An individual quota configuration entry to alter. |
| key | The quota configuration key. |
| value | The value to set, otherwise ignored if the value is to be removed. |
| remove | Whether the quota configuration value should be removed, otherwise set. |
| validate_only | Whether the alteration should be validated, but not performed. |
+
+
AlterClientQuotas Request (Version: 1) => [entries] validate_only _tagged_fields 
+  entries => [entity] [ops] _tagged_fields 
+    entity => entity_type entity_name _tagged_fields 
+      entity_type => COMPACT_STRING
+      entity_name => COMPACT_NULLABLE_STRING
+    ops => key value remove _tagged_fields 
+      key => COMPACT_STRING
+      value => FLOAT64
+      remove => BOOLEAN
+  validate_only => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| entries | The quota configuration entries to alter. |
| entity | The quota entity to alter. |
| entity_type | The entity type. |
| entity_name | The name of the entity, or null if the default. |
| _tagged_fields | The tagged fields |
| ops | An individual quota configuration entry to alter. |
| key | The quota configuration key. |
| value | The value to set, otherwise ignored if the value is to be removed. |
| remove | Whether the quota configuration value should be removed, otherwise set. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| validate_only | Whether the alteration should be validated, but not performed. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
AlterClientQuotas Response (Version: 0) => throttle_time_ms [entries] 
+  throttle_time_ms => INT32
+  entries => error_code error_message [entity] 
+    error_code => INT16
+    error_message => NULLABLE_STRING
+    entity => entity_type entity_name 
+      entity_type => STRING
+      entity_name => NULLABLE_STRING
+

Response header version: 0

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| entries | The quota configuration entries to alter. |
| error_code | The error code, or `0` if the quota alteration succeeded. |
| error_message | The error message, or `null` if the quota alteration succeeded. |
| entity | The quota entity to alter. |
| entity_type | The entity type. |
| entity_name | The name of the entity, or null if the default. |
+
+
AlterClientQuotas Response (Version: 1) => throttle_time_ms [entries] _tagged_fields 
+  throttle_time_ms => INT32
+  entries => error_code error_message [entity] _tagged_fields 
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    entity => entity_type entity_name _tagged_fields 
+      entity_type => COMPACT_STRING
+      entity_name => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| entries | The quota configuration entries to alter. |
| error_code | The error code, or `0` if the quota alteration succeeded. |
| error_message | The error message, or `null` if the quota alteration succeeded. |
| entity | The quota entity to alter. |
| entity_type | The entity type. |
| entity_name | The name of the entity, or null if the default. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
DescribeUserScramCredentials API (Key: 50):
+ +Requests:
+
DescribeUserScramCredentials Request (Version: 0) => [users] _tagged_fields 
+  users => name _tagged_fields 
+    name => COMPACT_STRING
+

Request header version: 2

| Field | Description |
|---|---|
| users | The users to describe, or null/empty to describe all users. |
| name | The user name. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+Responses:
+
DescribeUserScramCredentials Response (Version: 0) => throttle_time_ms error_code error_message [results] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  results => user error_code error_message [credential_infos] _tagged_fields 
+    user => COMPACT_STRING
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    credential_infos => mechanism iterations _tagged_fields 
+      mechanism => INT8
+      iterations => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The message-level error code, 0 except for user authorization or infrastructure issues. |
| error_message | The message-level error message, if any. |
| results | The results for descriptions, one per user. |
| user | The user name. |
| error_code | The user-level error code. |
| error_message | The user-level error message, if any. |
| credential_infos | The mechanism and related information associated with the user's SCRAM credentials. |
| mechanism | The SCRAM mechanism. |
| iterations | The number of iterations used in the SCRAM credential. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
AlterUserScramCredentials API (Key: 51):
+ +Requests:
+
AlterUserScramCredentials Request (Version: 0) => [deletions] [upsertions] _tagged_fields 
+  deletions => name mechanism _tagged_fields 
+    name => COMPACT_STRING
+    mechanism => INT8
+  upsertions => name mechanism iterations salt salted_password _tagged_fields 
+    name => COMPACT_STRING
+    mechanism => INT8
+    iterations => INT32
+    salt => COMPACT_BYTES
+    salted_password => COMPACT_BYTES
+

Request header version: 2

| Field | Description |
|---|---|
| deletions | The SCRAM credentials to remove. |
| name | The user name. |
| mechanism | The SCRAM mechanism. |
| _tagged_fields | The tagged fields |
| upsertions | The SCRAM credentials to update/insert. |
| name | The user name. |
| mechanism | The SCRAM mechanism. |
| iterations | The number of iterations. |
| salt | A random salt generated by the client. |
| salted_password | The salted password. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
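A hedged sketch of an upsertion through the Java Admin client; it is the client, not the caller, that derives the salt and salted_password fields of the request from the password supplied here, while mechanism and iterations map directly. Broker, user and password are placeholders:

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ScramCredentialInfo;
import org.apache.kafka.clients.admin.ScramMechanism;
import org.apache.kafka.clients.admin.UserScramCredentialUpsertion;

public class AlterScramExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        try (Admin admin = Admin.create(props)) {
            // SCRAM-SHA-256 with 8192 iterations for user "alice".
            ScramCredentialInfo info = new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 8192);
            admin.alterUserScramCredentials(
                    List.of(new UserScramCredentialUpsertion("alice", info, "alice-secret"))
            ).all().get();
        }
    }
}
```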
+Responses:
+
AlterUserScramCredentials Response (Version: 0) => throttle_time_ms [results] _tagged_fields 
+  throttle_time_ms => INT32
+  results => user error_code error_message _tagged_fields 
+    user => COMPACT_STRING
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| results | The results for deletions and alterations, one per affected user. |
| user | The user name. |
| error_code | The error code. |
| error_message | The error message, if any. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
DescribeQuorum API (Key: 55):
+ +Requests:
+
DescribeQuorum Request (Version: 0) => [topics] _tagged_fields 
+  topics => topic_name [partitions] _tagged_fields 
+    topic_name => COMPACT_STRING
+    partitions => partition_index _tagged_fields 
+      partition_index => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| topics | The topics to describe. |
| topic_name | The topic name. |
| partitions | The partitions to describe. |
| partition_index | The partition index. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
DescribeQuorum Request (Version: 1) => [topics] _tagged_fields 
+  topics => topic_name [partitions] _tagged_fields 
+    topic_name => COMPACT_STRING
+    partitions => partition_index _tagged_fields 
+      partition_index => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| topics | The topics to describe. |
| topic_name | The topic name. |
| partitions | The partitions to describe. |
| partition_index | The partition index. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
DescribeQuorum Request (Version: 2) => [topics] _tagged_fields 
+  topics => topic_name [partitions] _tagged_fields 
+    topic_name => COMPACT_STRING
+    partitions => partition_index _tagged_fields 
+      partition_index => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| topics | The topics to describe. |
| topic_name | The topic name. |
| partitions | The partitions to describe. |
| partition_index | The partition index. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
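As a hedged usage sketch, recent Java Admin clients expose this API through describeMetadataQuorum(); leaderId(), voters() and observers() on the returned quorum description surface the leader_id, current_voters and observers fields of the responses shown below. Broker address is a placeholder:

```java
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.QuorumInfo;

public class DescribeQuorumExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        try (Admin admin = Admin.create(props)) {
            QuorumInfo quorum = admin.describeMetadataQuorum().quorumInfo().get();
            System.out.println("leader = " + quorum.leaderId());
            // Each voter reports its replica ID and last known log end offset.
            quorum.voters().forEach(v ->
                    System.out.println("voter " + v.replicaId() + " LEO " + v.logEndOffset()));
        }
    }
}
```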
+Responses:
+
DescribeQuorum Response (Version: 0) => error_code [topics] _tagged_fields 
+  error_code => INT16
+  topics => topic_name [partitions] _tagged_fields 
+    topic_name => COMPACT_STRING
+    partitions => partition_index error_code leader_id leader_epoch high_watermark [current_voters] [observers] _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      leader_id => INT32
+      leader_epoch => INT32
+      high_watermark => INT64
+      current_voters => replica_id log_end_offset _tagged_fields 
+        replica_id => INT32
+        log_end_offset => INT64
+      observers => replica_id log_end_offset _tagged_fields 
+        replica_id => INT32
+        log_end_offset => INT64
+

Response header version: 1

| Field | Description |
|---|---|
| error_code | The top level error code. |
| topics | The response from the describe quorum API. |
| topic_name | The topic name. |
| partitions | The partition data. |
| partition_index | The partition index. |
| error_code | The partition error code. |
| leader_id | The ID of the current leader or -1 if the leader is unknown. |
| leader_epoch | The latest known leader epoch. |
| high_watermark | The high water mark. |
| current_voters | The current voters of the partition. |
| replica_id | The ID of the replica. |
| log_end_offset | The last known log end offset of the follower or -1 if it is unknown. |
| _tagged_fields | The tagged fields |
| observers | The observers of the partition. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
DescribeQuorum Response (Version: 1) => error_code [topics] _tagged_fields 
+  error_code => INT16
+  topics => topic_name [partitions] _tagged_fields 
+    topic_name => COMPACT_STRING
+    partitions => partition_index error_code leader_id leader_epoch high_watermark [current_voters] [observers] _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      leader_id => INT32
+      leader_epoch => INT32
+      high_watermark => INT64
+      current_voters => replica_id log_end_offset last_fetch_timestamp last_caught_up_timestamp _tagged_fields 
+        replica_id => INT32
+        log_end_offset => INT64
+        last_fetch_timestamp => INT64
+        last_caught_up_timestamp => INT64
+      observers => replica_id log_end_offset last_fetch_timestamp last_caught_up_timestamp _tagged_fields 
+        replica_id => INT32
+        log_end_offset => INT64
+        last_fetch_timestamp => INT64
+        last_caught_up_timestamp => INT64
+

Response header version: 1

| Field | Description |
|---|---|
| error_code | The top level error code. |
| topics | The response from the describe quorum API. |
| topic_name | The topic name. |
| partitions | The partition data. |
| partition_index | The partition index. |
| error_code | The partition error code. |
| leader_id | The ID of the current leader or -1 if the leader is unknown. |
| leader_epoch | The latest known leader epoch. |
| high_watermark | The high water mark. |
| current_voters | The current voters of the partition. |
| replica_id | The ID of the replica. |
| log_end_offset | The last known log end offset of the follower or -1 if it is unknown. |
| last_fetch_timestamp | The last known leader wall clock time when a follower fetched from the leader. This is reported as -1 both for the current leader or if it is unknown for a voter. |
| last_caught_up_timestamp | The leader wall clock append time of the offset for which the follower made the most recent fetch request. This is reported as the current time for the leader and -1 if unknown for a voter. |
| _tagged_fields | The tagged fields |
| observers | The observers of the partition. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
DescribeQuorum Response (Version: 2) => error_code error_message [topics] [nodes] _tagged_fields 
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  topics => topic_name [partitions] _tagged_fields 
+    topic_name => COMPACT_STRING
+    partitions => partition_index error_code error_message leader_id leader_epoch high_watermark [current_voters] [observers] _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+      leader_id => INT32
+      leader_epoch => INT32
+      high_watermark => INT64
+      current_voters => replica_id replica_directory_id log_end_offset last_fetch_timestamp last_caught_up_timestamp _tagged_fields 
+        replica_id => INT32
+        replica_directory_id => UUID
+        log_end_offset => INT64
+        last_fetch_timestamp => INT64
+        last_caught_up_timestamp => INT64
+      observers => replica_id replica_directory_id log_end_offset last_fetch_timestamp last_caught_up_timestamp _tagged_fields 
+        replica_id => INT32
+        replica_directory_id => UUID
+        log_end_offset => INT64
+        last_fetch_timestamp => INT64
+        last_caught_up_timestamp => INT64
+  nodes => node_id [listeners] _tagged_fields 
+    node_id => INT32
+    listeners => name host port _tagged_fields 
+      name => COMPACT_STRING
+      host => COMPACT_STRING
+      port => UINT16
+

Response header version: 1

| Field | Description |
|---|---|
| error_code | The top level error code. |
| error_message | The error message, or null if there was no error. |
| topics | The response from the describe quorum API. |
| topic_name | The topic name. |
| partitions | The partition data. |
| partition_index | The partition index. |
| error_code | The partition error code. |
| error_message | The error message, or null if there was no error. |
| leader_id | The ID of the current leader or -1 if the leader is unknown. |
| leader_epoch | The latest known leader epoch. |
| high_watermark | The high water mark. |
| current_voters | The current voters of the partition. |
| replica_id | The ID of the replica. |
| replica_directory_id | The replica directory ID of the replica. |
| log_end_offset | The last known log end offset of the follower or -1 if it is unknown. |
| last_fetch_timestamp | The last known leader wall clock time when a follower fetched from the leader. This is reported as -1 both for the current leader or if it is unknown for a voter. |
| last_caught_up_timestamp | The leader wall clock append time of the offset for which the follower made the most recent fetch request. This is reported as the current time for the leader and -1 if unknown for a voter. |
| _tagged_fields | The tagged fields |
| observers | The observers of the partition. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| nodes | The nodes in the quorum. |
| node_id | The ID of the associated node. |
| listeners | The listeners of this controller. |
| name | The name of the endpoint. |
| host | The hostname. |
| port | The port. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
UpdateFeatures API (Key: 57):
+ +Requests:
+
UpdateFeatures Request (Version: 0) => timeout_ms [feature_updates] _tagged_fields 
+  timeout_ms => INT32
+  feature_updates => feature max_version_level allow_downgrade _tagged_fields 
+    feature => COMPACT_STRING
+    max_version_level => INT16
+    allow_downgrade => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| timeout_ms | How long to wait in milliseconds before timing out the request. |
| feature_updates | The list of updates to finalized features. |
| feature | The name of the finalized feature to be updated. |
| max_version_level | The new maximum version level for the finalized feature. A value >= 1 is valid. A value < 1 is special, and can be used to request the deletion of the finalized feature. |
| allow_downgrade | DEPRECATED in version 1 (see DowngradeType). When set to true, the finalized feature version level is allowed to be downgraded/deleted. The downgrade request will fail if the new maximum version level is a value that's not lower than the existing maximum finalized version level. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
UpdateFeatures Request (Version: 1) => timeout_ms [feature_updates] validate_only _tagged_fields 
+  timeout_ms => INT32
+  feature_updates => feature max_version_level upgrade_type _tagged_fields 
+    feature => COMPACT_STRING
+    max_version_level => INT16
+    upgrade_type => INT8
+  validate_only => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| timeout_ms | How long to wait in milliseconds before timing out the request. |
| feature_updates | The list of updates to finalized features. |
| feature | The name of the finalized feature to be updated. |
| max_version_level | The new maximum version level for the finalized feature. A value >= 1 is valid. A value < 1 is special, and can be used to request the deletion of the finalized feature. |
| upgrade_type | Determine which type of upgrade will be performed: 1 will perform an upgrade only (default), 2 is safe downgrades only (lossless), 3 is unsafe downgrades (lossy). |
| _tagged_fields | The tagged fields |
| validate_only | True if we should validate the request, but not perform the upgrade or downgrade. |
| _tagged_fields | The tagged fields |
+
+
UpdateFeatures Request (Version: 2) => timeout_ms [feature_updates] validate_only _tagged_fields 
+  timeout_ms => INT32
+  feature_updates => feature max_version_level upgrade_type _tagged_fields 
+    feature => COMPACT_STRING
+    max_version_level => INT16
+    upgrade_type => INT8
+  validate_only => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| timeout_ms | How long to wait in milliseconds before timing out the request. |
| feature_updates | The list of updates to finalized features. |
| feature | The name of the finalized feature to be updated. |
| max_version_level | The new maximum version level for the finalized feature. A value >= 1 is valid. A value < 1 is special, and can be used to request the deletion of the finalized feature. |
| upgrade_type | Determine which type of upgrade will be performed: 1 will perform an upgrade only (default), 2 is safe downgrades only (lossless), 3 is unsafe downgrades (lossy). |
| _tagged_fields | The tagged fields |
| validate_only | True if we should validate the request, but not perform the upgrade or downgrade. |
| _tagged_fields | The tagged fields |
+
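A hedged sketch of how the upgrade_type codes appear in the Java Admin client, whose FeatureUpdate.UpgradeType values UPGRADE, SAFE_DOWNGRADE and UNSAFE_DOWNGRADE correspond to 1, 2 and 3 above. The broker address and the "metadata.version" feature name are placeholders chosen for illustration:

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.FeatureUpdate;
import org.apache.kafka.clients.admin.UpdateFeaturesOptions;

public class UpdateFeaturesExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        try (Admin admin = Admin.create(props)) {
            // Request an upgrade-only change of one finalized feature to max version level 1.
            FeatureUpdate update =
                    new FeatureUpdate((short) 1, FeatureUpdate.UpgradeType.UPGRADE);
            admin.updateFeatures(Map.of("metadata.version", update), new UpdateFeaturesOptions())
                 .all().get();
        }
    }
}
```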
+Responses:
+
UpdateFeatures Response (Version: 0) => throttle_time_ms error_code error_message [results] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  results => feature error_code error_message _tagged_fields 
+    feature => COMPACT_STRING
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top-level error code, or `0` if there was no top-level error. |
| error_message | The top-level error message, or `null` if there was no top-level error. |
| results | Results for each feature update. |
| feature | The name of the finalized feature. |
| error_code | The feature update error code or `0` if the feature update succeeded. |
| error_message | The feature update error, or `null` if the feature update succeeded. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
UpdateFeatures Response (Version: 1) => throttle_time_ms error_code error_message [results] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  results => feature error_code error_message _tagged_fields 
+    feature => COMPACT_STRING
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top-level error code, or `0` if there was no top-level error. |
| error_message | The top-level error message, or `null` if there was no top-level error. |
| results | Results for each feature update. |
| feature | The name of the finalized feature. |
| error_code | The feature update error code or `0` if the feature update succeeded. |
| error_message | The feature update error, or `null` if the feature update succeeded. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
UpdateFeatures Response (Version: 2) => throttle_time_ms error_code error_message _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top-level error code, or `0` if there was no top-level error. |
| error_message | The top-level error message, or `null` if there was no top-level error. |
| _tagged_fields | The tagged fields |
+
+
DescribeCluster API (Key: 60):
+ +Requests:
+
DescribeCluster Request (Version: 0) => include_cluster_authorized_operations _tagged_fields 
+  include_cluster_authorized_operations => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| include_cluster_authorized_operations | Whether to include cluster authorized operations. |
| _tagged_fields | The tagged fields |
+
+
DescribeCluster Request (Version: 1) => include_cluster_authorized_operations endpoint_type _tagged_fields 
+  include_cluster_authorized_operations => BOOLEAN
+  endpoint_type => INT8
+

Request header version: 2

| Field | Description |
|---|---|
| include_cluster_authorized_operations | Whether to include cluster authorized operations. |
| endpoint_type | The endpoint type to describe. 1=brokers, 2=controllers. |
| _tagged_fields | The tagged fields |
+
+
DescribeCluster Request (Version: 2) => include_cluster_authorized_operations endpoint_type include_fenced_brokers _tagged_fields 
+  include_cluster_authorized_operations => BOOLEAN
+  endpoint_type => INT8
+  include_fenced_brokers => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| include_cluster_authorized_operations | Whether to include cluster authorized operations. |
| endpoint_type | The endpoint type to describe. 1=brokers, 2=controllers. |
| include_fenced_brokers | Whether to include fenced brokers when listing brokers. |
| _tagged_fields | The tagged fields |
+
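A hedged sketch of issuing this request from the Java Admin client; includeAuthorizedOperations(true) sets include_cluster_authorized_operations, and the returned cluster ID, controller and node list correspond to the response fields below. Broker address is a placeholder:

```java
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeClusterOptions;
import org.apache.kafka.clients.admin.DescribeClusterResult;

public class DescribeClusterExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        try (Admin admin = Admin.create(props)) {
            DescribeClusterResult result = admin.describeCluster(
                    new DescribeClusterOptions().includeAuthorizedOperations(true));
            System.out.println("cluster id = " + result.clusterId().get());
            System.out.println("controller = " + result.controller().get());
            // One line per broker: id, host and port.
            result.nodes().get().forEach(node ->
                    System.out.println(node.id() + " " + node.host() + ":" + node.port()));
        }
    }
}
```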
+Responses:
+
DescribeCluster Response (Version: 0) => throttle_time_ms error_code error_message cluster_id controller_id [brokers] cluster_authorized_operations _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  cluster_id => COMPACT_STRING
+  controller_id => INT32
+  brokers => broker_id host port rack _tagged_fields 
+    broker_id => INT32
+    host => COMPACT_STRING
+    port => INT32
+    rack => COMPACT_NULLABLE_STRING
+  cluster_authorized_operations => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top-level error code, or 0 if there was no error. |
| error_message | The top-level error message, or null if there was no error. |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| brokers | Each broker in the response. |
| broker_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| _tagged_fields | The tagged fields |
| cluster_authorized_operations | 32-bit bitfield to represent authorized operations for this cluster. |
| _tagged_fields | The tagged fields |
+
+
DescribeCluster Response (Version: 1) => throttle_time_ms error_code error_message endpoint_type cluster_id controller_id [brokers] cluster_authorized_operations _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  endpoint_type => INT8
+  cluster_id => COMPACT_STRING
+  controller_id => INT32
+  brokers => broker_id host port rack _tagged_fields 
+    broker_id => INT32
+    host => COMPACT_STRING
+    port => INT32
+    rack => COMPACT_NULLABLE_STRING
+  cluster_authorized_operations => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top-level error code, or 0 if there was no error. |
| error_message | The top-level error message, or null if there was no error. |
| endpoint_type | The endpoint type that was described. 1=brokers, 2=controllers. |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| brokers | Each broker in the response. |
| broker_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| _tagged_fields | The tagged fields |
| cluster_authorized_operations | 32-bit bitfield to represent authorized operations for this cluster. |
| _tagged_fields | The tagged fields |
+
+
DescribeCluster Response (Version: 2) => throttle_time_ms error_code error_message endpoint_type cluster_id controller_id [brokers] cluster_authorized_operations _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  endpoint_type => INT8
+  cluster_id => COMPACT_STRING
+  controller_id => INT32
+  brokers => broker_id host port rack is_fenced _tagged_fields 
+    broker_id => INT32
+    host => COMPACT_STRING
+    port => INT32
+    rack => COMPACT_NULLABLE_STRING
+    is_fenced => BOOLEAN
+  cluster_authorized_operations => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top-level error code, or 0 if there was no error. |
| error_message | The top-level error message, or null if there was no error. |
| endpoint_type | The endpoint type that was described. 1=brokers, 2=controllers. |
| cluster_id | The cluster ID that responding broker belongs to. |
| controller_id | The ID of the controller broker. |
| brokers | Each broker in the response. |
| broker_id | The broker ID. |
| host | The broker hostname. |
| port | The broker port. |
| rack | The rack of the broker, or null if it has not been assigned to a rack. |
| is_fenced | Whether the broker is fenced. |
| _tagged_fields | The tagged fields |
| cluster_authorized_operations | 32-bit bitfield to represent authorized operations for this cluster. |
| _tagged_fields | The tagged fields |
+
+
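For context, the DescribeCluster request and response above are what the Java `Admin` client exchanges when an application calls `Admin.describeCluster()`. A minimal sketch (the bootstrap address is a placeholder):

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeClusterOptions;
import org.apache.kafka.clients.admin.DescribeClusterResult;

import java.util.Properties;

public class DescribeClusterExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            // Maps onto the DescribeCluster schemas shown above.
            DescribeClusterResult result = admin.describeCluster(
                    new DescribeClusterOptions().includeAuthorizedOperations(true));
            System.out.println("cluster_id:    " + result.clusterId().get());
            System.out.println("controller_id: " + result.controller().get().id());
            System.out.println("brokers:       " + result.nodes().get());
        }
    }
}
```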
DescribeProducers API (Key: 61):
+ +Requests:
+
DescribeProducers Request (Version: 0) => [topics] _tagged_fields 
+  topics => name [partition_indexes] _tagged_fields 
+    name => COMPACT_STRING
+    partition_indexes => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| topics | The topics to list producers for. |
| name | The topic name. |
| partition_indexes | The indexes of the partitions to list producers for. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+Responses:
+
DescribeProducers Response (Version: 0) => throttle_time_ms [topics] _tagged_fields 
+  throttle_time_ms => INT32
+  topics => name [partitions] _tagged_fields 
+    name => COMPACT_STRING
+    partitions => partition_index error_code error_message [active_producers] _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+      active_producers => producer_id producer_epoch last_sequence last_timestamp coordinator_epoch current_txn_start_offset _tagged_fields 
+        producer_id => INT64
+        producer_epoch => INT32
+        last_sequence => INT32
+        last_timestamp => INT64
+        coordinator_epoch => INT32
+        current_txn_start_offset => INT64
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | Each topic in the response. |
| name | The topic name. |
| partitions | Each partition in the response. |
| partition_index | The partition index. |
| error_code | The partition error code, or 0 if there was no error. |
| error_message | The partition error message, which may be null if no additional details are available. |
| active_producers | The active producers for the partition. |
| producer_id | The producer id. |
| producer_epoch | The producer epoch. |
| last_sequence | The last sequence number sent by the producer. |
| last_timestamp | The last timestamp sent by the producer. |
| coordinator_epoch | The current epoch of the producer group. |
| current_txn_start_offset | The current transaction start offset of the producer. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
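The `Admin.describeProducers` method (added by KIP-664) is the client-side counterpart of this API. A small fragment, assuming an `Admin` client named `admin` has already been created as in the earlier example and that the topic name is a placeholder:

```java
import org.apache.kafka.clients.admin.DescribeProducersResult;
import org.apache.kafka.common.TopicPartition;

import java.util.List;

TopicPartition tp = new TopicPartition("my-topic", 0);
DescribeProducersResult result = admin.describeProducers(List.of(tp));
// Each ProducerState mirrors one entry of active_producers above
// (producer id/epoch, last sequence, last timestamp, ...).
result.partitionResult(tp).get().activeProducers()
      .forEach(System.out::println);
```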
UnregisterBroker API (Key: 64):
+ +Requests:
+
UnregisterBroker Request (Version: 0) => broker_id _tagged_fields 
+  broker_id => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| broker_id | The broker ID to unregister. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
UnregisterBroker Response (Version: 0) => throttle_time_ms error_code error_message _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The top-level error message, or `null` if there was no top-level error. |
| _tagged_fields | The tagged fields |
+
+
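UnregisterBroker removes the registration of a broker that has been permanently decommissioned from a KRaft cluster. The `Admin` client exposes it as a thin wrapper; a sketch, assuming an `Admin` client named `admin` and that broker 5 (an illustrative id) has already been shut down:

```java
// Removes broker 5's registration from the KRaft controller quorum.
admin.unregisterBroker(5).all().get();
```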
DescribeTransactions API (Key: 65):
+ +Requests:
+
DescribeTransactions Request (Version: 0) => [transactional_ids] _tagged_fields 
+  transactional_ids => COMPACT_STRING
+

Request header version: 2

| Field | Description |
|---|---|
| transactional_ids | Array of transactionalIds to include in describe results. If empty, then no results will be returned. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
DescribeTransactions Response (Version: 0) => throttle_time_ms [transaction_states] _tagged_fields 
+  throttle_time_ms => INT32
+  transaction_states => error_code transactional_id transaction_state transaction_timeout_ms transaction_start_time_ms producer_id producer_epoch [topics] _tagged_fields 
+    error_code => INT16
+    transactional_id => COMPACT_STRING
+    transaction_state => COMPACT_STRING
+    transaction_timeout_ms => INT32
+    transaction_start_time_ms => INT64
+    producer_id => INT64
+    producer_epoch => INT16
+    topics => topic [partitions] _tagged_fields 
+      topic => COMPACT_STRING
+      partitions => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| transaction_states | The current state of the transaction. |
| error_code | The error code. |
| transactional_id | The transactional id. |
| transaction_state | The current transaction state of the producer. |
| transaction_timeout_ms | The timeout in milliseconds for the transaction. |
| transaction_start_time_ms | The start time of the transaction in milliseconds. |
| producer_id | The current producer id associated with the transaction. |
| producer_epoch | The current epoch associated with the producer id. |
| topics | The set of partitions included in the current transaction (if active). When a transaction is preparing to commit or abort, this will include only partitions which do not have markers. |
| topic | The topic name. |
| partitions | The partition ids included in the current transaction. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
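This API backs `Admin.describeTransactions` (KIP-664). A fragment, assuming an `Admin` client named `admin` and a placeholder transactional id:

```java
import org.apache.kafka.clients.admin.TransactionDescription;

import java.util.List;

TransactionDescription desc = admin.describeTransactions(List.of("my-txn-id"))
                                   .description("my-txn-id")
                                   .get();
System.out.println(desc.state());            // transaction_state
System.out.println(desc.producerId());       // producer_id
System.out.println(desc.topicPartitions());  // topics/partitions in the transaction
```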
ListTransactions API (Key: 66):
+ +Requests:
+
ListTransactions Request (Version: 0) => [state_filters] [producer_id_filters] _tagged_fields 
+  state_filters => COMPACT_STRING
+  producer_id_filters => INT64
+

Request header version: 2

| Field | Description |
|---|---|
| state_filters | The transaction states to filter by: if empty, all transactions are returned; if non-empty, then only transactions matching one of the filtered states will be returned. |
| producer_id_filters | The producerIds to filter by: if empty, all transactions will be returned; if non-empty, only transactions which match one of the filtered producerIds will be returned. |
| _tagged_fields | The tagged fields |
+
+
ListTransactions Request (Version: 1) => [state_filters] [producer_id_filters] duration_filter _tagged_fields 
+  state_filters => COMPACT_STRING
+  producer_id_filters => INT64
+  duration_filter => INT64
+

Request header version: 2

| Field | Description |
|---|---|
| state_filters | The transaction states to filter by: if empty, all transactions are returned; if non-empty, then only transactions matching one of the filtered states will be returned. |
| producer_id_filters | The producerIds to filter by: if empty, all transactions will be returned; if non-empty, only transactions which match one of the filtered producerIds will be returned. |
| duration_filter | Duration (in millis) to filter by: if < 0, all transactions will be returned; otherwise, only transactions running longer than this duration will be returned. |
| _tagged_fields | The tagged fields |
+
+
ListTransactions Request (Version: 2) => [state_filters] [producer_id_filters] duration_filter transactional_id_pattern _tagged_fields 
+  state_filters => COMPACT_STRING
+  producer_id_filters => INT64
+  duration_filter => INT64
+  transactional_id_pattern => COMPACT_NULLABLE_STRING
+

Request header version: 2

| Field | Description |
|---|---|
| state_filters | The transaction states to filter by: if empty, all transactions are returned; if non-empty, then only transactions matching one of the filtered states will be returned. |
| producer_id_filters | The producerIds to filter by: if empty, all transactions will be returned; if non-empty, only transactions which match one of the filtered producerIds will be returned. |
| duration_filter | Duration (in millis) to filter by: if < 0, all transactions will be returned; otherwise, only transactions running longer than this duration will be returned. |
| transactional_id_pattern | The transactional ID regular expression pattern to filter by: if it is empty or null, all transactions are returned; otherwise, only the transactions matching the given regular expression will be returned. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
ListTransactions Response (Version: 0) => throttle_time_ms error_code [unknown_state_filters] [transaction_states] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  unknown_state_filters => COMPACT_STRING
+  transaction_states => transactional_id producer_id transaction_state _tagged_fields 
+    transactional_id => COMPACT_STRING
+    producer_id => INT64
+    transaction_state => COMPACT_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| unknown_state_filters | Set of state filters provided in the request which were unknown to the transaction coordinator. |
| transaction_states | The current state of the transaction for the transactional id. |
| transactional_id | The transactional id. |
| producer_id | The producer id. |
| transaction_state | The current transaction state of the producer. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ListTransactions Response (Version: 1) => throttle_time_ms error_code [unknown_state_filters] [transaction_states] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  unknown_state_filters => COMPACT_STRING
+  transaction_states => transactional_id producer_id transaction_state _tagged_fields 
+    transactional_id => COMPACT_STRING
+    producer_id => INT64
+    transaction_state => COMPACT_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| unknown_state_filters | Set of state filters provided in the request which were unknown to the transaction coordinator. |
| transaction_states | The current state of the transaction for the transactional id. |
| transactional_id | The transactional id. |
| producer_id | The producer id. |
| transaction_state | The current transaction state of the producer. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ListTransactions Response (Version: 2) => throttle_time_ms error_code [unknown_state_filters] [transaction_states] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  unknown_state_filters => COMPACT_STRING
+  transaction_states => transactional_id producer_id transaction_state _tagged_fields 
+    transactional_id => COMPACT_STRING
+    producer_id => INT64
+    transaction_state => COMPACT_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| unknown_state_filters | Set of state filters provided in the request which were unknown to the transaction coordinator. |
| transaction_states | The current state of the transaction for the transactional id. |
| transactional_id | The transactional id. |
| producer_id | The producer id. |
| transaction_state | The current transaction state of the producer. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
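The corresponding client entry point is `Admin.listTransactions`, whose options map onto the filters above. A fragment, assuming an `Admin` client named `admin` (only the state filter is shown; the duration and pattern filters of the later request versions have their own option setters in newer clients):

```java
import org.apache.kafka.clients.admin.ListTransactionsOptions;
import org.apache.kafka.clients.admin.TransactionListing;
import org.apache.kafka.clients.admin.TransactionState;

import java.util.Collection;
import java.util.Set;

// Only ongoing transactions are returned; an empty filter set would return all states.
ListTransactionsOptions options = new ListTransactionsOptions()
        .filterStates(Set.of(TransactionState.ONGOING));
Collection<TransactionListing> listings = admin.listTransactions(options).all().get();
listings.forEach(l -> System.out.println(l.transactionalId() + " " + l.state()));
```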
ConsumerGroupHeartbeat API (Key: 68):
+ +Requests:
+
ConsumerGroupHeartbeat Request (Version: 0) => group_id member_id member_epoch instance_id rack_id rebalance_timeout_ms [subscribed_topic_names] server_assignor [topic_partitions] _tagged_fields 
+  group_id => COMPACT_STRING
+  member_id => COMPACT_STRING
+  member_epoch => INT32
+  instance_id => COMPACT_NULLABLE_STRING
+  rack_id => COMPACT_NULLABLE_STRING
+  rebalance_timeout_ms => INT32
+  subscribed_topic_names => COMPACT_STRING
+  server_assignor => COMPACT_NULLABLE_STRING
+  topic_partitions => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| group_id | The group identifier. |
| member_id | The member id generated by the consumer. The member id must be kept during the entire lifetime of the consumer process. |
| member_epoch | The current member epoch; 0 to join the group; -1 to leave the group; -2 to indicate that the static member will rejoin. |
| instance_id | null if not provided or if it didn't change since the last heartbeat; the instance Id otherwise. |
| rack_id | null if not provided or if it didn't change since the last heartbeat; the rack ID of consumer otherwise. |
| rebalance_timeout_ms | -1 if it didn't change since the last heartbeat; the maximum time in milliseconds that the coordinator will wait on the member to revoke its partitions otherwise. |
| subscribed_topic_names | null if it didn't change since the last heartbeat; the subscribed topic names otherwise. |
| server_assignor | null if not used or if it didn't change since the last heartbeat; the server side assignor to use otherwise. |
| topic_partitions | null if it didn't change since the last heartbeat; the partitions owned by the member. |
| topic_id | The topic ID. |
| partitions | The partitions. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ConsumerGroupHeartbeat Request (Version: 1) => group_id member_id member_epoch instance_id rack_id rebalance_timeout_ms [subscribed_topic_names] subscribed_topic_regex server_assignor [topic_partitions] _tagged_fields 
+  group_id => COMPACT_STRING
+  member_id => COMPACT_STRING
+  member_epoch => INT32
+  instance_id => COMPACT_NULLABLE_STRING
+  rack_id => COMPACT_NULLABLE_STRING
+  rebalance_timeout_ms => INT32
+  subscribed_topic_names => COMPACT_STRING
+  subscribed_topic_regex => COMPACT_NULLABLE_STRING
+  server_assignor => COMPACT_NULLABLE_STRING
+  topic_partitions => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| group_id | The group identifier. |
| member_id | The member id generated by the consumer. The member id must be kept during the entire lifetime of the consumer process. |
| member_epoch | The current member epoch; 0 to join the group; -1 to leave the group; -2 to indicate that the static member will rejoin. |
| instance_id | null if not provided or if it didn't change since the last heartbeat; the instance Id otherwise. |
| rack_id | null if not provided or if it didn't change since the last heartbeat; the rack ID of consumer otherwise. |
| rebalance_timeout_ms | -1 if it didn't change since the last heartbeat; the maximum time in milliseconds that the coordinator will wait on the member to revoke its partitions otherwise. |
| subscribed_topic_names | null if it didn't change since the last heartbeat; the subscribed topic names otherwise. |
| subscribed_topic_regex | null if it didn't change since the last heartbeat; the subscribed topic regex otherwise. |
| server_assignor | null if not used or if it didn't change since the last heartbeat; the server side assignor to use otherwise. |
| topic_partitions | null if it didn't change since the last heartbeat; the partitions owned by the member. |
| topic_id | The topic ID. |
| partitions | The partitions. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+Responses:
+
ConsumerGroupHeartbeat Response (Version: 0) => throttle_time_ms error_code error_message member_id member_epoch heartbeat_interval_ms assignment _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  member_id => COMPACT_NULLABLE_STRING
+  member_epoch => INT32
+  heartbeat_interval_ms => INT32
+  assignment => [topic_partitions] _tagged_fields 
+    topic_partitions => topic_id [partitions] _tagged_fields 
+      topic_id => UUID
+      partitions => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top-level error code, or 0 if there was no error. |
| error_message | The top-level error message, or null if there was no error. |
| member_id | The member id is generated by the consumer starting from version 1, while in version 0, it can be provided by users or generated by the group coordinator. |
| member_epoch | The member epoch. |
| heartbeat_interval_ms | The heartbeat interval in milliseconds. |
| assignment | null if not provided; the assignment otherwise. |
| topic_partitions | The partitions assigned to the member that can be used immediately. |
| topic_id | The topic ID. |
| partitions | The partitions. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ConsumerGroupHeartbeat Response (Version: 1) => throttle_time_ms error_code error_message member_id member_epoch heartbeat_interval_ms assignment _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  member_id => COMPACT_NULLABLE_STRING
+  member_epoch => INT32
+  heartbeat_interval_ms => INT32
+  assignment => [topic_partitions] _tagged_fields 
+    topic_partitions => topic_id [partitions] _tagged_fields 
+      topic_id => UUID
+      partitions => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top-level error code, or 0 if there was no error. |
| error_message | The top-level error message, or null if there was no error. |
| member_id | The member id is generated by the consumer starting from version 1, while in version 0, it can be provided by users or generated by the group coordinator. |
| member_epoch | The member epoch. |
| heartbeat_interval_ms | The heartbeat interval in milliseconds. |
| assignment | null if not provided; the assignment otherwise. |
| topic_partitions | The partitions assigned to the member that can be used immediately. |
| topic_id | The topic ID. |
| partitions | The partitions. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
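Applications never send ConsumerGroupHeartbeat themselves; the consumer drives it when the next-generation group protocol from KIP-848 is enabled. A hedged sketch of opting a consumer into that protocol (the `group.protocol` setting comes from KIP-848 and requires broker-side support; topic, group id and bootstrap address are placeholders):

```java
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.List;
import java.util.Properties;

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
// Opt in to the KIP-848 group protocol; membership is then maintained with
// ConsumerGroupHeartbeat instead of JoinGroup/SyncGroup/Heartbeat.
props.put("group.protocol", "consumer");

try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
    consumer.subscribe(List.of("my-topic"));
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
    records.forEach(r -> System.out.println(r.key() + " = " + r.value()));
}
```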
ConsumerGroupDescribe API (Key: 69):
+ +Requests:
+
ConsumerGroupDescribe Request (Version: 0) => [group_ids] include_authorized_operations _tagged_fields 
+  group_ids => COMPACT_STRING
+  include_authorized_operations => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| group_ids | The ids of the groups to describe. |
| include_authorized_operations | Whether to include authorized operations. |
| _tagged_fields | The tagged fields |
+
+
ConsumerGroupDescribe Request (Version: 1) => [group_ids] include_authorized_operations _tagged_fields 
+  group_ids => COMPACT_STRING
+  include_authorized_operations => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| group_ids | The ids of the groups to describe. |
| include_authorized_operations | Whether to include authorized operations. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
ConsumerGroupDescribe Response (Version: 0) => throttle_time_ms [groups] _tagged_fields 
+  throttle_time_ms => INT32
+  groups => error_code error_message group_id group_state group_epoch assignment_epoch assignor_name [members] authorized_operations _tagged_fields 
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    group_id => COMPACT_STRING
+    group_state => COMPACT_STRING
+    group_epoch => INT32
+    assignment_epoch => INT32
+    assignor_name => COMPACT_STRING
+    members => member_id instance_id rack_id member_epoch client_id client_host [subscribed_topic_names] subscribed_topic_regex assignment target_assignment _tagged_fields 
+      member_id => COMPACT_STRING
+      instance_id => COMPACT_NULLABLE_STRING
+      rack_id => COMPACT_NULLABLE_STRING
+      member_epoch => INT32
+      client_id => COMPACT_STRING
+      client_host => COMPACT_STRING
+      subscribed_topic_names => COMPACT_STRING
+      subscribed_topic_regex => COMPACT_NULLABLE_STRING
+      assignment => [topic_partitions] _tagged_fields 
+        topic_partitions => topic_id topic_name [partitions] _tagged_fields 
+          topic_id => UUID
+          topic_name => COMPACT_STRING
+          partitions => INT32
+      target_assignment => [topic_partitions] _tagged_fields 
+        topic_partitions => topic_id topic_name [partitions] _tagged_fields 
+          topic_id => UUID
+          topic_name => COMPACT_STRING
+          partitions => INT32
+    authorized_operations => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| groups | Each described group. |
| error_code | The describe error, or 0 if there was no error. |
| error_message | The top-level error message, or null if there was no error. |
| group_id | The group ID string. |
| group_state | The group state string, or the empty string. |
| group_epoch | The group epoch. |
| assignment_epoch | The assignment epoch. |
| assignor_name | The selected assignor. |
| members | The members. |
| member_id | The member ID. |
| instance_id | The member instance ID. |
| rack_id | The member rack ID. |
| member_epoch | The current member epoch. |
| client_id | The client ID. |
| client_host | The client host. |
| subscribed_topic_names | The subscribed topic names. |
| subscribed_topic_regex | The subscribed topic regex, or null if not provided. |
| assignment | The current assignment. |
| topic_partitions | The assigned topic-partitions to the member. |
| topic_id | The topic ID. |
| topic_name | The topic name. |
| partitions | The partitions. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| target_assignment | The target assignment. |
| _tagged_fields | The tagged fields |
| authorized_operations | 32-bit bitfield to represent authorized operations for this group. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ConsumerGroupDescribe Response (Version: 1) => throttle_time_ms [groups] _tagged_fields 
+  throttle_time_ms => INT32
+  groups => error_code error_message group_id group_state group_epoch assignment_epoch assignor_name [members] authorized_operations _tagged_fields 
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    group_id => COMPACT_STRING
+    group_state => COMPACT_STRING
+    group_epoch => INT32
+    assignment_epoch => INT32
+    assignor_name => COMPACT_STRING
+    members => member_id instance_id rack_id member_epoch client_id client_host [subscribed_topic_names] subscribed_topic_regex assignment target_assignment member_type _tagged_fields 
+      member_id => COMPACT_STRING
+      instance_id => COMPACT_NULLABLE_STRING
+      rack_id => COMPACT_NULLABLE_STRING
+      member_epoch => INT32
+      client_id => COMPACT_STRING
+      client_host => COMPACT_STRING
+      subscribed_topic_names => COMPACT_STRING
+      subscribed_topic_regex => COMPACT_NULLABLE_STRING
+      assignment => [topic_partitions] _tagged_fields 
+        topic_partitions => topic_id topic_name [partitions] _tagged_fields 
+          topic_id => UUID
+          topic_name => COMPACT_STRING
+          partitions => INT32
+      target_assignment => [topic_partitions] _tagged_fields 
+        topic_partitions => topic_id topic_name [partitions] _tagged_fields 
+          topic_id => UUID
+          topic_name => COMPACT_STRING
+          partitions => INT32
+      member_type => INT8
+    authorized_operations => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| groups | Each described group. |
| error_code | The describe error, or 0 if there was no error. |
| error_message | The top-level error message, or null if there was no error. |
| group_id | The group ID string. |
| group_state | The group state string, or the empty string. |
| group_epoch | The group epoch. |
| assignment_epoch | The assignment epoch. |
| assignor_name | The selected assignor. |
| members | The members. |
| member_id | The member ID. |
| instance_id | The member instance ID. |
| rack_id | The member rack ID. |
| member_epoch | The current member epoch. |
| client_id | The client ID. |
| client_host | The client host. |
| subscribed_topic_names | The subscribed topic names. |
| subscribed_topic_regex | The subscribed topic regex, or null if not provided. |
| assignment | The current assignment. |
| topic_partitions | The assigned topic-partitions to the member. |
| topic_id | The topic ID. |
| topic_name | The topic name. |
| partitions | The partitions. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| target_assignment | The target assignment. |
| member_type | -1 for unknown. 0 for classic member. +1 for consumer member. |
| _tagged_fields | The tagged fields |
| authorized_operations | 32-bit bitfield to represent authorized operations for this group. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
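From the application side this information surfaces through `Admin.describeConsumerGroups`. A fragment, assuming an `Admin` client named `admin` and a placeholder group id:

```java
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions;

import java.util.List;

ConsumerGroupDescription group = admin
        .describeConsumerGroups(List.of("my-group"),
                new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true))
        .describedGroups().get("my-group").get();
System.out.println(group.state());
group.members().forEach(m -> System.out.println(m.consumerId() + " " + m.assignment()));
```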
GetTelemetrySubscriptions API (Key: 71):
+ +Requests:
+
GetTelemetrySubscriptions Request (Version: 0) => client_instance_id _tagged_fields 
+  client_instance_id => UUID
+

Request header version: 2

| Field | Description |
|---|---|
| client_instance_id | Unique id for this client instance, must be set to 0 on the first request. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
GetTelemetrySubscriptions Response (Version: 0) => throttle_time_ms error_code client_instance_id subscription_id [accepted_compression_types] push_interval_ms telemetry_max_bytes delta_temporality [requested_metrics] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  client_instance_id => UUID
+  subscription_id => INT32
+  accepted_compression_types => INT8
+  push_interval_ms => INT32
+  telemetry_max_bytes => INT32
+  delta_temporality => BOOLEAN
+  requested_metrics => COMPACT_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| client_instance_id | Assigned client instance id if ClientInstanceId was 0 in the request, else 0. |
| subscription_id | Unique identifier for the current subscription set for this client instance. |
| accepted_compression_types | Compression types that broker accepts for the PushTelemetryRequest. |
| push_interval_ms | Configured push interval, which is the lowest configured interval in the current subscription set. |
| telemetry_max_bytes | The maximum bytes of binary data the broker accepts in PushTelemetryRequest. |
| delta_temporality | Flag to indicate monotonic/counter metrics are to be emitted as deltas or cumulative values. |
| requested_metrics | Requested metrics prefix string match. Empty array: No metrics subscribed, Array[0] empty string: All metrics subscribed. |
| _tagged_fields | The tagged fields |
+
+
PushTelemetry API (Key: 72):
+ +Requests:
+
PushTelemetry Request (Version: 0) => client_instance_id subscription_id terminating compression_type metrics _tagged_fields 
+  client_instance_id => UUID
+  subscription_id => INT32
+  terminating => BOOLEAN
+  compression_type => INT8
+  metrics => COMPACT_BYTES
+

Request header version: 2

| Field | Description |
|---|---|
| client_instance_id | Unique id for this client instance. |
| subscription_id | Unique identifier for the current subscription. |
| terminating | Client is terminating the connection. |
| compression_type | Compression codec used to compress the metrics. |
| metrics | Metrics encoded in OpenTelemetry MetricsData v1 protobuf format. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
PushTelemetry Response (Version: 0) => throttle_time_ms error_code _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| _tagged_fields | The tagged fields |
+
+
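GetTelemetrySubscriptions and PushTelemetry are exchanged automatically by clients when KIP-714 client metrics are enabled on the broker; applications do not build these requests. The one piece an application typically touches is the client instance id, which correlates client-side metrics with the broker's view. A fragment, assuming a `KafkaProducer` named `producer` (consumers and admin clients expose the same method):

```java
import java.time.Duration;

// Returns the client_instance_id assigned via GetTelemetrySubscriptions.
System.out.println(producer.clientInstanceId(Duration.ofSeconds(5)));
```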
ListConfigResources API (Key: 74):
+ +Requests:
+
ListConfigResources Request (Version: 0) => _tagged_fields 
+

Request header version: 2

| Field | Description |
|---|---|
| _tagged_fields | The tagged fields |
+
+
ListConfigResources Request (Version: 1) => [resource_types] _tagged_fields 
+  resource_types => INT8
+

Request header version: 2

| Field | Description |
|---|---|
| resource_types | The list of resource types. If the list is empty, the default supported config resource types are used. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
ListConfigResources Response (Version: 0) => throttle_time_ms error_code [config_resources] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  config_resources => resource_name _tagged_fields 
+    resource_name => COMPACT_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| config_resources | Each config resource in the response. |
| resource_name | The resource name. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ListConfigResources Response (Version: 1) => throttle_time_ms error_code [config_resources] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  config_resources => resource_name resource_type _tagged_fields 
+    resource_name => COMPACT_STRING
+    resource_type => INT8
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| config_resources | Each config resource in the response. |
| resource_name | The resource name. |
| resource_type | The resource type. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
DescribeTopicPartitions API (Key: 75):
+ +Requests:
+
DescribeTopicPartitions Request (Version: 0) => [topics] response_partition_limit cursor _tagged_fields 
+  topics => name _tagged_fields 
+    name => COMPACT_STRING
+  response_partition_limit => INT32
+  cursor => topic_name partition_index _tagged_fields 
+    topic_name => COMPACT_STRING
+    partition_index => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| topics | The topics to fetch details for. |
| name | The topic name. |
| _tagged_fields | The tagged fields |
| response_partition_limit | The maximum number of partitions included in the response. |
| cursor | The first topic and partition index to fetch details for. |
| topic_name | The name for the first topic to process. |
| partition_index | The partition index to start with. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+Responses:
+
DescribeTopicPartitions Response (Version: 0) => throttle_time_ms [topics] next_cursor _tagged_fields 
+  throttle_time_ms => INT32
+  topics => error_code name topic_id is_internal [partitions] topic_authorized_operations _tagged_fields 
+    error_code => INT16
+    name => COMPACT_NULLABLE_STRING
+    topic_id => UUID
+    is_internal => BOOLEAN
+    partitions => error_code partition_index leader_id leader_epoch [replica_nodes] [isr_nodes] [eligible_leader_replicas] [last_known_elr] [offline_replicas] _tagged_fields 
+      error_code => INT16
+      partition_index => INT32
+      leader_id => INT32
+      leader_epoch => INT32
+      replica_nodes => INT32
+      isr_nodes => INT32
+      eligible_leader_replicas => INT32
+      last_known_elr => INT32
+      offline_replicas => INT32
+    topic_authorized_operations => INT32
+  next_cursor => topic_name partition_index _tagged_fields 
+    topic_name => COMPACT_STRING
+    partition_index => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| topics | Each topic in the response. |
| error_code | The topic error, or 0 if there was no error. |
| name | The topic name. |
| topic_id | The topic id. |
| is_internal | True if the topic is internal. |
| partitions | Each partition in the topic. |
| error_code | The partition error, or 0 if there was no error. |
| partition_index | The partition index. |
| leader_id | The ID of the leader broker. |
| leader_epoch | The leader epoch of this partition. |
| replica_nodes | The set of all nodes that host this partition. |
| isr_nodes | The set of nodes that are in sync with the leader for this partition. |
| eligible_leader_replicas | The new eligible leader replicas otherwise. |
| last_known_elr | The last known ELR. |
| offline_replicas | The set of offline replicas of this partition. |
| _tagged_fields | The tagged fields |
| topic_authorized_operations | 32-bit bitfield to represent authorized operations for this topic. |
| _tagged_fields | The tagged fields |
| next_cursor | The next topic and partition index to fetch details for. |
| topic_name | The name for the first topic to process. |
| partition_index | The partition index to start with. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
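Applications do not usually send DescribeTopicPartitions directly; on recent brokers the admin client can use it (with the cursor for paging over very large topics) to serve ordinary topic description calls. A fragment, assuming an `Admin` client named `admin` and a placeholder topic name:

```java
import org.apache.kafka.clients.admin.TopicDescription;

import java.util.List;
import java.util.Map;

Map<String, TopicDescription> topics =
        admin.describeTopics(List.of("my-topic")).allTopicNames().get();
topics.get("my-topic").partitions()
      .forEach(p -> System.out.println(p.partition() + " leader=" + p.leader()));
```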
ShareGroupHeartbeat API (Key: 76):
+ +Requests:
+
ShareGroupHeartbeat Request (Version: 1) => group_id member_id member_epoch rack_id [subscribed_topic_names] _tagged_fields 
+  group_id => COMPACT_STRING
+  member_id => COMPACT_STRING
+  member_epoch => INT32
+  rack_id => COMPACT_NULLABLE_STRING
+  subscribed_topic_names => COMPACT_STRING
+

Request header version: 2

| Field | Description |
|---|---|
| group_id | The group identifier. |
| member_id | The member id generated by the consumer. The member id must be kept during the entire lifetime of the consumer process. |
| member_epoch | The current member epoch; 0 to join the group; -1 to leave the group. |
| rack_id | null if not provided or if it didn't change since the last heartbeat; the rack ID of consumer otherwise. |
| subscribed_topic_names | null if it didn't change since the last heartbeat; the subscribed topic names otherwise. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
ShareGroupHeartbeat Response (Version: 1) => throttle_time_ms error_code error_message member_id member_epoch heartbeat_interval_ms assignment _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  member_id => COMPACT_NULLABLE_STRING
+  member_epoch => INT32
+  heartbeat_interval_ms => INT32
+  assignment => [topic_partitions] _tagged_fields 
+    topic_partitions => topic_id [partitions] _tagged_fields 
+      topic_id => UUID
+      partitions => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top-level error code, or 0 if there was no error. |
| error_message | The top-level error message, or null if there was no error. |
| member_id | The member ID is generated by the consumer and provided by the consumer for all requests. |
| member_epoch | The member epoch. |
| heartbeat_interval_ms | The heartbeat interval in milliseconds. |
| assignment | null if not provided; the assignment otherwise. |
| topic_partitions | The partitions assigned to the member. |
| topic_id | The topic ID. |
| partitions | The partitions. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ShareGroupDescribe API (Key: 77):
+ +Requests:
+
ShareGroupDescribe Request (Version: 1) => [group_ids] include_authorized_operations _tagged_fields 
+  group_ids => COMPACT_STRING
+  include_authorized_operations => BOOLEAN
+

Request header version: 2

| Field | Description |
|---|---|
| group_ids | The ids of the groups to describe. |
| include_authorized_operations | Whether to include authorized operations. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
ShareGroupDescribe Response (Version: 1) => throttle_time_ms [groups] _tagged_fields 
+  throttle_time_ms => INT32
+  groups => error_code error_message group_id group_state group_epoch assignment_epoch assignor_name [members] authorized_operations _tagged_fields 
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    group_id => COMPACT_STRING
+    group_state => COMPACT_STRING
+    group_epoch => INT32
+    assignment_epoch => INT32
+    assignor_name => COMPACT_STRING
+    members => member_id rack_id member_epoch client_id client_host [subscribed_topic_names] assignment _tagged_fields 
+      member_id => COMPACT_STRING
+      rack_id => COMPACT_NULLABLE_STRING
+      member_epoch => INT32
+      client_id => COMPACT_STRING
+      client_host => COMPACT_STRING
+      subscribed_topic_names => COMPACT_STRING
+      assignment => [topic_partitions] _tagged_fields 
+        topic_partitions => topic_id topic_name [partitions] _tagged_fields 
+          topic_id => UUID
+          topic_name => COMPACT_STRING
+          partitions => INT32
+    authorized_operations => INT32
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| groups | Each described group. |
| error_code | The describe error, or 0 if there was no error. |
| error_message | The top-level error message, or null if there was no error. |
| group_id | The group ID string. |
| group_state | The group state string, or the empty string. |
| group_epoch | The group epoch. |
| assignment_epoch | The assignment epoch. |
| assignor_name | The selected assignor. |
| members | The members. |
| member_id | The member ID. |
| rack_id | The member rack ID. |
| member_epoch | The current member epoch. |
| client_id | The client ID. |
| client_host | The client host. |
| subscribed_topic_names | The subscribed topic names. |
| assignment | The current assignment. |
| topic_partitions | The assigned topic-partitions to the member. |
| topic_id | The topic ID. |
| topic_name | The topic name. |
| partitions | The partitions. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| authorized_operations | 32-bit bitfield to represent authorized operations for this group. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ShareFetch API (Key: 78):
+ +Requests:
+
ShareFetch Request (Version: 1) => group_id member_id share_session_epoch max_wait_ms min_bytes max_bytes max_records batch_size [topics] [forgotten_topics_data] _tagged_fields 
+  group_id => COMPACT_NULLABLE_STRING
+  member_id => COMPACT_NULLABLE_STRING
+  share_session_epoch => INT32
+  max_wait_ms => INT32
+  min_bytes => INT32
+  max_bytes => INT32
+  max_records => INT32
+  batch_size => INT32
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition_index [acknowledgement_batches] _tagged_fields 
+      partition_index => INT32
+      acknowledgement_batches => first_offset last_offset [acknowledge_types] _tagged_fields 
+        first_offset => INT64
+        last_offset => INT64
+        acknowledge_types => INT8
+  forgotten_topics_data => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| group_id | The group identifier. |
| member_id | The member ID. |
| share_session_epoch | The current share session epoch: 0 to open a share session; -1 to close it; otherwise increments for consecutive requests. |
| max_wait_ms | The maximum time in milliseconds to wait for the response. |
| min_bytes | The minimum bytes to accumulate in the response. |
| max_bytes | The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. |
| max_records | The maximum number of records to fetch. This limit can be exceeded for alignment of batch boundaries. |
| batch_size | The optimal number of records for batches of acquired records and acknowledgements. |
| topics | The topics to fetch. |
| topic_id | The unique topic ID. |
| partitions | The partitions to fetch. |
| partition_index | The partition index. |
| acknowledgement_batches | Record batches to acknowledge. |
| first_offset | First offset of batch of records to acknowledge. |
| last_offset | Last offset (inclusive) of batch of records to acknowledge. |
| acknowledge_types | Array of acknowledge types - 0:Gap,1:Accept,2:Release,3:Reject. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| forgotten_topics_data | The partitions to remove from this share session. |
| topic_id | The unique topic ID. |
| partitions | The partitions indexes to forget. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+Responses:
+
ShareFetch Response (Version: 1) => throttle_time_ms error_code error_message acquisition_lock_timeout_ms [responses] [node_endpoints] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  acquisition_lock_timeout_ms => INT32
+  responses => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition_index error_code error_message acknowledge_error_code acknowledge_error_message current_leader records [acquired_records] _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+      acknowledge_error_code => INT16
+      acknowledge_error_message => COMPACT_NULLABLE_STRING
+      current_leader => leader_id leader_epoch _tagged_fields 
+        leader_id => INT32
+        leader_epoch => INT32
+      records => COMPACT_RECORDS
+      acquired_records => first_offset last_offset delivery_count _tagged_fields 
+        first_offset => INT64
+        last_offset => INT64
+        delivery_count => INT16
+  node_endpoints => node_id host port rack _tagged_fields 
+    node_id => INT32
+    host => COMPACT_STRING
+    port => INT32
+    rack => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top-level response error code. |
| error_message | The top-level error message, or null if there was no error. |
| acquisition_lock_timeout_ms | The time in milliseconds for which the acquired records are locked. |
| responses | The response topics. |
| topic_id | The unique topic ID. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The fetch error code, or 0 if there was no fetch error. |
| error_message | The fetch error message, or null if there was no fetch error. |
| acknowledge_error_code | The acknowledge error code, or 0 if there was no acknowledge error. |
| acknowledge_error_message | The acknowledge error message, or null if there was no acknowledge error. |
| current_leader | The current leader of the partition. |
| leader_id | The ID of the current leader or -1 if the leader is unknown. |
| leader_epoch | The latest known leader epoch. |
| _tagged_fields | The tagged fields |
| records | The record data. |
| acquired_records | The acquired records. |
| first_offset | The earliest offset in this batch of acquired records. |
| last_offset | The last offset of this batch of acquired records. |
| delivery_count | The delivery count of this batch of acquired records. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| node_endpoints | Endpoints for all current leaders enumerated in PartitionData with error NOT_LEADER_OR_FOLLOWER. |
| node_id | The ID of the associated node. |
| host | The node's hostname. |
| port | The node's port. |
| rack | The rack of the node, or null if it has not been assigned to a rack. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ShareAcknowledge API (Key: 79):
+ +Requests:
+
ShareAcknowledge Request (Version: 1) => group_id member_id share_session_epoch [topics] _tagged_fields 
+  group_id => COMPACT_NULLABLE_STRING
+  member_id => COMPACT_NULLABLE_STRING
+  share_session_epoch => INT32
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition_index [acknowledgement_batches] _tagged_fields 
+      partition_index => INT32
+      acknowledgement_batches => first_offset last_offset [acknowledge_types] _tagged_fields 
+        first_offset => INT64
+        last_offset => INT64
+        acknowledge_types => INT8
+

Request header version: 2

| Field | Description |
|---|---|
| group_id | The group identifier. |
| member_id | The member ID. |
| share_session_epoch | The current share session epoch: 0 to open a share session; -1 to close it; otherwise increments for consecutive requests. |
| topics | The topics containing records to acknowledge. |
| topic_id | The unique topic ID. |
| partitions | The partitions containing records to acknowledge. |
| partition_index | The partition index. |
| acknowledgement_batches | Record batches to acknowledge. |
| first_offset | First offset of batch of records to acknowledge. |
| last_offset | Last offset (inclusive) of batch of records to acknowledge. |
| acknowledge_types | Array of acknowledge types - 0:Gap,1:Accept,2:Release,3:Reject. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+Responses:
+
ShareAcknowledge Response (Version: 1) => throttle_time_ms error_code error_message [responses] [node_endpoints] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  responses => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition_index error_code error_message current_leader _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+      current_leader => leader_id leader_epoch _tagged_fields 
+        leader_id => INT32
+        leader_epoch => INT32
+  node_endpoints => node_id host port rack _tagged_fields 
+    node_id => INT32
+    host => COMPACT_STRING
+    port => INT32
+    rack => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The top level response error code. |
| error_message | The top-level error message, or null if there was no error. |
| responses | The response topics. |
| topic_id | The unique topic ID. |
| partitions | The topic partitions. |
| partition_index | The partition index. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| current_leader | The current leader of the partition. |
| leader_id | The ID of the current leader or -1 if the leader is unknown. |
| leader_epoch | The latest known leader epoch. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| node_endpoints | Endpoints for all current leaders enumerated in PartitionData with error NOT_LEADER_OR_FOLLOWER. |
| node_id | The ID of the associated node. |
| host | The node's hostname. |
| port | The node's port. |
| rack | The rack of the node, or null if it has not been assigned to a rack. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
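ShareFetch and ShareAcknowledge are driven by the share consumer introduced by KIP-932 (queues for Kafka), which is early access; the sketch below follows the `KafkaShareConsumer` API and acknowledge types proposed in the KIP, and the exact class and method names may differ between releases. It assumes `props` holds bootstrap servers, deserializers and a `group.id` naming the share group:

```java
import org.apache.kafka.clients.consumer.AcknowledgeType;
import org.apache.kafka.clients.consumer.KafkaShareConsumer;

import java.time.Duration;
import java.util.List;

try (KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props)) {
    consumer.subscribe(List.of("my-topic"));
    consumer.poll(Duration.ofSeconds(1)).forEach(record -> {
        // Acknowledge types map to acknowledge_types above:
        // 1=Accept, 2=Release (redeliver later), 3=Reject (discard).
        consumer.acknowledge(record, AcknowledgeType.ACCEPT);
    });
    consumer.commitSync(); // sends the acknowledgements back to the broker
}
```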
AddRaftVoter API (Key: 80):
+ +Requests:
+
AddRaftVoter Request (Version: 0) => cluster_id timeout_ms voter_id voter_directory_id [listeners] _tagged_fields 
+  cluster_id => COMPACT_NULLABLE_STRING
+  timeout_ms => INT32
+  voter_id => INT32
+  voter_directory_id => UUID
+  listeners => name host port _tagged_fields 
+    name => COMPACT_STRING
+    host => COMPACT_STRING
+    port => UINT16
+

Request header version: 2

| Field | Description |
|---|---|
| cluster_id | The cluster id. |
| timeout_ms | The maximum time to wait for the request to complete before returning. |
| voter_id | The replica id of the voter getting added to the topic partition. |
| voter_directory_id | The directory id of the voter getting added to the topic partition. |
| listeners | The endpoints that can be used to communicate with the voter. |
| name | The name of the endpoint. |
| host | The hostname. |
| port | The port. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+Responses:
+
AddRaftVoter Response (Version: 0) => throttle_time_ms error_code error_message _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| _tagged_fields | The tagged fields |
+
+
RemoveRaftVoter API (Key: 81):
+ +Requests:
+
RemoveRaftVoter Request (Version: 0) => cluster_id voter_id voter_directory_id _tagged_fields 
+  cluster_id => COMPACT_NULLABLE_STRING
+  voter_id => INT32
+  voter_directory_id => UUID
+

Request header version: 2

| Field | Description |
|---|---|
| cluster_id | The cluster id of the request. |
| voter_id | The replica id of the voter getting removed from the topic partition. |
| voter_directory_id | The directory id of the voter getting removed from the topic partition. |
| _tagged_fields | The tagged fields |
+
+Responses:
+
RemoveRaftVoter Response (Version: 0) => throttle_time_ms error_code error_message _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| _tagged_fields | The tagged fields |
+
+
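AddRaftVoter and RemoveRaftVoter support dynamic KRaft controller quorum membership (KIP-853). The KIP also describes `Admin` wrappers for these RPCs; the fragment below is a sketch based on that proposal, so the exact signatures may differ between releases. It assumes an `Admin` client named `admin`, an illustrative new controller id 3001, its directory id, and a CONTROLLER listener at controller-3:9093:

```java
import org.apache.kafka.clients.admin.RaftVoterEndpoint;
import org.apache.kafka.common.Uuid;

import java.util.Set;

Uuid directoryId = Uuid.fromString("AAAAAAAAAAAAAAAAAAAAAA"); // placeholder directory id
admin.addRaftVoter(3001, directoryId,
        Set.of(new RaftVoterEndpoint("CONTROLLER", "controller-3", 9093))).all().get();

// Removing a voter only needs its id and directory id.
admin.removeRaftVoter(3001, directoryId).all().get();
```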
InitializeShareGroupState API (Key: 83):
+ +Requests:
+
InitializeShareGroupState Request (Version: 0) => group_id [topics] _tagged_fields 
+  group_id => COMPACT_STRING
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition state_epoch start_offset _tagged_fields 
+      partition => INT32
+      state_epoch => INT32
+      start_offset => INT64
+

Request header version: 2

| Field | Description |
|---|---|
| group_id | The group identifier. |
| topics | The data for the topics. |
| topic_id | The topic identifier. |
| partitions | The data for the partitions. |
| partition | The partition index. |
| state_epoch | The state epoch for this share-partition. |
| start_offset | The share-partition start offset, or -1 if the start offset is not being initialized. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+Responses:
+
InitializeShareGroupState Response (Version: 0) => [results] _tagged_fields 
+  results => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition error_code error_message _tagged_fields 
+      partition => INT32
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| results | The initialization results. |
| topic_id | The topic identifier. |
| partitions | The results for the partitions. |
| partition | The partition index. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
ReadShareGroupState API (Key: 84):
+ +Requests:
+
ReadShareGroupState Request (Version: 0) => group_id [topics] _tagged_fields 
+  group_id => COMPACT_STRING
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition leader_epoch _tagged_fields 
+      partition => INT32
+      leader_epoch => INT32
+

Request header version: 2

| Field | Description |
|---|---|
| group_id | The group identifier. |
| topics | The data for the topics. |
| topic_id | The topic identifier. |
| partitions | The data for the partitions. |
| partition | The partition index. |
| leader_epoch | The leader epoch of the share-partition. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+Responses:
+
ReadShareGroupState Response (Version: 0) => [results] _tagged_fields 
+  results => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition error_code error_message state_epoch start_offset [state_batches] _tagged_fields 
+      partition => INT32
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+      state_epoch => INT32
+      start_offset => INT64
+      state_batches => first_offset last_offset delivery_state delivery_count _tagged_fields 
+        first_offset => INT64
+        last_offset => INT64
+        delivery_state => INT8
+        delivery_count => INT16
+

Response header version: 1

| Field | Description |
|---|---|
| results | The read results. |
| topic_id | The topic identifier. |
| partitions | The results for the partitions. |
| partition | The partition index. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| state_epoch | The state epoch of the share-partition. |
| start_offset | The share-partition start offset, which can be -1 if it is not yet initialized. |
| state_batches | The state batches for this share-partition. |
| first_offset | The first offset of this state batch. |
| last_offset | The last offset of this state batch. |
| delivery_state | The delivery state - 0:Available,2:Acked,4:Archived. |
| delivery_count | The delivery count. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
WriteShareGroupState API (Key: 85):
+ +Requests:
+
WriteShareGroupState Request (Version: 0) => group_id [topics] _tagged_fields 
+  group_id => COMPACT_STRING
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition state_epoch leader_epoch start_offset [state_batches] _tagged_fields 
+      partition => INT32
+      state_epoch => INT32
+      leader_epoch => INT32
+      start_offset => INT64
+      state_batches => first_offset last_offset delivery_state delivery_count _tagged_fields 
+        first_offset => INT64
+        last_offset => INT64
+        delivery_state => INT8
+        delivery_count => INT16
+

Request header version: 2

| Field | Description |
|---|---|
| group_id | The group identifier. |
| topics | The data for the topics. |
| topic_id | The topic identifier. |
| partitions | The data for the partitions. |
| partition | The partition index. |
| state_epoch | The state epoch of the share-partition. |
| leader_epoch | The leader epoch of the share-partition. |
| start_offset | The share-partition start offset, or -1 if the start offset is not being written. |
| state_batches | The state batches for the share-partition. |
| first_offset | The first offset of this state batch. |
| last_offset | The last offset of this state batch. |
| delivery_state | The delivery state - 0:Available,2:Acked,4:Archived. |
| delivery_count | The delivery count. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+Responses:
+
WriteShareGroupState Response (Version: 0) => [results] _tagged_fields 
+  results => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition error_code error_message _tagged_fields 
+      partition => INT32
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

| Field | Description |
|---|---|
| results | The write results. |
| topic_id | The topic identifier. |
| partitions | The results for the partitions. |
| partition | The partition index. |
| error_code | The error code, or 0 if there was no error. |
| error_message | The error message, or null if there was no error. |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
| _tagged_fields | The tagged fields |
+
+
DeleteShareGroupState API (Key: 86):
+ +Requests:
+
DeleteShareGroupState Request (Version: 0) => group_id [topics] _tagged_fields 
+  group_id => COMPACT_STRING
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition _tagged_fields 
+      partition => INT32
+

Request header version: 2

Field | Description
group_id | The group identifier.
topics | The data for the topics.
topic_id | The topic identifier.
partitions | The data for the partitions.
partition | The partition index.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+Responses:
+
DeleteShareGroupState Response (Version: 0) => [results] _tagged_fields 
+  results => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition error_code error_message _tagged_fields 
+      partition => INT32
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

Field | Description
results | The delete results.
topic_id | The topic identifier.
partitions | The results for the partitions.
partition | The partition index.
error_code | The error code, or 0 if there was no error.
error_message | The error message, or null if there was no error.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
ReadShareGroupStateSummary API (Key: 87):
+ +Requests:
+
ReadShareGroupStateSummary Request (Version: 0) => group_id [topics] _tagged_fields 
+  group_id => COMPACT_STRING
+  topics => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition leader_epoch _tagged_fields 
+      partition => INT32
+      leader_epoch => INT32
+

Request header version: 2

Field | Description
group_id | The group identifier.
topics | The data for the topics.
topic_id | The topic identifier.
partitions | The data for the partitions.
partition | The partition index.
leader_epoch | The leader epoch of the share-partition.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+Responses:
+
ReadShareGroupStateSummary Response (Version: 0) => [results] _tagged_fields 
+  results => topic_id [partitions] _tagged_fields 
+    topic_id => UUID
+    partitions => partition error_code error_message state_epoch leader_epoch start_offset _tagged_fields 
+      partition => INT32
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+      state_epoch => INT32
+      leader_epoch => INT32
+      start_offset => INT64
+

Response header version: 1

Field | Description
results | The read results.
topic_id | The topic identifier.
partitions | The results for the partitions.
partition | The partition index.
error_code | The error code, or 0 if there was no error.
error_message | The error message, or null if there was no error.
state_epoch | The state epoch of the share-partition.
leader_epoch | The leader epoch of the share-partition.
start_offset | The share-partition start offset.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
StreamsGroupHeartbeat API (Key: 88):
+ +Requests:
+
StreamsGroupHeartbeat Request (Version: 0) => group_id member_id member_epoch endpoint_information_epoch instance_id rack_id rebalance_timeout_ms topology [active_tasks] [standby_tasks] [warmup_tasks] process_id user_endpoint [client_tags] [task_offsets] [task_end_offsets] shutdown_application _tagged_fields 
+  group_id => COMPACT_STRING
+  member_id => COMPACT_STRING
+  member_epoch => INT32
+  endpoint_information_epoch => INT32
+  instance_id => COMPACT_NULLABLE_STRING
+  rack_id => COMPACT_NULLABLE_STRING
+  rebalance_timeout_ms => INT32
+  topology => epoch [subtopologies] _tagged_fields 
+    epoch => INT32
+    subtopologies => subtopology_id [source_topics] [source_topic_regex] [state_changelog_topics] [repartition_sink_topics] [repartition_source_topics] [copartition_groups] _tagged_fields 
+      subtopology_id => COMPACT_STRING
+      source_topics => COMPACT_STRING
+      source_topic_regex => COMPACT_STRING
+      state_changelog_topics => name partitions replication_factor [topic_configs] _tagged_fields 
+        name => COMPACT_STRING
+        partitions => INT32
+        replication_factor => INT16
+        topic_configs => key value _tagged_fields 
+          key => COMPACT_STRING
+          value => COMPACT_STRING
+      repartition_sink_topics => COMPACT_STRING
+      repartition_source_topics => name partitions replication_factor [topic_configs] _tagged_fields 
+        name => COMPACT_STRING
+        partitions => INT32
+        replication_factor => INT16
+        topic_configs => key value _tagged_fields 
+          key => COMPACT_STRING
+          value => COMPACT_STRING
+      copartition_groups => [source_topics] [source_topic_regex] [repartition_source_topics] _tagged_fields 
+        source_topics => INT16
+        source_topic_regex => INT16
+        repartition_source_topics => INT16
+  active_tasks => subtopology_id [partitions] _tagged_fields 
+    subtopology_id => COMPACT_STRING
+    partitions => INT32
+  standby_tasks => subtopology_id [partitions] _tagged_fields 
+    subtopology_id => COMPACT_STRING
+    partitions => INT32
+  warmup_tasks => subtopology_id [partitions] _tagged_fields 
+    subtopology_id => COMPACT_STRING
+    partitions => INT32
+  process_id => COMPACT_NULLABLE_STRING
+  user_endpoint => host port _tagged_fields 
+    host => COMPACT_STRING
+    port => UINT16
+  client_tags => key value _tagged_fields 
+    key => COMPACT_STRING
+    value => COMPACT_STRING
+  task_offsets => subtopology_id partition offset _tagged_fields 
+    subtopology_id => COMPACT_STRING
+    partition => INT32
+    offset => INT64
+  task_end_offsets => subtopology_id partition offset _tagged_fields 
+    subtopology_id => COMPACT_STRING
+    partition => INT32
+    offset => INT64
+  shutdown_application => BOOLEAN
+

This version of the request is unstable.

Request header version: 2

Field | Description
group_id | The group identifier.
member_id | The member ID generated by the streams consumer. The member ID must be kept during the entire lifetime of the streams consumer process.
member_epoch | The current member epoch; 0 to join the group; -1 to leave the group; -2 to indicate that the static member will rejoin.
endpoint_information_epoch | The current endpoint epoch of this client, represents the latest endpoint epoch this client received
instance_id | null if not provided or if it didn't change since the last heartbeat; the instance ID for static membership otherwise.
rack_id | null if not provided or if it didn't change since the last heartbeat; the rack ID of the member otherwise.
rebalance_timeout_ms | -1 if it didn't change since the last heartbeat; the maximum time in milliseconds that the coordinator will wait on the member to revoke its tasks otherwise.
topology | The topology metadata of the streams application. Used to initialize the topology of the group and to check if the topology corresponds to the topology initialized for the group. Only sent when memberEpoch = 0, must be non-empty. Null otherwise.
epoch | The epoch of the topology. Used to check if the topology corresponds to the topology initialized on the brokers.
subtopologies | The sub-topologies of the streams application.
subtopology_id | String to uniquely identify the subtopology. Deterministically generated from the topology
source_topics | The topics the topology reads from.
source_topic_regex | The regular expressions identifying topics the subtopology reads from.
state_changelog_topics | The set of state changelog topics associated with this subtopology. Created automatically.
name | The name of the topic.
partitions | The number of partitions in the topic. Can be 0 if no specific number of partitions is enforced. Always 0 for changelog topics.
replication_factor | The replication factor of the topic. Can be 0 if the default replication factor should be used.
topic_configs | Topic-level configurations as key-value pairs.
key | key of the config
value | value of the config
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
repartition_sink_topics | The repartition topics the subtopology writes to.
repartition_source_topics | The set of source topics that are internally created repartition topics. Created automatically.
copartition_groups | A subset of source topics that must be copartitioned.
source_topics | The topics the topology reads from. Index into the array on the subtopology level.
source_topic_regex | Regular expressions identifying topics the subtopology reads from. Index into the array on the subtopology level.
repartition_source_topics | The set of source topics that are internally created repartition topics. Index into the array on the subtopology level.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
active_tasks | Currently owned active tasks for this client. Null if unchanged since last heartbeat.
subtopology_id | The subtopology identifier.
partitions | The partitions of the input topics processed by this member.
_tagged_fields | The tagged fields
standby_tasks | Currently owned standby tasks for this client. Null if unchanged since last heartbeat.
warmup_tasks | Currently owned warm-up tasks for this client. Null if unchanged since last heartbeat.
process_id | Identity of the streams instance that may have multiple consumers. Null if unchanged since last heartbeat.
user_endpoint | User-defined endpoint for Interactive Queries. Null if unchanged since last heartbeat, or if not defined on the client.
host | host of the endpoint
port | port of the endpoint
_tagged_fields | The tagged fields
client_tags | Used for rack-aware assignment algorithm. Null if unchanged since last heartbeat.
task_offsets | Cumulative changelog offsets for tasks. Only updated when a warm-up task has caught up, and according to the task offset interval. Null if unchanged since last heartbeat.
subtopology_id | The subtopology identifier.
partition | The partition.
offset | The offset.
_tagged_fields | The tagged fields
task_end_offsets | Cumulative changelog end-offsets for tasks. Only updated when a warm-up task has caught up, and according to the task offset interval. Null if unchanged since last heartbeat.
shutdown_application | Whether all Streams clients in the group should shut down.
_tagged_fields | The tagged fields
+
+Responses:
+
StreamsGroupHeartbeat Response (Version: 0) => throttle_time_ms error_code error_message member_id member_epoch heartbeat_interval_ms acceptable_recovery_lag task_offset_interval_ms [status] [active_tasks] [standby_tasks] [warmup_tasks] endpoint_information_epoch [partitions_by_user_endpoint] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  member_id => COMPACT_STRING
+  member_epoch => INT32
+  heartbeat_interval_ms => INT32
+  acceptable_recovery_lag => INT32
+  task_offset_interval_ms => INT32
+  status => status_code status_detail _tagged_fields 
+    status_code => INT8
+    status_detail => COMPACT_STRING
+  active_tasks => subtopology_id [partitions] _tagged_fields 
+    subtopology_id => COMPACT_STRING
+    partitions => INT32
+  standby_tasks => subtopology_id [partitions] _tagged_fields 
+    subtopology_id => COMPACT_STRING
+    partitions => INT32
+  warmup_tasks => subtopology_id [partitions] _tagged_fields 
+    subtopology_id => COMPACT_STRING
+    partitions => INT32
+  endpoint_information_epoch => INT32
+  partitions_by_user_endpoint => user_endpoint [active_partitions] [standby_partitions] _tagged_fields 
+    user_endpoint => host port _tagged_fields 
+      host => COMPACT_STRING
+      port => UINT16
+    active_partitions => topic [partitions] _tagged_fields 
+      topic => COMPACT_STRING
+      partitions => INT32
+    standby_partitions => topic [partitions] _tagged_fields 
+      topic => COMPACT_STRING
+      partitions => INT32
+

Response header version: 1

Field | Description
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The top-level error code, or 0 if there was no error
error_message | The top-level error message, or null if there was no error.
member_id | The member id is always generated by the streams consumer.
member_epoch | The member epoch.
heartbeat_interval_ms | The heartbeat interval in milliseconds.
acceptable_recovery_lag | The maximal lag a warm-up task can have to be considered caught-up.
task_offset_interval_ms | The interval in which the task changelog offsets on a client are updated on the broker. The offsets are sent with the next heartbeat after this time has passed.
status | Indicate zero or more status for the group. Null if unchanged since last heartbeat.
status_code | A code to indicate that a particular status is active for the group membership
status_detail | A string representation of the status.
_tagged_fields | The tagged fields
active_tasks | Assigned active tasks for this client. Null if unchanged since last heartbeat.
subtopology_id | The subtopology identifier.
partitions | The partitions of the input topics processed by this member.
_tagged_fields | The tagged fields
standby_tasks | Assigned standby tasks for this client. Null if unchanged since last heartbeat.
warmup_tasks | Assigned warm-up tasks for this client. Null if unchanged since last heartbeat.
endpoint_information_epoch | The endpoint epoch set in the response
partitions_by_user_endpoint | Global assignment information used for IQ. Null if unchanged since last heartbeat.
user_endpoint | User-defined endpoint to connect to the node
host | host of the endpoint
port | port of the endpoint
_tagged_fields | The tagged fields
active_partitions | All topic partitions materialized by active tasks on the node
topic | topic name
partitions | partitions
_tagged_fields | The tagged fields
standby_partitions | All topic partitions materialized by standby tasks on the node
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
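In both the heartbeat request and response, active_tasks, standby_tasks, and warmup_tasks are lists of entries that share one shape: a subtopology id plus the partitions of its input topics, with a null list meaning "unchanged since the last heartbeat". The tiny Java sketch below only illustrates that shape; the record name is hypothetical and not a Kafka API type.

```java
import java.util.List;

// Illustrative only -- not a Kafka API type. Each task entry in active_tasks,
// standby_tasks and warmup_tasks pairs a subtopology id with the partitions of
// its input topics; a null list in the protocol means "unchanged since the last heartbeat".
public record TaskIds(String subtopologyId, List<Integer> partitions) {

    public static void main(String[] args) {
        TaskIds active = new TaskIds("subtopology-0", List.of(0, 1, 2));
        System.out.println(active.subtopologyId() + " -> " + active.partitions());
    }
}
```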
+
StreamsGroupDescribe API (Key: 89):
+ +Requests:
+
StreamsGroupDescribe Request (Version: 0) => [group_ids] include_authorized_operations _tagged_fields 
+  group_ids => COMPACT_STRING
+  include_authorized_operations => BOOLEAN
+

This version of the request is unstable.

Request header version: 2

Field | Description
group_ids | The ids of the groups to describe
include_authorized_operations | Whether to include authorized operations.
_tagged_fields | The tagged fields
+
+Responses:
+
StreamsGroupDescribe Response (Version: 0) => throttle_time_ms [groups] _tagged_fields 
+  throttle_time_ms => INT32
+  groups => error_code error_message group_id group_state group_epoch assignment_epoch topology [members] authorized_operations _tagged_fields 
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+    group_id => COMPACT_STRING
+    group_state => COMPACT_STRING
+    group_epoch => INT32
+    assignment_epoch => INT32
+    topology => epoch [subtopologies] _tagged_fields 
+      epoch => INT32
+      subtopologies => subtopology_id [source_topics] [repartition_sink_topics] [state_changelog_topics] [repartition_source_topics] _tagged_fields 
+        subtopology_id => COMPACT_STRING
+        source_topics => COMPACT_STRING
+        repartition_sink_topics => COMPACT_STRING
+        state_changelog_topics => name partitions replication_factor [topic_configs] _tagged_fields 
+          name => COMPACT_STRING
+          partitions => INT32
+          replication_factor => INT16
+          topic_configs => key value _tagged_fields 
+            key => COMPACT_STRING
+            value => COMPACT_STRING
+        repartition_source_topics => name partitions replication_factor [topic_configs] _tagged_fields 
+          name => COMPACT_STRING
+          partitions => INT32
+          replication_factor => INT16
+          topic_configs => key value _tagged_fields 
+            key => COMPACT_STRING
+            value => COMPACT_STRING
+    members => member_id member_epoch instance_id rack_id client_id client_host topology_epoch process_id user_endpoint [client_tags] [task_offsets] [task_end_offsets] assignment target_assignment is_classic _tagged_fields 
+      member_id => COMPACT_STRING
+      member_epoch => INT32
+      instance_id => COMPACT_NULLABLE_STRING
+      rack_id => COMPACT_NULLABLE_STRING
+      client_id => COMPACT_STRING
+      client_host => COMPACT_STRING
+      topology_epoch => INT32
+      process_id => COMPACT_STRING
+      user_endpoint => host port _tagged_fields 
+        host => COMPACT_STRING
+        port => UINT16
+      client_tags => key value _tagged_fields 
+        key => COMPACT_STRING
+        value => COMPACT_STRING
+      task_offsets => subtopology_id partition offset _tagged_fields 
+        subtopology_id => COMPACT_STRING
+        partition => INT32
+        offset => INT64
+      task_end_offsets => subtopology_id partition offset _tagged_fields 
+        subtopology_id => COMPACT_STRING
+        partition => INT32
+        offset => INT64
+      assignment => [active_tasks] [standby_tasks] [warmup_tasks] _tagged_fields 
+        active_tasks => subtopology_id [partitions] _tagged_fields 
+          subtopology_id => COMPACT_STRING
+          partitions => INT32
+        standby_tasks => subtopology_id [partitions] _tagged_fields 
+          subtopology_id => COMPACT_STRING
+          partitions => INT32
+        warmup_tasks => subtopology_id [partitions] _tagged_fields 
+          subtopology_id => COMPACT_STRING
+          partitions => INT32
+      target_assignment => [active_tasks] [standby_tasks] [warmup_tasks] _tagged_fields 
+        active_tasks => subtopology_id [partitions] _tagged_fields 
+          subtopology_id => COMPACT_STRING
+          partitions => INT32
+        standby_tasks => subtopology_id [partitions] _tagged_fields 
+          subtopology_id => COMPACT_STRING
+          partitions => INT32
+        warmup_tasks => subtopology_id [partitions] _tagged_fields 
+          subtopology_id => COMPACT_STRING
+          partitions => INT32
+      is_classic => BOOLEAN
+    authorized_operations => INT32
+

Response header version: 1

Field | Description
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
groups | Each described group.
error_code | The describe error, or 0 if there was no error.
error_message | The top-level error message, or null if there was no error.
group_id | The group ID string.
group_state | The group state string, or the empty string.
group_epoch | The group epoch.
assignment_epoch | The assignment epoch.
topology | The topology metadata currently initialized for the streams application. Can be null in case of a describe error.
epoch | The epoch of the currently initialized topology for this group.
subtopologies | The subtopologies of the streams application. This contains the configured subtopologies, where the number of partitions are set and any regular expressions are resolved to actual topics. Null if the group is uninitialized, source topics are missing or incorrectly partitioned.
subtopology_id | String to uniquely identify the subtopology.
source_topics | The topics the subtopology reads from.
repartition_sink_topics | The repartition topics the subtopology writes to.
state_changelog_topics | The set of state changelog topics associated with this subtopology. Created automatically.
name | The name of the topic.
partitions | The number of partitions in the topic. Can be 0 if no specific number of partitions is enforced. Always 0 for changelog topics.
replication_factor | The replication factor of the topic. Can be 0 if the default replication factor should be used.
topic_configs | Topic-level configurations as key-value pairs.
key | key of the config
value | value of the config
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
repartition_source_topics | The set of source topics that are internally created repartition topics. Created automatically.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
members | The members.
member_id | The member ID.
member_epoch | The member epoch.
instance_id | The member instance ID for static membership.
rack_id | The rack ID.
client_id | The client ID.
client_host | The client host.
topology_epoch | The epoch of the topology on the client.
process_id | Identity of the streams instance that may have multiple clients.
user_endpoint | User-defined endpoint for Interactive Queries. Null if not defined for this client.
host | host of the endpoint
port | port of the endpoint
_tagged_fields | The tagged fields
client_tags | Used for rack-aware assignment algorithm.
task_offsets | Cumulative changelog offsets for tasks.
subtopology_id | The subtopology identifier.
partition | The partition.
offset | The offset.
_tagged_fields | The tagged fields
task_end_offsets | Cumulative changelog end offsets for tasks.
assignment | The current assignment.
active_tasks | Active tasks for this client.
subtopology_id | The subtopology identifier.
partitions | The partitions of the input topics processed by this member.
_tagged_fields | The tagged fields
standby_tasks | Standby tasks for this client.
warmup_tasks | Warm-up tasks for this client.
_tagged_fields | The tagged fields
target_assignment | The target assignment.
is_classic | True for classic members that have not been upgraded yet.
_tagged_fields | The tagged fields
authorized_operations | 32-bit bitfield to represent authorized operations for this group.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
DescribeShareGroupOffsets API (Key: 90):
+ +Requests:
+
DescribeShareGroupOffsets Request (Version: 0) => [groups] _tagged_fields 
+  groups => group_id [topics] _tagged_fields 
+    group_id => COMPACT_STRING
+    topics => topic_name [partitions] _tagged_fields 
+      topic_name => COMPACT_STRING
+      partitions => INT32
+

Request header version: 2

Field | Description
groups | The groups to describe offsets for.
group_id | The group identifier.
topics | The topics to describe offsets for, or null for all topic-partitions.
topic_name | The topic name.
partitions | The partitions.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+Responses:
+
DescribeShareGroupOffsets Response (Version: 0) => throttle_time_ms [groups] _tagged_fields 
+  throttle_time_ms => INT32
+  groups => group_id [topics] error_code error_message _tagged_fields 
+    group_id => COMPACT_STRING
+    topics => topic_name topic_id [partitions] _tagged_fields 
+      topic_name => COMPACT_STRING
+      topic_id => UUID
+      partitions => partition_index start_offset leader_epoch error_code error_message _tagged_fields 
+        partition_index => INT32
+        start_offset => INT64
+        leader_epoch => INT32
+        error_code => INT16
+        error_message => COMPACT_NULLABLE_STRING
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

Field | Description
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
groups | The results for each group.
group_id | The group identifier.
topics | The results for each topic.
topic_name | The topic name.
topic_id | The unique topic ID.
partitions |
partition_index | The partition index.
start_offset | The share-partition start offset.
leader_epoch | The leader epoch of the partition.
error_code | The partition-level error code, or 0 if there was no error.
error_message | The partition-level error message, or null if there was no error.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
error_code | The group-level error code, or 0 if there was no error.
error_message | The group-level error message, or null if there was no error.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
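Because this response nests errors at two levels (a group-level error_code/error_message plus per-partition error fields inside each topic), a reader should check the group-level error before trusting any partition-level start_offset. The sketch below illustrates that pattern with hypothetical Java types; it is not the Kafka Admin API.

```java
import java.util.List;

// Illustrative sketch only (hypothetical types, not the Kafka Admin API):
// check the group-level error before interpreting per-partition start offsets.
record PartitionOffset(int partitionIndex, long startOffset, short errorCode, String errorMessage) {}
record TopicOffsets(String topicName, List<PartitionOffset> partitions) {}
record GroupOffsets(String groupId, short errorCode, String errorMessage, List<TopicOffsets> topics) {}

class ShareGroupOffsetsPrinter {
    static void print(GroupOffsets group) {
        if (group.errorCode() != 0) {                 // group-level error wins
            System.err.println(group.groupId() + ": " + group.errorMessage());
            return;
        }
        for (TopicOffsets topic : group.topics()) {
            for (PartitionOffset p : topic.partitions()) {
                if (p.errorCode() == 0) {             // then the partition-level check
                    System.out.println(topic.topicName() + "-" + p.partitionIndex()
                            + " start offset = " + p.startOffset());
                }
            }
        }
    }
}
```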
+
AlterShareGroupOffsets API (Key: 91):
+ +Requests:
+
AlterShareGroupOffsets Request (Version: 0) => group_id [topics] _tagged_fields 
+  group_id => COMPACT_STRING
+  topics => topic_name [partitions] _tagged_fields 
+    topic_name => COMPACT_STRING
+    partitions => partition_index start_offset _tagged_fields 
+      partition_index => INT32
+      start_offset => INT64
+

Request header version: 2

Field | Description
group_id | The group identifier.
topics | The topics to alter offsets for.
topic_name | The topic name.
partitions | Each partition to alter offsets for.
partition_index | The partition index.
start_offset | The share-partition start offset.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+Responses:
+
AlterShareGroupOffsets Response (Version: 0) => throttle_time_ms error_code error_message [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  responses => topic_name topic_id [partitions] _tagged_fields 
+    topic_name => COMPACT_STRING
+    topic_id => UUID
+    partitions => partition_index error_code error_message _tagged_fields 
+      partition_index => INT32
+      error_code => INT16
+      error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

Field | Description
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The top-level error code, or 0 if there was no error.
error_message | The top-level error message, or null if there was no error.
responses | The results for each topic.
topic_name | The topic name.
topic_id | The unique topic ID.
partitions |
partition_index | The partition index.
error_code | The error code, or 0 if there was no error.
error_message | The error message, or null if there was no error.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+
DeleteShareGroupOffsets API (Key: 92):
+ +Requests:
+
DeleteShareGroupOffsets Request (Version: 0) => group_id [topics] _tagged_fields 
+  group_id => COMPACT_STRING
+  topics => topic_name _tagged_fields 
+    topic_name => COMPACT_STRING
+

Request header version: 2

Field | Description
group_id | The group identifier.
topics | The topics to delete offsets for.
topic_name | The topic name.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+Responses:
+
DeleteShareGroupOffsets Response (Version: 0) => throttle_time_ms error_code error_message [responses] _tagged_fields 
+  throttle_time_ms => INT32
+  error_code => INT16
+  error_message => COMPACT_NULLABLE_STRING
+  responses => topic_name topic_id error_code error_message _tagged_fields 
+    topic_name => COMPACT_STRING
+    topic_id => UUID
+    error_code => INT16
+    error_message => COMPACT_NULLABLE_STRING
+

Response header version: 1

Field | Description
throttle_time_ms | The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
error_code | The top-level error code, or 0 if there was no error.
error_message | The top-level error message, or null if there was no error.
responses | The results for each topic.
topic_name | The topic name.
topic_id | The unique topic ID.
error_code | The topic-level error code, or 0 if there was no error.
error_message | The topic-level error message, or null if there was no error.
_tagged_fields | The tagged fields
_tagged_fields | The tagged fields
+
+ diff --git a/static/41/generated/protocol_types.html b/static/41/generated/protocol_types.html new file mode 100644 index 000000000..1c19ae9d1 --- /dev/null +++ b/static/41/generated/protocol_types.html @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Type | Description
BOOLEAN | Represents a boolean value in a byte. Values 0 and 1 are used to represent false and true respectively. When reading a boolean value, any non-zero value is considered true.
INT8 | Represents an integer between -2^7 and 2^7-1 inclusive.
INT16 | Represents an integer between -2^15 and 2^15-1 inclusive. The values are encoded using two bytes in network byte order (big-endian).
INT32 | Represents an integer between -2^31 and 2^31-1 inclusive. The values are encoded using four bytes in network byte order (big-endian).
INT64 | Represents an integer between -2^63 and 2^63-1 inclusive. The values are encoded using eight bytes in network byte order (big-endian).
UINT16 | Represents an integer between 0 and 65535 inclusive. The values are encoded using two bytes in network byte order (big-endian).
UINT32 | Represents an integer between 0 and 2^32-1 inclusive. The values are encoded using four bytes in network byte order (big-endian).
VARINT | Represents an integer between -2^31 and 2^31-1 inclusive. Encoding follows the variable-length zig-zag encoding from Google Protocol Buffers.
VARLONG | Represents an integer between -2^63 and 2^63-1 inclusive. Encoding follows the variable-length zig-zag encoding from Google Protocol Buffers.
UUID | Represents a type 4 immutable universally unique identifier (Uuid). The values are encoded using sixteen bytes in network byte order (big-endian).
FLOAT64 | Represents a double-precision 64-bit format IEEE 754 value. The values are encoded using eight bytes in network byte order (big-endian).
STRING | Represents a sequence of characters. First the length N is given as an INT16. Then N bytes follow which are the UTF-8 encoding of the character sequence. Length must not be negative.
COMPACT_STRING | Represents a sequence of characters. First the length N + 1 is given as an UNSIGNED_VARINT. Then N bytes follow which are the UTF-8 encoding of the character sequence.
NULLABLE_STRING | Represents a sequence of characters or null. For non-null strings, first the length N is given as an INT16. Then N bytes follow which are the UTF-8 encoding of the character sequence. A null value is encoded with length of -1 and there are no following bytes.
COMPACT_NULLABLE_STRING | Represents a sequence of characters. First the length N + 1 is given as an UNSIGNED_VARINT. Then N bytes follow which are the UTF-8 encoding of the character sequence. A null string is represented with a length of 0.
BYTES | Represents a raw sequence of bytes. First the length N is given as an INT32. Then N bytes follow.
COMPACT_BYTES | Represents a raw sequence of bytes. First the length N+1 is given as an UNSIGNED_VARINT. Then N bytes follow.
NULLABLE_BYTES | Represents a raw sequence of bytes or null. For non-null values, first the length N is given as an INT32. Then N bytes follow. A null value is encoded with length of -1 and there are no following bytes.
COMPACT_NULLABLE_BYTES | Represents a raw sequence of bytes. First the length N+1 is given as an UNSIGNED_VARINT. Then N bytes follow. A null object is represented with a length of 0.
RECORDS | Represents a sequence of Kafka records as NULLABLE_BYTES. For a detailed description of records see Message Sets.
COMPACT_RECORDS | Represents a sequence of Kafka records as COMPACT_NULLABLE_BYTES. For a detailed description of records see Message Sets.
ARRAY | Represents a sequence of objects of a given type T. Type T can be either a primitive type (e.g. STRING) or a structure. First, the length N is given as an INT32. Then N instances of type T follow. A null array is represented with a length of -1. In protocol documentation an array of T instances is referred to as [T].
COMPACT_ARRAY | Represents a sequence of objects of a given type T. Type T can be either a primitive type (e.g. STRING) or a structure. First, the length N + 1 is given as an UNSIGNED_VARINT. Then N instances of type T follow. A null array is represented with a length of 0. In protocol documentation an array of T instances is referred to as [T].
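The compact types above all prefix their payload with a length encoded as an UNSIGNED_VARINT (base-128: seven data bits per byte, high bit set when more bytes follow). The short sketch below shows how a COMPACT_STRING would be laid out on the wire following that description; it is a minimal illustration, not the Kafka serialization code itself.

```java
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

// Minimal sketch of the COMPACT_STRING layout: length N + 1 as an UNSIGNED_VARINT,
// then the N UTF-8 bytes of the string. Not the actual Kafka serialization code.
final class CompactStringSketch {

    static void writeUnsignedVarint(int value, ByteArrayOutputStream out) {
        while ((value & 0xFFFFFF80) != 0) {      // more than 7 bits remaining
            out.write((value & 0x7F) | 0x80);    // low 7 bits plus continuation flag
            value >>>= 7;
        }
        out.write(value);                        // final byte, continuation flag clear
    }

    static byte[] writeCompactString(String s) {
        byte[] utf8 = s.getBytes(StandardCharsets.UTF_8);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        writeUnsignedVarint(utf8.length + 1, out); // length N is stored as N + 1
        out.write(utf8, 0, utf8.length);
        return out.toByteArray();
    }

    public static void main(String[] args) {
        byte[] encoded = writeCompactString("my-group"); // 8 UTF-8 bytes
        // Prints "9 bytes, first byte = 9": one varint length byte (value 9) plus 8 payload bytes.
        System.out.println(encoded.length + " bytes, first byte = " + encoded[0]);
    }
}
```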
+ diff --git a/static/41/generated/remote_log_manager_config.html b/static/41/generated/remote_log_manager_config.html new file mode 100644 index 000000000..ba9309d0c --- /dev/null +++ b/static/41/generated/remote_log_manager_config.html @@ -0,0 +1,263 @@ +
    +
  • +

    log.local.retention.bytes

    +

    The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. Default value is -2, it represents `log.retention.bytes` value to be used. The effective value should always be less than or equal to `log.retention.bytes` value.

    + + + + + +
    Type:long
    Default:-2
    Valid Values:[-2,...]
    Importance:medium
    +
  • +
  • +

    log.local.retention.ms

    +

    The number of milliseconds to keep the local log segments before it gets eligible for deletion. Default value is -2, it represents `log.retention.ms` value is to be used. The effective value should always be less than or equal to `log.retention.ms` value.

    + + + + + +
    Type:long
    Default:-2
    Valid Values:[-2,...]
    Importance:medium
    +
  • +
  • +

    remote.fetch.max.wait.ms

    +

    The maximum amount of time the server will wait before answering the remote fetch request. Note that the broker currently only fetches one partition per fetch request from the remote store. (KAFKA-14915)

    + + + + + +
    Type:int
    Default:500
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    remote.list.offsets.request.timeout.ms

    +

    The maximum amount of time the server will wait for the remote list offsets request to complete.

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    remote.log.manager.copier.thread.pool.size

    +

    Size of the thread pool used in scheduling tasks to copy segments.

    + + + + + +
    Type:int
    Default:10
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    remote.log.manager.copy.max.bytes.per.second

    +

    The maximum number of bytes that can be copied from local storage to remote storage per second. This is a global limit for all the partitions that are being copied from local storage to remote storage. The default value is Long.MAX_VALUE, which means there is no limit on the number of bytes that can be copied per second.

    + + + + + +
    Type:long
    Default:9223372036854775807
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    remote.log.manager.copy.quota.window.num

    +

    The number of samples to retain in memory for remote copy quota management. The default value is 11, which means there are 10 whole windows + 1 current window.

    + + + + + +
    Type:int
    Default:11
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    remote.log.manager.copy.quota.window.size.seconds

    +

    The time span of each sample for remote copy quota management. The default value is 1 second.

    + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    remote.log.manager.expiration.thread.pool.size

    +

    Size of the thread pool used in scheduling tasks to clean up the expired remote log segments.

    + + + + + +
    Type:int
    Default:10
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    remote.log.manager.fetch.max.bytes.per.second

    +

    The maximum number of bytes that can be fetched from remote storage to local storage per second. This is a global limit for all the partitions that are being fetched from remote storage to local storage. The default value is Long.MAX_VALUE, which means there is no limit on the number of bytes that can be fetched per second.

    + + + + + +
    Type:long
    Default:9223372036854775807
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    remote.log.manager.fetch.quota.window.num

    +

    The number of samples to retain in memory for remote fetch quota management. The default value is 11, which means there are 10 whole windows + 1 current window.

    + + + + + +
    Type:int
    Default:11
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    remote.log.manager.fetch.quota.window.size.seconds

    +

    The time span of each sample for remote fetch quota management. The default value is 1 second.

    + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    remote.log.manager.thread.pool.size

    +

    Size of the thread pool used in scheduling follower tasks to read the highest-uploaded remote-offset for follower partitions.

    + + + + + +
    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    remote.log.metadata.manager.class.name

    +

    Fully qualified class name of `RemoteLogMetadataManager` implementation.

    + + + + + +
    Type:string
    Default:org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager
    Valid Values:non-empty string
    Importance:medium
    +
  • +
  • +

    remote.log.metadata.manager.class.path

    +

    Class path of the `RemoteLogMetadataManager` implementation. If specified, the RemoteLogMetadataManager implementation and its dependent libraries will be loaded by a dedicated classloader which searches this class path before the Kafka broker class path. The syntax of this parameter is the same as for a standard Java class path string.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    remote.log.metadata.manager.impl.prefix

    +

    Prefix used for properties to be passed to RemoteLogMetadataManager implementation. For example this value can be `rlmm.config.`.

    + + + + + +
    Type:string
    Default:rlmm.config.
    Valid Values:non-empty string
    Importance:medium
    +
  • +
  • +

    remote.log.metadata.manager.listener.name

    +

    Listener name of the local broker to which it should get connected if needed by RemoteLogMetadataManager implementation.

    + + + + + +
    Type:string
    Default:null
    Valid Values:non-empty string
    Importance:medium
    +
  • +
  • +

    remote.log.reader.max.pending.tasks

    +

    Maximum remote log reader thread pool task queue size. If the task queue is full, fetch requests are served with an error.

    + + + + + +
    Type:int
    Default:100
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    remote.log.reader.threads

    +

    Size of the thread pool that is allocated for handling remote log reads.

    + + + + + +
    Type:int
    Default:10
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    remote.log.storage.manager.class.name

    +

    Fully qualified class name of `RemoteStorageManager` implementation.

    + + + + + +
    Type:string
    Default:null
    Valid Values:non-empty string
    Importance:medium
    +
  • +
  • +

    remote.log.storage.manager.class.path

    +

    Class path of the `RemoteStorageManager` implementation. If specified, the RemoteStorageManager implementation and its dependent libraries will be loaded by a dedicated classloader which searches this class path before the Kafka broker class path. The syntax of this parameter is the same as for a standard Java class path string.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    remote.log.storage.manager.impl.prefix

    +

    Prefix used for properties to be passed to RemoteStorageManager implementation. For example this value can be `rsm.config.`.

    + + + + + +
    Type:string
    Default:rsm.config.
    Valid Values:non-empty string
    Importance:medium
    +
  • +
  • +

    remote.log.storage.system.enable

    +

    Whether to enable tiered storage functionality in a broker or not. When it is true, the broker starts all the services required for the tiered storage functionality.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    +
  • +
  • +

    remote.log.index.file.cache.total.size.bytes

    +

    The total size of the space allocated to store index files fetched from remote storage in the local storage.

    + + + + + +
    Type:long
    Default:1073741824 (1 gibibyte)
    Valid Values:[1,...]
    Importance:low
    +
  • +
  • +

    remote.log.manager.task.interval.ms

    +

    Interval at which remote log manager runs the scheduled tasks like copy segments, and clean up remote log segments.

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[1,...]
    Importance:low
    +
  • +
  • +

    remote.log.metadata.custom.metadata.max.bytes

    +

    The maximum size of custom metadata in bytes that the broker should accept from a remote storage plugin. If custom metadata exceeds this limit, the updated segment metadata will not be stored, an attempt will be made to delete the copied data, and the remote copying task for this topic-partition will stop with an error.

    + + + + + +
    Type:int
    Default:128
    Valid Values:[0,...]
    Importance:low
    +
  • +
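A minimal sketch of how the settings above fit together when turning on tiered storage, using only configuration names documented in this list. The plugin class `com.example.MyRemoteStorageManager` and the `rsm.config.bucket` property are hypothetical placeholders; in practice these lines would live in the broker's server.properties rather than in Java code.

```java
import java.util.Properties;

// Sketch only: minimum tiered-storage settings, with a hypothetical RemoteStorageManager plugin.
final class TieredStorageConfigSketch {
    public static void main(String[] args) {
        Properties broker = new Properties();
        broker.put("remote.log.storage.system.enable", "true");                 // start the tiered storage services
        broker.put("remote.log.storage.manager.class.name",
                "com.example.MyRemoteStorageManager");                          // hypothetical plugin class
        broker.put("remote.log.metadata.manager.class.name",
                "org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager"); // documented default
        broker.put("remote.log.storage.manager.impl.prefix", "rsm.config.");    // documented default prefix
        broker.put("rsm.config.bucket", "my-bucket");                           // hypothetical property forwarded to the plugin via the prefix
        broker.list(System.out);
    }
}
```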
+ diff --git a/static/41/generated/remote_log_metadata_manager_config.html b/static/41/generated/remote_log_metadata_manager_config.html new file mode 100644 index 000000000..db8af9481 --- /dev/null +++ b/static/41/generated/remote_log_metadata_manager_config.html @@ -0,0 +1,63 @@ +
  • remote.log.metadata.consume.wait.ms

    The amount of time in milliseconds to wait for the local consumer to receive the published event.

    Type: long | Default: 120000 (2 minutes) | Valid Values: [0,...] | Importance: low

  • remote.log.metadata.initialization.retry.interval.ms

    The retry interval in milliseconds for retrying RemoteLogMetadataManager resources initialization again.

    Type: long | Default: 100 | Valid Values: [0,...] | Importance: low

  • remote.log.metadata.initialization.retry.max.timeout.ms

    The maximum amount of time in milliseconds for retrying RemoteLogMetadataManager resources initialization. When total retry intervals reach this timeout, initialization is considered as failed and broker starts shutting down.

    Type: long | Default: 120000 (2 minutes) | Valid Values: [0,...] | Importance: low

  • remote.log.metadata.topic.num.partitions

    The number of partitions for remote log metadata topic.

    Type: int | Default: 50 | Valid Values: [1,...] | Importance: low

  • remote.log.metadata.topic.replication.factor

    Replication factor of remote log metadata topic.

    Type: short | Default: 3 | Valid Values: [1,...] | Importance: low

  • remote.log.metadata.topic.retention.ms

    Retention of remote log metadata topic in milliseconds. Default: -1, that means unlimited. Users can configure this value based on their use cases. To avoid any data loss, this value should be more than the maximum retention period of any topic enabled with tiered storage in the cluster.

    Type: long | Default: -1 | Valid Values: | Importance: low
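These settings apply to the default topic-based RemoteLogMetadataManager. When set in the broker configuration they are typically forwarded to the plugin through the prefix configured by remote.log.metadata.manager.impl.prefix ("rlmm.config." by default); the sketch below assumes that pass-through behaviour and uses only the documented keys and defaults.

```java
import java.util.Properties;

// Sketch only: forwarding RemoteLogMetadataManager settings via the default "rlmm.config." prefix.
final class RemoteLogMetadataConfigSketch {
    public static void main(String[] args) {
        Properties broker = new Properties();
        broker.put("rlmm.config.remote.log.metadata.topic.num.partitions", "50");    // documented default
        broker.put("rlmm.config.remote.log.metadata.topic.replication.factor", "3"); // documented default
        broker.put("rlmm.config.remote.log.metadata.topic.retention.ms", "-1");      // -1 = unlimited, per the table above
        broker.list(System.out);
    }
}
```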
+ diff --git a/static/41/generated/sink_connector_config.html b/static/41/generated/sink_connector_config.html new file mode 100644 index 000000000..c8a00db60 --- /dev/null +++ b/static/41/generated/sink_connector_config.html @@ -0,0 +1,243 @@ +
    +
  • +

    name

    +

    Globally unique name to use for this connector.

    + + + + + +
    Type:string
    Default:
    Valid Values:non-empty string without ISO control characters
    Importance:high
    +
  • +
  • +

    connector.class

    +

    Name or alias of the class for this connector. Must be a subclass of org.apache.kafka.connect.connector.Connector. If the connector is org.apache.kafka.connect.file.FileStreamSinkConnector, you can either specify this full name, or use "FileStreamSink" or "FileStreamSinkConnector" to make the configuration a bit shorter

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    connector.plugin.version

    +

    Version of the connector.

    + + + + + +
    Type:string
    Default:null
    Valid Values:org.apache.kafka.connect.runtime.ConnectorConfig$PluginVersionValidator@799f7e29
    Importance:medium
    +
  • +
  • +

    tasks.max

    +

    Maximum number of tasks to use for this connector.

    + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:high
    +
  • +
  • +

    topics

    +

    List of topics to consume, separated by commas

    + + + + + +
    Type:list
    Default:""
    Valid Values:
    Importance:high
    +
  • +
  • +

    topics.regex

    +

    Regular expression giving topics to consume. Under the hood, the regex is compiled to a java.util.regex.Pattern. Only one of topics or topics.regex should be specified.

    + + + + + +
    Type:string
    Default:""
    Valid Values:valid regex
    Importance:high
    +
  • +
  • +

    tasks.max.enforce

    +

    (Deprecated) Whether to enforce that the tasks.max property is respected by the connector. By default, connectors that generate too many tasks will fail, and existing sets of tasks that exceed the tasks.max property will also be failed. If this property is set to false, then connectors will be allowed to generate more than the maximum number of tasks, and existing sets of tasks that exceed the tasks.max property will be allowed to run. This property is deprecated and will be removed in an upcoming major release.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    key.converter

    +

    Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the keys in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.

    + + + + + +
    Type:class
    Default:null
    Valid Values:A concrete subclass of org.apache.kafka.connect.storage.Converter, A class with a public, no-argument constructor
    Importance:low
    +
  • +
  • +

    key.converter.plugin.version

    +

    Version of the key converter.

    + + + + + +
    Type:string
    Default:null
    Valid Values:org.apache.kafka.connect.runtime.ConnectorConfig$PluginVersionValidator@531d72ca
    Importance:low
    +
  • +
  • +

    value.converter

    +

    Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.

    + + + + + +
    Type:class
    Default:null
    Valid Values:A concrete subclass of org.apache.kafka.connect.storage.Converter, A class with a public, no-argument constructor
    Importance:low
    +
  • +
  • +

    value.converter.plugin.version

    +

    Version of the value converter.

    + + + + + +
    Type:string
    Default:null
    Valid Values:org.apache.kafka.connect.runtime.ConnectorConfig$PluginVersionValidator@22d8cfe0
    Importance:low
    +
  • +
  • +

    header.converter

    +

    HeaderConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the header values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro. By default, the SimpleHeaderConverter is used to serialize header values to strings and deserialize them by inferring the schemas.

    + + + + + +
    Type:class
    Default:null
    Valid Values:A concrete subclass of org.apache.kafka.connect.storage.HeaderConverter, A class with a public, no-argument constructor
    Importance:low
    +
  • +
  • +

    header.converter.plugin.version

    +

    Version of the header converter.

    + + + + + +
    Type:string
    Default:null
    Valid Values:org.apache.kafka.connect.runtime.ConnectorConfig$PluginVersionValidator@579bb367
    Importance:low
    +
  • +
  • +

    config.action.reload

    +

    The action that Connect should take on the connector when changes in external configuration providers result in a change in the connector's configuration properties. A value of 'none' indicates that Connect will do nothing. A value of 'restart' indicates that Connect should restart/reload the connector with the updated configuration properties.The restart may actually be scheduled in the future if the external configuration provider indicates that a configuration value will expire in the future.

    + + + + + +
    Type:string
    Default:restart
    Valid Values:[none, restart]
    Importance:low
    +
  • +
  • +

    transforms

    +

    Aliases for the transformations to be applied to records.

    + + + + + +
    Type:list
    Default:""
    Valid Values:non-null string, unique transformation aliases
    Importance:low
    +
  • +
  • +

    predicates

    +

    Aliases for the predicates used by transformations.

    + + + + + +
    Type:list
    Default:""
    Valid Values:non-null string, unique predicate aliases
    Importance:low
    +
  • +
  • +

    errors.retry.timeout

    +

    The maximum duration in milliseconds that a failed operation will be reattempted. The default is 0, which means no retries will be attempted. Use -1 for infinite retries.

    + + + + + +
    Type:long
    Default:0
    Valid Values:
    Importance:medium
    +
  • +
  • +

    errors.retry.delay.max.ms

    +

    The maximum duration in milliseconds between consecutive retry attempts. Jitter will be added to the delay once this limit is reached to prevent thundering herd issues.

    + + + + + +
    Type:long
    Default:60000 (1 minute)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    errors.tolerance

    +

    Behavior for tolerating errors during connector operation. 'none' is the default value and signals that any error will result in an immediate connector task failure; 'all' changes the behavior to skip over problematic records.

    + + + + + +
    Type:string
    Default:none
    Valid Values:[none, all]
    Importance:medium
    +
  • +
  • +

    errors.log.enable

    +

    If true, write each error and the details of the failed operation and problematic record to the Connect application log. This is 'false' by default, so that only errors that are not tolerated are reported.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    +
  • +
  • +

    errors.log.include.messages

    +

    Whether to include in the log the Connect record that resulted in a failure. For sink records, the topic, partition, offset, and timestamp will be logged. For source records, the key and value (and their schemas), all headers, and the timestamp, Kafka topic, Kafka partition, source partition, and source offset will be logged. This is 'false' by default, which will prevent record keys, values, and headers from being written to log files.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    +
  • +
  • +

    errors.deadletterqueue.topic.name

    +

    The name of the topic to be used as the dead letter queue (DLQ) for messages that result in an error when processed by this sink connector, or its transformations or converters. The topic name is blank by default, which means that no messages are to be recorded in the DLQ.

    + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:medium
    +
  • +
  • +

    errors.deadletterqueue.topic.replication.factor

    +

    Replication factor used to create the dead letter queue topic when it doesn't already exist.

    + + + + + +
    Type:short
    Default:3
    Valid Values:
    Importance:medium
    +
  • +
  • +

    errors.deadletterqueue.context.headers.enable

    +

    If true, add headers containing error context to the messages written to the dead letter queue. To avoid clashing with headers from the original record, all error context header keys will start with __connect.errors.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    +
  • +
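A minimal sink connector configuration sketch using only keys documented above. Connector configs are normally submitted to Kafka Connect (for example as JSON through its REST API); a plain Java map is used here purely for illustration, and connector-specific settings (such as the output file for FileStreamSink) are omitted.

```java
import java.util.Map;

// Sketch only: a minimal sink connector configuration built from the keys documented above.
final class SinkConfigExample {
    static Map<String, String> config() {
        return Map.of(
            "name", "local-file-sink",                        // globally unique connector name
            "connector.class", "FileStreamSink",              // short alias accepted per the connector.class description
            "tasks.max", "1",
            "topics", "orders,payments",                      // comma-separated list of topics to consume
            "errors.tolerance", "all",                        // skip problematic records instead of failing the task
            "errors.deadletterqueue.topic.name", "dlq-file-sink"
        );
    }

    public static void main(String[] args) {
        config().forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```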
+ diff --git a/static/41/generated/source_connector_config.html b/static/41/generated/source_connector_config.html new file mode 100644 index 000000000..ecbc0bc5c --- /dev/null +++ b/static/41/generated/source_connector_config.html @@ -0,0 +1,243 @@ +
    +
  • +

    name

    +

    Globally unique name to use for this connector.

    + + + + + +
    Type:string
    Default:
    Valid Values:non-empty string without ISO control characters
    Importance:high
    +
  • +
  • +

    connector.class

    +

    Name or alias of the class for this connector. Must be a subclass of org.apache.kafka.connect.connector.Connector. If the connector is org.apache.kafka.connect.file.FileStreamSinkConnector, you can either specify this full name, or use "FileStreamSink" or "FileStreamSinkConnector" to make the configuration a bit shorter

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    connector.plugin.version

    +

    Version of the connector.

    + + + + + +
    Type:string
    Default:null
    Valid Values:org.apache.kafka.connect.runtime.ConnectorConfig$PluginVersionValidator@2d38eb89
    Importance:medium
    +
  • +
  • +

    tasks.max

    +

    Maximum number of tasks to use for this connector.

    + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:high
    +
  • +
  • +

    tasks.max.enforce

    +

    (Deprecated) Whether to enforce that the tasks.max property is respected by the connector. By default, connectors that generate too many tasks will fail, and existing sets of tasks that exceed the tasks.max property will also be failed. If this property is set to false, then connectors will be allowed to generate more than the maximum number of tasks, and existing sets of tasks that exceed the tasks.max property will be allowed to run. This property is deprecated and will be removed in an upcoming major release.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    key.converter

    +

    Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the keys in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.

    + + + + + +
    Type:class
    Default:null
    Valid Values:A concrete subclass of org.apache.kafka.connect.storage.Converter, A class with a public, no-argument constructor
    Importance:low
    +
  • +
  • +

    key.converter.plugin.version

    +

    Version of the key converter.

    + + + + + +
    Type:string
    Default:null
    Valid Values:org.apache.kafka.connect.runtime.ConnectorConfig$PluginVersionValidator@5fa7e7ff
    Importance:low
    +
  • +
  • +

    value.converter

    +

    Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.

    + + + + + +
    Type:class
    Default:null
    Valid Values:A concrete subclass of org.apache.kafka.connect.storage.Converter, A class with a public, no-argument constructor
    Importance:low
    +
  • +
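Where a single connector needs a different serialization format than the worker default, the key and value converters can be overridden per connector. A hedged sketch is shown below; it assumes the JSON converter that ships with Connect, and schemas.enable is a converter-specific property passed through via the value.converter. prefix.

```java
import java.util.HashMap;
import java.util.Map;

public class ConverterOverrideExample {
    public static Map<String, String> converterOverrides() {
        Map<String, String> config = new HashMap<>();
        // Override the worker-level converters for this connector only.
        config.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
        config.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
        // Properties prefixed with key.converter. / value.converter. are forwarded to the converter itself.
        config.put("value.converter.schemas.enable", "false");
        return config;
    }
}
```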
  • +

    value.converter.plugin.version

    +

    Version of the value converter.

    + + + + + +
    Type:string
    Default:null
    Valid Values:org.apache.kafka.connect.runtime.ConnectorConfig$PluginVersionValidator@4629104a
    Importance:low
    +
  • +
  • +

    header.converter

    +

    HeaderConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the header values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro. By default, the SimpleHeaderConverter is used to serialize header values to strings and deserialize them by inferring the schemas.

    + + + + + +
    Type:class
    Default:null
    Valid Values:A concrete subclass of org.apache.kafka.connect.storage.HeaderConverter, A class with a public, no-argument constructor
    Importance:low
    +
  • +
  • +

    header.converter.plugin.version

    +

    Version of the header converter.

    + + + + + +
    Type:string
    Default:null
    Valid Values:org.apache.kafka.connect.runtime.ConnectorConfig$PluginVersionValidator@27f8302d
    Importance:low
    +
  • +
  • +

    config.action.reload

    +

    The action that Connect should take on the connector when changes in external configuration providers result in a change in the connector's configuration properties. A value of 'none' indicates that Connect will do nothing. A value of 'restart' indicates that Connect should restart/reload the connector with the updated configuration properties. The restart may actually be scheduled in the future if the external configuration provider indicates that a configuration value will expire in the future.

    + + + + + +
    Type:string
    Default:restart
    Valid Values:[none, restart]
    Importance:low
    +
  • +
  • +

    transforms

    +

    Aliases for the transformations to be applied to records.

    + + + + + +
    Type:list
    Default:""
    Valid Values:non-null string, unique transformation aliases
    Importance:low
    +
  • +
  • +

    predicates

    +

    Aliases for the predicates used by transformations.

    + + + + + +
    Type:list
    Default:""
    Valid Values:non-null string, unique predicate aliases
    Importance:low
    +
  • +
  • +

    errors.retry.timeout

    +

    The maximum duration in milliseconds that a failed operation will be reattempted. The default is 0, which means no retries will be attempted. Use -1 for infinite retries.

    + + + + + +
    Type:long
    Default:0
    Valid Values:
    Importance:medium
    +
  • +
  • +

    errors.retry.delay.max.ms

    +

    The maximum duration in milliseconds between consecutive retry attempts. Jitter will be added to the delay once this limit is reached to prevent thundering herd issues.

    + + + + + +
    Type:long
    Default:60000 (1 minute)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    errors.tolerance

    +

    Behavior for tolerating errors during connector operation. 'none' is the default value and signals that any error will result in an immediate connector task failure; 'all' changes the behavior to skip over problematic records.

    + + + + + +
    Type:string
    Default:none
    Valid Values:[none, all]
    Importance:medium
    +
  • +
  • +

    errors.log.enable

    +

    If true, write each error and the details of the failed operation and problematic record to the Connect application log. This is 'false' by default, so that only errors that are not tolerated are reported.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    +
  • +
  • +

    errors.log.include.messages

    +

    Whether to include in the log the Connect record that resulted in a failure. For sink records, the topic, partition, offset, and timestamp will be logged. For source records, the key and value (and their schemas), all headers, and the timestamp, Kafka topic, Kafka partition, source partition, and source offset will be logged. This is 'false' by default, which will prevent record keys, values, and headers from being written to log files.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    +
  • +
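The errors.* properties above work together: the retry settings bound how long a failed operation is retried, errors.tolerance decides whether the task fails or skips a bad record, and the logging switches control what gets written to the Connect log. A hedged sketch combining them (the specific values are illustrative only):

```java
import java.util.HashMap;
import java.util.Map;

public class ConnectorErrorHandlingExample {
    public static Map<String, String> errorHandling() {
        Map<String, String> config = new HashMap<>();
        config.put("errors.retry.timeout", "600000");       // retry failed operations for up to 10 minutes
        config.put("errors.retry.delay.max.ms", "30000");    // cap the backoff between retries at 30 seconds
        config.put("errors.tolerance", "all");               // skip problematic records instead of failing the task
        config.put("errors.log.enable", "true");             // log every error ...
        config.put("errors.log.include.messages", "true");   // ... including the offending record
        return config;
    }
}
```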
  • +

    topic.creation.groups

    +

    Groups of configurations for topics created by source connectors

    + + + + + +
    Type:list
    Default:""
    Valid Values:non-null string, unique topic creation groups
    Importance:low
    +
  • +
  • +

    exactly.once.support

    +

    Permitted values are requested, required. If set to "required", forces a preflight check for the connector to ensure that it can provide exactly-once semantics with the given configuration. Some connectors may be capable of providing exactly-once semantics but not signal to Connect that they support this; in that case, documentation for the connector should be consulted carefully before creating it, and the value for this property should be set to "requested". Additionally, if the value is set to "required" but the worker that performs preflight validation does not have exactly-once support enabled for source connectors, requests to create or validate the connector will fail.

    + + + + + +
    Type:string
    Default:requested
    Valid Values:(case insensitive) [REQUIRED, REQUESTED]
    Importance:medium
    +
  • +
  • +

    transaction.boundary

    +

    Permitted values are: poll, interval, connector. If set to 'poll', a new producer transaction will be started and committed for every batch of records that each task from this connector provides to Connect. If set to 'connector', relies on connector-defined transaction boundaries; note that not all connectors are capable of defining their own transaction boundaries, and in that case, attempts to instantiate a connector with this value will fail. Finally, if set to 'interval', commits transactions only after a user-defined time interval has passed.

    + + + + + +
    Type:string
    Default:poll
    Valid Values:(case insensitive) [INTERVAL, POLL, CONNECTOR]
    Importance:medium
    +
  • +
  • +

    transaction.boundary.interval.ms

    +

    If 'transaction.boundary' is set to 'interval', determines the interval for producer transaction commits by connector tasks. If unset, defaults to the value of the worker-level 'offset.flush.interval.ms' property. It has no effect if a different transaction.boundary is specified.

    + + + + + +
    Type:long
    Default:null
    Valid Values:[0,...]
    Importance:low
    +
  • +
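For a source connector that should run with end-to-end exactly-once semantics, the three properties above are typically set together. The sketch below assumes the worker already has exactly-once support for source connectors enabled and that the connector can actually honor it; the interval value is illustrative.

```java
import java.util.HashMap;
import java.util.Map;

public class ExactlyOnceSourceExample {
    public static Map<String, String> exactlyOnceOverrides() {
        Map<String, String> config = new HashMap<>();
        // Fail preflight validation unless the connector can really provide exactly-once semantics.
        config.put("exactly.once.support", "required");
        // Commit a producer transaction on a fixed schedule rather than per poll batch.
        config.put("transaction.boundary", "interval");
        config.put("transaction.boundary.interval.ms", "60000"); // commit roughly once a minute
        return config;
    }
}
```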
  • +

    offsets.storage.topic

    +

    The name of a separate offsets topic to use for this connector. If empty or not specified, the worker’s global offsets topic name will be used. If specified, the offsets topic will be created if it does not already exist on the Kafka cluster targeted by this connector (which may be different from the one used for the worker's global offsets topic if the bootstrap.servers property of the connector's producer has been overridden from the worker's). Only applicable in distributed mode; in standalone mode, setting this property will have no effect.

    + + + + + +
    Type:string
    Default:null
    Valid Values:non-empty string
    Importance:low
    +
  • +
+ diff --git a/static/41/generated/streams_config.html b/static/41/generated/streams_config.html new file mode 100644 index 000000000..79811320a --- /dev/null +++ b/static/41/generated/streams_config.html @@ -0,0 +1,683 @@ +
    +
  • +

    application.id

    +

    An identifier for the stream processing application. Must be unique within the Kafka cluster. It is used as 1) the default client-id prefix, 2) the group-id for membership management, 3) the changelog topic prefix.

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    bootstrap.servers

    +

    A list of host/port pairs used to establish the initial connection to the Kafka cluster. Clients use this list to bootstrap and discover the full set of Kafka brokers. While the order of servers in the list does not matter, we recommend including more than one server to ensure resilience if any servers are down. This list does not need to contain the entire set of brokers, as Kafka clients automatically manage and update connections to the cluster efficiently. This list must be in the form host1:port1,host2:port2,....

    + + + + + +
    Type:list
    Default:
    Valid Values:
    Importance:high
    +
  • +
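application.id and bootstrap.servers are the only two required Kafka Streams settings. A minimal sketch, assuming placeholder broker addresses and application name:

```java
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class StreamsBaseConfigExample {
    public static Properties baseConfig() {
        // Minimal required configuration; application id and broker addresses are placeholders.
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-app");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092,broker2:9092");
        return props;
    }
}
```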
  • +

    ensure.explicit.internal.resource.naming

    +

    Whether to enforce explicit naming for all internal resources of the topology, including internal topics (e.g., changelog and repartition topics) and their associated state stores. When enabled, the application will refuse to start if any internal resource has an auto-generated name.

    + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:high
    +
  • +
  • +

    num.standby.replicas

    +

    The number of standby replicas for each task.

    + + + + + +
    Type:int
    Default:0
    Valid Values:
    Importance:high
    +
  • +
  • +

    state.dir

    +

    Directory location for state store. This path must be unique for each streams instance sharing the same underlying filesystem. Note that if not configured, then the default location will be different in each environment as it is computed using System.getProperty("java.io.tmpdir")

    + + + + + +
    Type:string
    Default:${java.io.tmpdir}
    Valid Values:
    Importance:high
    +
  • +
  • +

    acceptable.recovery.lag

    +

    The maximum acceptable lag (number of offsets to catch up) for a client to be considered caught-up enough to receive an active task assignment. Upon assignment, it will still restore the rest of the changelog before processing. To avoid a pause in processing during rebalances, this config should correspond to a recovery time of well under a minute for a given workload. Must be at least 0.

    + + + + + +
    Type:long
    Default:10000
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    cache.max.bytes.buffering

    +

    Maximum number of memory bytes to be used for buffering across all threads

    + + + + + +
    Type:long
    Default:10485760
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    client.id

    +

    An ID prefix string used for the client IDs of internal (main, restore, and global) consumers, producers, and admin clients with pattern <client.id>-[Global]StreamThread[-<threadSequenceNumber>]-<consumer|producer|restore-consumer|global-consumer>.

    + + + + + +
    Type:string
    Default:<application.id>-<random-UUID>
    Valid Values:
    Importance:medium
    +
  • +
  • +

    default.deserialization.exception.handler

    +

    Exception handling class that implements the org.apache.kafka.streams.errors.DeserializationExceptionHandler interface.

    + + + + + +
    Type:class
    Default:org.apache.kafka.streams.errors.LogAndFailExceptionHandler
    Valid Values:
    Importance:medium
    +
  • +
  • +

    default.key.serde

    +

    Default serializer / deserializer class for key that implements the org.apache.kafka.common.serialization.Serde interface.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    default.list.key.serde.inner

    +

    Default inner class of list serde for key that implements the org.apache.kafka.common.serialization.Serde interface. This configuration will be read if and only if default.key.serde configuration is set to org.apache.kafka.common.serialization.Serdes.ListSerde

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    default.list.key.serde.type

    +

    Default class for key that implements the java.util.List interface. This configuration will be read if and only if default.key.serde configuration is set to org.apache.kafka.common.serialization.Serdes.ListSerde. Note that when the list serde class is used, one needs to set the inner serde class that implements the org.apache.kafka.common.serialization.Serde interface via 'default.list.key.serde.inner'.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    default.list.value.serde.inner

    +

    Default inner class of list serde for value that implements the org.apache.kafka.common.serialization.Serde interface. This configuration will be read if and only if default.value.serde configuration is set to org.apache.kafka.common.serialization.Serdes.ListSerde

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    default.list.value.serde.type

    +

    Default class for value that implements the java.util.List interface. This configuration will be read if and only if default.value.serde configuration is set to org.apache.kafka.common.serialization.Serdes.ListSerde. Note that when the list serde class is used, one needs to set the inner serde class that implements the org.apache.kafka.common.serialization.Serde interface via 'default.list.value.serde.inner'.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    default.production.exception.handler

    +

    Exception handling class that implements the org.apache.kafka.streams.errors.ProductionExceptionHandler interface.

    + + + + + +
    Type:class
    Default:org.apache.kafka.streams.errors.DefaultProductionExceptionHandler
    Valid Values:
    Importance:medium
    +
  • +
  • +

    default.timestamp.extractor

    +

    Default timestamp extractor class that implements the org.apache.kafka.streams.processor.TimestampExtractor interface.

    + + + + + +
    Type:class
    Default:org.apache.kafka.streams.processor.FailOnInvalidTimestamp
    Valid Values:
    Importance:medium
    +
  • +
  • +

    default.value.serde

    +

    Default serializer / deserializer class for value that implements the org.apache.kafka.common.serialization.Serde interface.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    deserialization.exception.handler

    +

    Exception handling class that implements the org.apache.kafka.streams.errors.DeserializationExceptionHandler interface.

    + + + + + +
    Type:class
    Default:org.apache.kafka.streams.errors.LogAndFailExceptionHandler
    Valid Values:
    Importance:medium
    +
  • +
  • +

    group.protocol

    +

    The group protocol streams should use. We currently support "classic" or "streams". If "streams" is specified, then the streams rebalance protocol will be used. Otherwise, the classic group protocol will be used.

    + + + + + +
    Type:string
    Default:classic
    Valid Values:(case insensitive) [STREAMS, CLASSIC]
    Importance:medium
    +
  • +
  • +

    max.task.idle.ms

    +

    This config controls whether joins and merges may produce out-of-order results. The config value is the maximum amount of time in milliseconds a stream task will stay idle when it is fully caught up on some (but not all) input partitions to wait for producers to send additional records and avoid potential out-of-order record processing across multiple input streams. The default (zero) does not wait for producers to send more records, but it does wait to fetch data that is already present on the brokers. This default means that for records that are already present on the brokers, Streams will process them in timestamp order. Set to -1 to disable idling entirely and process any locally available data, even though doing so may produce out-of-order processing.

    + + + + + +
    Type:long
    Default:0
    Valid Values:
    Importance:medium
    +
  • +
  • +

    max.warmup.replicas

    +

    The maximum number of warmup replicas (extra standbys beyond the configured num.standbys) that can be assigned at once for the purpose of keeping the task available on one instance while it is warming up on another instance it has been reassigned to. Used to throttle how much extra broker traffic and cluster state can be used for high availability. Must be at least 1. Note that one warmup replica corresponds to one stream task. Furthermore, note that each warmup replica can only be promoted to an active task during a rebalance (normally during a so-called probing rebalance, which occurs at the frequency specified by the `probing.rebalance.interval.ms` config). This means that the maximum rate at which active tasks can be migrated from one Kafka Streams instance to another is determined by (`max.warmup.replicas` / `probing.rebalance.interval.ms`).

    + + + + + +
    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:medium
    +
  • +
  • +

    num.stream.threads

    +

    The number of threads to execute stream processing.

    + + + + + +
    Type:int
    Default:1
    Valid Values:
    Importance:medium
    +
  • +
  • +

    processing.exception.handler

    +

    Exception handling class that implements the org.apache.kafka.streams.errors.ProcessingExceptionHandler interface.

    + + + + + +
    Type:class
    Default:org.apache.kafka.streams.errors.LogAndFailProcessingExceptionHandler
    Valid Values:
    Importance:medium
    +
  • +
  • +

    processing.guarantee

    +

    The processing guarantee that should be used. Possible values are at_least_once (default) and exactly_once_v2 (requires brokers version 2.5 or higher). Note that exactly-once processing requires a cluster of at least three brokers by default, which is the recommended setting for production; for development you can change this by adjusting the broker settings transaction.state.log.replication.factor and transaction.state.log.min.isr.

    + + + + + +
    Type:string
    Default:at_least_once
    Valid Values:[at_least_once, exactly_once_v2]
    Importance:medium
    +
  • +
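Switching a Streams application to exactly-once processing is a single property change, provided the brokers are 2.5 or newer. A minimal sketch using the StreamsConfig constants:

```java
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class ProcessingGuaranteeExample {
    public static Properties withExactlyOnce(Properties props) {
        // Requires brokers 2.5+; with exactly_once_v2 the commit interval defaults to 100 ms.
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);
        return props;
    }
}
```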
  • +

    production.exception.handler

    +

    Exception handling class that implements the org.apache.kafka.streams.errors.ProductionExceptionHandler interface.

    + + + + + +
    Type:class
    Default:org.apache.kafka.streams.errors.DefaultProductionExceptionHandler
    Valid Values:
    Importance:medium
    +
  • +
  • +

    replication.factor

    +

    The replication factor for change log topics and repartition topics created by the stream processing application. The default of -1 (meaning: use broker default replication factor) requires broker version 2.4 or newer

    + + + + + +
    Type:int
    Default:-1
    Valid Values:
    Importance:medium
    +
  • +
  • +

    security.protocol

    +

    Protocol used to communicate with brokers.

    + + + + + +
    Type:string
    Default:PLAINTEXT
    Valid Values:(case insensitive) [SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT]
    Importance:medium
    +
  • +
  • +

    statestore.cache.max.bytes

    +

    Maximum number of memory bytes to be used for statestore cache across all threads

    + + + + + +
    Type:long
    Default:10485760 (10 mebibytes)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    task.assignor.class

    +

    A task assignor class or class name implementing the org.apache.kafka.streams.processor.assignment.TaskAssignor interface. Defaults to the HighAvailabilityTaskAssignor class.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    task.timeout.ms

    +

    The maximum amount of time in milliseconds a task might stall due to internal errors and retries until an error is raised. For a timeout of 0ms, a task would raise an error for the first internal error. For any timeout larger than 0ms, a task will retry at least once before an error is raised.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    topology.optimization

    +

    A configuration telling Kafka Streams if it should optimize the topology and what optimizations to apply. Acceptable values are: "NO_OPTIMIZATION", "OPTIMIZE", or a comma separated list of specific optimizations: "REUSE_KTABLE_SOURCE_TOPICS", "MERGE_REPARTITION_TOPICS", "SINGLE_STORE_SELF_JOIN". "NO_OPTIMIZATION" by default.

    + + + + + +
    Type:string
    Default:none
    Valid Values:[all, none, reuse.ktable.source.topics, merge.repartition.topics, single.store.self.join]
    Importance:medium
    +
  • +
  • +

    application.server

    +

    A host:port pair pointing to a user-defined endpoint that can be used for state store discovery and interactive queries on this KafkaStreams instance.

    + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:low
    +
  • +
  • +

    buffered.records.per.partition

    +

    Maximum number of records to buffer per partition.

    + + + + + +
    Type:int
    Default:1000
    Valid Values:
    Importance:low
    +
  • +
  • +

    built.in.metrics.version

    +

    Version of the built-in metrics to use.

    + + + + + +
    Type:string
    Default:latest
    Valid Values:[latest]
    Importance:low
    +
  • +
  • +

    commit.interval.ms

    +

    The frequency in milliseconds with which to commit processing progress. For at-least-once processing, committing means to save the position (i.e., offsets) of the processor. For exactly-once processing, it means to commit the transaction, which includes saving the position and making the committed data in the output topic visible to consumers with isolation level read_committed. (Note: if processing.guarantee is set to exactly_once_v2, the default value is 100, otherwise the default value is 30000.)

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[0,...]
    Importance:low
    +
  • +
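commit.interval.ms interacts with the statestore cache described earlier: a smaller cache and a shorter commit interval make results visible downstream sooner at the cost of throughput. A hedged tuning sketch (the particular values are illustrative, not recommendations):

```java
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class CommitAndCacheTuningExample {
    public static Properties tuneForLatency(Properties props) {
        // Smaller cache and shorter commit interval trade throughput for fresher downstream results.
        props.put(StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG, 5 * 1024 * 1024L); // 5 MiB
        props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 10_000L);                  // commit every 10 seconds
        return props;
    }
}
```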
  • +

    connections.max.idle.ms

    +

    Close idle connections after the number of milliseconds specified by this config.

    + + + + + +
    Type:long
    Default:540000 (9 minutes)
    Valid Values:
    Importance:low
    +
  • +
  • +

    default.client.supplier

    +

    Client supplier class that implements the org.apache.kafka.streams.KafkaClientSupplier interface.

    + + + + + +
    Type:class
    Default:org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier
    Valid Values:
    Importance:low
    +
  • +
  • +

    default.dsl.store

    +

    The default state store type used by DSL operators.

    + + + + + +
    Type:string
    Default:rocksDB
    Valid Values:[rocksDB, in_memory]
    Importance:low
    +
  • +
  • +

    dsl.store.suppliers.class

    +

    Defines which store implementations to plug in to DSL operators. Must implement the org.apache.kafka.streams.state.DslStoreSuppliers interface.

    + + + + + +
    Type:class
    Default:org.apache.kafka.streams.state.BuiltInDslStoreSuppliers$RocksDBDslStoreSuppliers
    Valid Values:
    Importance:low
    +
  • +
  • +

    enable.metrics.push

    +

    Whether to enable pushing of internal client metrics for (main, restore, and global) consumers, producers, and admin clients. The cluster must have a client metrics subscription which corresponds to a client.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    log.summary.interval.ms

    +

    The output interval in milliseconds for logging summary information.
    If greater than or equal to 0, the summary log will be output according to the set time interval;
    If less than 0, summary output is disabled.

    + + + + + +
    Type:long
    Default:120000 (2 minutes)
    Valid Values:
    Importance:low
    +
  • +
  • +

    metadata.max.age.ms

    +

    The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    metadata.recovery.rebootstrap.trigger.ms

    +

    If a client configured to rebootstrap using metadata.recovery.strategy=rebootstrap is unable to obtain metadata from any of the brokers in the last known metadata for this interval, the client repeats the bootstrap process using the bootstrap.servers configuration.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    metadata.recovery.strategy

    +

    Controls how the client recovers when none of the brokers known to it is available. If set to none, the client fails. If set to rebootstrap, the client repeats the bootstrap process using bootstrap.servers. Rebootstrapping is useful when a client communicates with brokers so infrequently that the set of brokers may change entirely before the client refreshes metadata. Metadata recovery is triggered when all last-known brokers appear unavailable simultaneously. Brokers appear unavailable when disconnected and no current retry attempt is in-progress. Consider increasing reconnect.backoff.ms and reconnect.backoff.max.ms and decreasing socket.connection.setup.timeout.ms and socket.connection.setup.timeout.max.ms for the client. Rebootstrap is also triggered if a connection cannot be established to any of the brokers for metadata.recovery.rebootstrap.trigger.ms milliseconds or if the server requests rebootstrap.

    + + + + + +
    Type:string
    Default:rebootstrap
    Valid Values:(case insensitive) [REBOOTSTRAP, NONE]
    Importance:low
    +
  • +
  • +

    metric.reporters

    +

    A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. When custom reporters are set and org.apache.kafka.common.metrics.JmxReporter is needed, it has to be explicitly added to the list.

    + + + + + +
    Type:list
    Default:org.apache.kafka.common.metrics.JmxReporter
    Valid Values:
    Importance:low
    +
  • +
  • +

    metrics.num.samples

    +

    The number of samples maintained to compute metrics.

    + + + + + +
    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:low
    +
  • +
  • +

    metrics.recording.level

    +

    The highest recording level for metrics. It has three levels for recording metrics - info, debug, and trace.

    INFO level records only essential metrics necessary for monitoring system performance and health. It collects vital data without gathering too much detail, making it suitable for production environments where minimal overhead is desired.

    DEBUG level records most metrics, providing more detailed information about the system's operation. It's useful for development and testing environments where you need deeper insights to debug and fine-tune the application.

    TRACE level records all possible metrics, capturing every detail about the system's performance and operation. It's best for controlled environments where in-depth analysis is required, though it can introduce significant overhead.

    + + + + + +
    Type:string
    Default:INFO
    Valid Values:[INFO, DEBUG, TRACE]
    Importance:low
    +
  • +
  • +

    metrics.sample.window.ms

    +

    The window of time a metrics sample is computed over.

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    poll.ms

    +

    The amount of time in milliseconds to block waiting for input.

    + + + + + +
    Type:long
    Default:100
    Valid Values:
    Importance:low
    +
  • +
  • +

    probing.rebalance.interval.ms

    +

    The maximum time in milliseconds to wait before triggering a rebalance to probe for warmup replicas that have finished warming up and are ready to become active. Probing rebalances will continue to be triggered until the assignment is balanced. Must be at least 1 minute.

    + + + + + +
    Type:long
    Default:600000 (10 minutes)
    Valid Values:[60000,...]
    Importance:low
    +
  • +
  • +

    processor.wrapper.class

    +

    A processor wrapper class or class name that implements the org.apache.kafka.streams.state.ProcessorWrapper interface. Must be passed in to the StreamsBuilder or Topology constructor in order to take effect

    + + + + + +
    Type:class
    Default:org.apache.kafka.streams.processor.internals.NoOpProcessorWrapper
    Valid Values:
    Importance:low
    +
  • +
  • +

    rack.aware.assignment.non_overlap_cost

    +

    Cost associated with moving tasks from the existing assignment. This config and rack.aware.assignment.traffic_cost control whether the optimization algorithm favors minimizing cross rack traffic or minimizing the movement of tasks in the existing assignment. If set to a larger value, org.apache.kafka.streams.processor.internals.assignment.RackAwareTaskAssignor will optimize to maintain the existing assignment. The default value is null, which means it will use the default non_overlap cost values in different assignors.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    rack.aware.assignment.strategy

    +

    The strategy we use for rack aware assignment. Rack aware assignment will take client.rack and racks of TopicPartition into account when assigning tasks to minimize cross rack traffic. Valid settings are: none (default), which will disable rack aware assignment; min_traffic, which will compute minimum cross rack traffic assignment; balance_subtopology, which will compute minimum cross rack traffic and try to balance the tasks of the same subtopologies across different clients.

    + + + + + +
    Type:string
    Default:none
    Valid Values:[none, min_traffic, balance_subtopology]
    Importance:low
    +
  • +
  • +

    rack.aware.assignment.tags

    +

    List of client tag keys used to distribute standby replicas across Kafka Streams instances. When configured, Kafka Streams will make a best-effort to distribute the standby tasks over each client tag dimension.

    + + + + + +
    Type:list
    Default:""
    Valid Values:List containing maximum of 5 elements
    Importance:low
    +
  • +
  • +

    rack.aware.assignment.traffic_cost

    +

    Cost associated with cross rack traffic. This config and rack.aware.assignment.non_overlap_cost control whether the optimization algorithm favors minimizing cross rack traffic or minimizing the movement of tasks in the existing assignment. If set to a larger value, org.apache.kafka.streams.processor.internals.assignment.RackAwareTaskAssignor will optimize for minimizing cross rack traffic. The default value is null, which means it will use the default traffic cost values in different assignors.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    receive.buffer.bytes

    +

    The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.

    + + + + + +
    Type:int
    Default:32768 (32 kibibytes)
    Valid Values:[-1,...]
    Importance:low
    +
  • +
  • +

    reconnect.backoff.max.ms

    +

    The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.

    + + + + + +
    Type:long
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    reconnect.backoff.ms

    +

    The base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker. This value is the initial backoff value and will increase exponentially for each consecutive connection failure, up to the reconnect.backoff.max.ms value.

    + + + + + +
    Type:long
    Default:50
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    repartition.purge.interval.ms

    +

    The frequency in milliseconds with which to delete fully consumed records from repartition topics. Purging will occur after at least this value since the last purge, but may be delayed until later. (Note, unlike commit.interval.ms, the default for this value remains unchanged when processing.guarantee is set to exactly_once_v2).

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    request.timeout.ms

    +

    The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.

    + + + + + +
    Type:int
    Default:40000 (40 seconds)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    retry.backoff.ms

    +

    The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios. This value is the initial backoff value and will increase exponentially for each failed request, up to the retry.backoff.max.ms value.

    + + + + + +
    Type:long
    Default:100
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    rocksdb.config.setter

    +

    A RocksDB config setter class or class name that implements the org.apache.kafka.streams.state.RocksDBConfigSetter interface.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    send.buffer.bytes

    +

    The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.

    + + + + + +
    Type:int
    Default:131072 (128 kibibytes)
    Valid Values:[-1,...]
    Importance:low
    +
  • +
  • +

    state.cleanup.delay.ms

    +

    The amount of time in milliseconds to wait before deleting state when a partition has migrated. Only state directories that have not been modified for at least state.cleanup.delay.ms will be removed

    + + + + + +
    Type:long
    Default:600000 (10 minutes)
    Valid Values:
    Importance:low
    +
  • +
  • +

    upgrade.from

    +

    Allows live upgrading (and downgrading in some cases -- see upgrade guide) in a backward compatible way. Default is `null`. Please refer to the Kafka Streams upgrade guide for instructions on how and when to use this config. Note that when upgrading from 3.5 to a newer version it is never required to specify this config, while upgrading live directly to 4.0+ from 2.3 or below is no longer supported even with this config. Accepted values are "2.4", "2.5", "2.6", "2.7", "2.8", "3.0", "3.1", "3.2", "3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9" (for upgrading from the corresponding old version).

    + + + + + +
    Type:string
    Default:null
    Valid Values:[null, 0.10.0, 0.10.1, 0.10.2, 0.11.0, 1.0, 1.1, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0]
    Importance:low
    +
  • +
  • +

    window.size.ms

    +

    Sets window size for the deserializer in order to calculate window end times.

    + + + + + +
    Type:long
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    windowed.inner.class.serde

    +

    Default serializer / deserializer for the inner class of a windowed record. Must implement the org.apache.kafka.common.serialization.Serde interface. Note that setting this config in a Kafka Streams application would result in an error as it is meant to be used only from the plain consumer client.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    windowstore.changelog.additional.retention.ms

    +

    Added to a window's maintainMs to ensure data is not deleted from the log prematurely. Allows for clock drift. Default is 1 day.

    + + + + + +
    Type:long
    Default:86400000 (1 day)
    Valid Values:
    Importance:low
    +
  • +
+ diff --git a/static/41/generated/topic_config.html b/static/41/generated/topic_config.html new file mode 100644 index 000000000..1a1312cc0 --- /dev/null +++ b/static/41/generated/topic_config.html @@ -0,0 +1,366 @@ +
    +
  • +

    cleanup.policy

    +

    This config designates the retention policy to use on log segments. The "delete" policy (which is the default) will discard old segments when their retention time or size limit has been reached. The "compact" policy will enable log compaction, which retains the latest value for each key. It is also possible to specify both policies in a comma-separated list (e.g. "delete,compact"). In this case, old segments will be discarded per the retention time and size configuration, while retained segments will be compacted.

    + + + + + + +
    Type:list
    Default:delete
    Valid Values:[compact, delete]
    Server Default Property:log.cleanup.policy
    Importance:medium
    +
  • +
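As an example, a compacted topic can be created with this setting as a per-topic override through the Admin client; the topic name, partition count, replication factor, and broker address below are placeholders.

```java
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.config.TopicConfig;

public class CompactedTopicExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // Create a 6-partition, replication-factor-3 topic with log compaction enabled.
            NewTopic topic = new NewTopic("user-profiles", 6, (short) 3)
                    .configs(Map.of(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT));
            admin.createTopics(Collections.singleton(topic)).all().get();
        }
    }
}
```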
  • +

    compression.gzip.level

    +

    The compression level to use if compression.type is set to gzip.

    + + + + + + +
    Type:int
    Default:-1
    Valid Values:[1,...,9] or -1
    Server Default Property:compression.gzip.level
    Importance:medium
    +
  • +
  • +

    compression.lz4.level

    +

    The compression level to use if compression.type is set to lz4.

    + + + + + + +
    Type:int
    Default:9
    Valid Values:[1,...,17]
    Server Default Property:compression.lz4.level
    Importance:medium
    +
  • +
  • +

    compression.type

    +

    Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.

    + + + + + + +
    Type:string
    Default:producer
    Valid Values:[uncompressed, zstd, lz4, snappy, gzip, producer]
    Server Default Property:compression.type
    Importance:medium
    +
  • +
  • +

    compression.zstd.level

    +

    The compression level to use if compression.type is set to zstd.

    + + + + + + +
    Type:int
    Default:3
    Valid Values:[-131072,...,22]
    Server Default Property:compression.zstd.level
    Importance:medium
    +
  • +
  • +

    delete.retention.ms

    +

    The amount of time to retain delete tombstone markers for log compacted topics. This setting also gives a bound on the time in which a consumer must complete a read if they begin from offset 0 to ensure that they get a valid snapshot of the final stage (otherwise delete tombstones may be collected before they complete their scan).

    + + + + + + +
    Type:long
    Default:86400000 (1 day)
    Valid Values:[0,...]
    Server Default Property:log.cleaner.delete.retention.ms
    Importance:medium
    +
  • +
  • +

    file.delete.delay.ms

    +

    The time to wait before deleting a file from the filesystem

    + + + + + + +
    Type:long
    Default:60000 (1 minute)
    Valid Values:[0,...]
    Server Default Property:log.segment.delete.delay.ms
    Importance:medium
    +
  • +
  • +

    flush.messages

    +

    This setting allows specifying an interval at which we will force an fsync of data written to the log. For example if this was set to 1 we would fsync after every message; if it were 5 we would fsync after every five messages. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient. This setting can be overridden on a per-topic basis (see the per-topic configuration section).

    + + + + + + +
    Type:long
    Default:9223372036854775807
    Valid Values:[1,...]
    Server Default Property:log.flush.interval.messages
    Importance:medium
    +
  • +
  • +

    flush.ms

    +

    This setting allows specifying a time interval at which we will force an fsync of data written to the log. For example if this was set to 1000 we would fsync after 1000 ms had passed. Note that this setting depends on the broker-level configuration log.flush.scheduler.interval.ms, which controls how frequently the flush check occurs. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.

    + + + + + + +
    Type:long
    Default:9223372036854775807
    Valid Values:[0,...]
    Server Default Property:log.flush.interval.ms
    Importance:medium
    +
  • +
  • +

    follower.replication.throttled.replicas

    +

    A list of replicas for which log replication should be throttled on the follower side. The list should describe a set of replicas in the form [PartitionId]:[BrokerId],[PartitionId]:[BrokerId],... or alternatively the wildcard '*' can be used to throttle all replicas for this topic.

    + + + + + + +
    Type:list
    Default:""
    Valid Values:[partitionId]:[brokerId],[partitionId]:[brokerId],...
    Server Default Property:null
    Importance:medium
    +
  • +
  • +

    index.interval.bytes

    +

    This setting controls how frequently Kafka adds entries to its offset index and, conditionally, to its time index. The default setting ensures that we index a message roughly every 4096 bytes. More frequent indexing allows reads to jump closer to the exact position in the log but results in larger index files. You probably don't need to change this.

    Note: the time index will be inserted only when the timestamp is greater than the last indexed timestamp.

    + + + + + + +
    Type:int
    Default:4096 (4 kibibytes)
    Valid Values:[0,...]
    Server Default Property:log.index.interval.bytes
    Importance:medium
    +
  • +
  • +

    leader.replication.throttled.replicas

    +

    A list of replicas for which log replication should be throttled on the leader side. The list should describe a set of replicas in the form [PartitionId]:[BrokerId],[PartitionId]:[BrokerId],... or alternatively the wildcard '*' can be used to throttle all replicas for this topic.

    + + + + + + +
    Type:list
    Default:""
    Valid Values:[partitionId]:[brokerId],[partitionId]:[brokerId],...
    Server Default Property:null
    Importance:medium
    +
  • +
  • +

    local.retention.bytes

    +

    The maximum size of local log segments that can grow for a partition before it deletes the old segments. The default value is -2, which means the `retention.bytes` value is used. The effective value should always be less than or equal to the `retention.bytes` value.

    + + + + + + +
    Type:long
    Default:-2
    Valid Values:[-2,...]
    Server Default Property:log.local.retention.bytes
    Importance:medium
    +
  • +
  • +

    local.retention.ms

    +

    The number of milliseconds to keep the local log segment before it gets deleted. The default value is -2, which means the `retention.ms` value is used. The effective value should always be less than or equal to the `retention.ms` value.

    + + + + + + +
    Type:long
    Default:-2
    Valid Values:[-2,...]
    Server Default Property:log.local.retention.ms
    Importance:medium
    +
  • +
  • +

    max.compaction.lag.ms

    +

    The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.

    + + + + + + +
    Type:long
    Default:9223372036854775807
    Valid Values:[1,...]
    Server Default Property:log.cleaner.max.compaction.lag.ms
    Importance:medium
    +
  • +
  • +

    max.message.bytes

    +

    The largest record batch size allowed by Kafka (after compression if compression is enabled).

    + + + + + + +
    Type:int
    Default:1048588
    Valid Values:[0,...]
    Server Default Property:message.max.bytes
    Importance:medium
    +
  • +
  • +

    message.timestamp.after.max.ms

    +

    This configuration sets the allowable timestamp difference between the message timestamp and the broker's timestamp. The message timestamp can be later than or equal to the broker's timestamp, with the maximum allowable difference determined by the value set in this configuration. If message.timestamp.type=CreateTime, the message will be rejected if the difference in timestamps exceeds this specified threshold. This configuration is ignored if message.timestamp.type=LogAppendTime.

    + + + + + + +
    Type:long
    Default:3600000 (1 hour)
    Valid Values:[0,...]
    Server Default Property:log.message.timestamp.after.max.ms
    Importance:medium
    +
  • +
  • +

    message.timestamp.before.max.ms

    +

    This configuration sets the allowable timestamp difference between the broker's timestamp and the message timestamp. The message timestamp can be earlier than or equal to the broker's timestamp, with the maximum allowable difference determined by the value set in this configuration. If message.timestamp.type=CreateTime, the message will be rejected if the difference in timestamps exceeds this specified threshold. This configuration is ignored if message.timestamp.type=LogAppendTime.

    + + + + + + +
    Type:long
    Default:9223372036854775807
    Valid Values:[0,...]
    Server Default Property:log.message.timestamp.before.max.ms
    Importance:medium
    +
  • +
  • +

    message.timestamp.type

    +

    Define whether the timestamp in the message is message create time or log append time.

    + + + + + + +
    Type:string
    Default:CreateTime
    Valid Values:[CreateTime, LogAppendTime]
    Server Default Property:log.message.timestamp.type
    Importance:medium
    +
  • +
  • +

    min.cleanable.dirty.ratio

    +

    This configuration controls how frequently the log compactor will attempt to clean the log (assuming log compaction is enabled). By default we will avoid cleaning a log where more than 50% of the log has been compacted. This ratio bounds the maximum space wasted in the log by duplicates (at 50% at most 50% of the log could be duplicates). A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log. If the max.compaction.lag.ms or the min.compaction.lag.ms configurations are also specified, then the log compactor considers the log to be eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the min.compaction.lag.ms duration, or (ii) if the log has had dirty (uncompacted) records for at most the max.compaction.lag.ms period.

    + + + + + + +
    Type:double
    Default:0.5
    Valid Values:[0,...,1]
    Server Default Property:log.cleaner.min.cleanable.ratio
    Importance:medium
    +
  • +
  • +

    min.compaction.lag.ms

    +

    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.

    + + + + + + +
    Type:long
    Default:0
    Valid Values:[0,...]
    Server Default Property:log.cleaner.min.compaction.lag.ms
    Importance:medium
    +
  • +
  • +

    min.insync.replicas

    +

    Specifies the minimum number of in-sync replicas (including the leader) required for a write to succeed when a producer sets acks to "all" (or "-1"). In the acks=all case, every in-sync replica must acknowledge a write for it to be considered successful. E.g., if a topic has replication.factor of 3 and the ISR set includes all three replicas, then all three replicas must acknowledge an acks=all write for it to succeed, even if min.insync.replicas happens to be less than 3. If acks=all and the current ISR set contains fewer than min.insync.replicas members, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend).
    Regardless of the acks setting, the messages will not be visible to the consumers until they are replicated to all in-sync replicas and the min.insync.replicas condition is met.
    When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of "all". This ensures that a majority of replicas must persist a write before it's considered successful by the producer and it's visible to consumers.

    Note that when the Eligible Leader Replicas feature is enabled, the semantics of this config changes. Please refer to the ELR section for more info.

    + + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Server Default Property:min.insync.replicas
    Importance:medium
    +
  • +
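The typical durability recipe described above (replication factor 3, min.insync.replicas of 2, acks of "all") spans a topic-level setting and a producer setting. A minimal sketch of both halves, with placeholder values:

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerConfig;

public class DurabilityExample {
    // Topic-level override: with replication factor 3, require at least 2 in-sync replicas per write.
    public static final Map<String, String> TOPIC_CONFIG = Map.of("min.insync.replicas", "2");

    public static Properties producerProps(Properties props) {
        // Combined with the topic setting above, acks=all means a write only succeeds
        // once every in-sync replica has acknowledged it.
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        return props;
    }
}
```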
  • +

    preallocate

    +

    True if we should preallocate the file on disk when creating a new log segment.

    + + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Server Default Property:log.preallocate
    Importance:medium
    +
  • +
  • +

    remote.log.copy.disable

    +

    Determines whether tiered data for a topic should become read only, with no more data uploaded for the topic. Once this config is set to true, the local retention configuration (i.e. local.retention.ms/bytes) becomes irrelevant, and all data expiration follows the topic-wide retention configuration (i.e. retention.ms/bytes).

    + + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Server Default Property:null
    Importance:medium
    +
  • +
  • +

    remote.log.delete.on.disable

    +

    Determines whether tiered data for a topic should be deleted after tiered storage is disabled on a topic. This configuration should be enabled when trying to set `remote.storage.enable` from true to false

    + + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Server Default Property:null
    Importance:medium
    +
  • +
  • +

    remote.storage.enable

    +

    To enable tiered storage for a topic, set this configuration to true. To disable tiered storage for a topic that has it enabled, set this configuration to false. When disabling, you must also set remote.log.delete.on.disable to true.

    + + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Server Default Property:null
    Importance:medium
    +
  • +
  • +

    retention.bytes

    +

    This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the "delete" retention policy. By default there is no size limit, only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes. Additionally, retention.bytes configuration operates independently of "segment.ms" and "segment.bytes" configurations. Moreover, it triggers the rolling of a new segment if retention.bytes is configured to zero.

    + + + + + + +
    Type:long
    Default:-1
    Valid Values:
    Server Default Property:log.retention.bytes
    Importance:medium
    +
  • +
  • +

    retention.ms

    +

    This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space if we are using the "delete" retention policy. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied. Additionally, retention.ms configuration operates independently of "segment.ms" and "segment.bytes" configurations. Moreover, it triggers the rolling of a new segment if the retention.ms condition is satisfied.

    + + + + + + +
    Type:long
    Default:604800000 (7 days)
    Valid Values:[-1,...]
    Server Default Property:log.retention.ms
    Importance:medium
    +
  • +
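retention.ms is a common per-topic override to change at runtime; the sketch below uses the Admin client's incrementalAlterConfigs with a placeholder topic name, broker address, and retention value.

```java
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class RetentionOverrideExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "clickstream"); // placeholder topic
            // Set retention.ms to 3 days (259200000 ms) for this topic only.
            AlterConfigOp setRetention =
                    new AlterConfigOp(new ConfigEntry("retention.ms", "259200000"), AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(Map.of(topic, Collections.singleton(setRetention))).all().get();
        }
    }
}
```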
  • +

    segment.bytes

    +

    This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention.

    + + + + + + +
    Type:int
    Default:1073741824 (1 gibibyte)
    Valid Values:[1048576,...]
    Server Default Property:log.segment.bytes
    Importance:medium
    +
  • +
  • +

    segment.index.bytes

    +

    This configuration controls the size of the index that maps offsets to file positions. We preallocate this index file and shrink it only after log rolls. You generally should not need to change this setting.

    + + + + + + +
    Type:int
    Default:10485760 (10 mebibytes)
    Valid Values:[4,...]
    Server Default Property:log.index.size.max.bytes
    Importance:medium
    +
  • +
  • +

    segment.jitter.ms

    +

    The maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling

    + + + + + + +
    Type:long
    Default:0
    Valid Values:[0,...]
    Server Default Property:log.roll.jitter.ms
    Importance:medium
    +
  • +
  • +

    segment.ms

    +

    This configuration controls the period of time after which Kafka will force the log to roll even if the segment file isn't full to ensure that retention can delete or compact old data.

    + + + + + + +
    Type:long
    Default:604800000 (7 days)
    Valid Values:[1,...]
    Server Default Property:log.roll.ms
    Importance:medium
    +
  • +
  • +

    unclean.leader.election.enable

    +

    Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss.

    Note: In KRaft mode, when enabling this config dynamically, it needs to wait for the unclean leader election thread to trigger the election periodically (default is 5 minutes). Please run `kafka-leader-election.sh` with the `unclean` option to trigger the unclean leader election immediately if needed.

    + + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Server Default Property:unclean.leader.election.enable
    Importance:medium
    +
  • +
+ diff --git a/static/41/images/consumer-groups.png b/static/41/images/consumer-groups.png new file mode 100644 index 000000000..16fe2936c Binary files /dev/null and b/static/41/images/consumer-groups.png differ diff --git a/static/41/images/creating-streams-iframe-placeholder.png b/static/41/images/creating-streams-iframe-placeholder.png new file mode 100644 index 000000000..479a83076 Binary files /dev/null and b/static/41/images/creating-streams-iframe-placeholder.png differ diff --git a/static/41/images/icons/NYT.jpg b/static/41/images/icons/NYT.jpg new file mode 100644 index 000000000..f4a7e8fc2 Binary files /dev/null and b/static/41/images/icons/NYT.jpg differ diff --git a/static/41/images/icons/architecture--white.png b/static/41/images/icons/architecture--white.png new file mode 100644 index 000000000..98b1b03d8 Binary files /dev/null and b/static/41/images/icons/architecture--white.png differ diff --git a/static/41/images/icons/architecture.png b/static/41/images/icons/architecture.png new file mode 100644 index 000000000..6f9fd4008 Binary files /dev/null and b/static/41/images/icons/architecture.png differ diff --git a/static/41/images/icons/documentation--white.png b/static/41/images/icons/documentation--white.png new file mode 100644 index 000000000..1e8fd976a Binary files /dev/null and b/static/41/images/icons/documentation--white.png differ diff --git a/static/41/images/icons/documentation.png b/static/41/images/icons/documentation.png new file mode 100644 index 000000000..8d9da19c6 Binary files /dev/null and b/static/41/images/icons/documentation.png differ diff --git a/static/41/images/icons/line.png b/static/41/images/icons/line.png new file mode 100644 index 000000000..4587d210a Binary files /dev/null and b/static/41/images/icons/line.png differ diff --git a/static/41/images/icons/new-york.png b/static/41/images/icons/new-york.png new file mode 100644 index 000000000..42a4b0ba2 Binary files /dev/null and b/static/41/images/icons/new-york.png differ diff --git a/static/41/images/icons/rabobank.png b/static/41/images/icons/rabobank.png new file mode 100644 index 000000000..ddad710e0 Binary files /dev/null and b/static/41/images/icons/rabobank.png differ diff --git a/static/41/images/icons/tutorials--white.png b/static/41/images/icons/tutorials--white.png new file mode 100644 index 000000000..97a0c0449 Binary files /dev/null and b/static/41/images/icons/tutorials--white.png differ diff --git a/static/41/images/icons/tutorials.png b/static/41/images/icons/tutorials.png new file mode 100644 index 000000000..983da6c94 Binary files /dev/null and b/static/41/images/icons/tutorials.png differ diff --git a/static/41/images/icons/zalando.png b/static/41/images/icons/zalando.png new file mode 100644 index 000000000..719a7dc66 Binary files /dev/null and b/static/41/images/icons/zalando.png differ diff --git a/static/41/images/intro_to_streams-iframe-placeholder.png b/static/41/images/intro_to_streams-iframe-placeholder.png new file mode 100644 index 000000000..462ec036e Binary files /dev/null and b/static/41/images/intro_to_streams-iframe-placeholder.png differ diff --git a/static/41/images/kafka-apis.png b/static/41/images/kafka-apis.png new file mode 100644 index 000000000..db6053ccc Binary files /dev/null and b/static/41/images/kafka-apis.png differ diff --git a/static/41/images/kafka-logo-readme-dark.svg b/static/41/images/kafka-logo-readme-dark.svg new file mode 100644 index 000000000..00d131bf2 --- /dev/null +++ b/static/41/images/kafka-logo-readme-dark.svg @@ -0,0 +1,217 @@ + + 
+ + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + + + + + + + diff --git a/static/41/images/kafka-logo-readme-light.svg b/static/41/images/kafka-logo-readme-light.svg new file mode 100644 index 000000000..91e55d2fd --- /dev/null +++ b/static/41/images/kafka-logo-readme-light.svg @@ -0,0 +1,217 @@ + + + + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + + + + + + + + diff --git a/static/41/images/kafka_log.png b/static/41/images/kafka_log.png new file mode 100644 index 000000000..6658b3f43 Binary files /dev/null and b/static/41/images/kafka_log.png differ diff --git a/static/41/images/kafka_multidc.png b/static/41/images/kafka_multidc.png new file mode 100644 index 000000000..7bc56f4a5 Binary files /dev/null and b/static/41/images/kafka_multidc.png differ diff --git a/static/41/images/kafka_multidc_complex.png b/static/41/images/kafka_multidc_complex.png new file mode 100644 index 000000000..ab88debde Binary files /dev/null and b/static/41/images/kafka_multidc_complex.png differ diff --git a/static/41/images/log_anatomy.png b/static/41/images/log_anatomy.png new file mode 100644 index 000000000..a64949992 Binary files /dev/null and b/static/41/images/log_anatomy.png differ diff --git a/static/41/images/log_cleaner_anatomy.png b/static/41/images/log_cleaner_anatomy.png new file mode 100644 index 000000000..fb425b055 Binary files /dev/null and b/static/41/images/log_cleaner_anatomy.png differ diff --git a/static/41/images/log_compaction.png b/static/41/images/log_compaction.png new file mode 100644 index 000000000..4e4a8334c Binary files /dev/null and b/static/41/images/log_compaction.png differ diff --git a/static/41/images/log_consumer.png b/static/41/images/log_consumer.png new file mode 100644 index 000000000..fbc45f206 Binary files /dev/null and b/static/41/images/log_consumer.png differ diff --git a/static/41/images/mirror-maker.png b/static/41/images/mirror-maker.png new file mode 100644 index 000000000..8f76b1f80 Binary files /dev/null and b/static/41/images/mirror-maker.png differ diff --git a/static/41/images/producer_consumer.png b/static/41/images/producer_consumer.png new file mode 100644 index 000000000..4b10cc95d Binary files /dev/null and b/static/41/images/producer_consumer.png differ diff --git a/static/41/images/streams-architecture-overview.jpg b/static/41/images/streams-architecture-overview.jpg new file mode 100644 index 000000000..92220791a Binary files /dev/null and b/static/41/images/streams-architecture-overview.jpg differ diff --git a/static/41/images/streams-architecture-states.jpg b/static/41/images/streams-architecture-states.jpg new file mode 100644 index 000000000..fde12db52 Binary files /dev/null and b/static/41/images/streams-architecture-states.jpg differ diff --git a/static/41/images/streams-architecture-tasks.jpg b/static/41/images/streams-architecture-tasks.jpg new file mode 100644 index 000000000..2e957f976 Binary files /dev/null and b/static/41/images/streams-architecture-tasks.jpg differ diff --git a/static/41/images/streams-architecture-threads.jpg b/static/41/images/streams-architecture-threads.jpg new file mode 100644 index 000000000..d5f10dbe6 Binary files /dev/null and b/static/41/images/streams-architecture-threads.jpg differ diff --git a/static/41/images/streams-architecture-topology.jpg b/static/41/images/streams-architecture-topology.jpg new file mode 100644 index 000000000..f42e8cddf Binary files /dev/null and b/static/41/images/streams-architecture-topology.jpg differ diff --git a/static/41/images/streams-cache-and-commit-interval.png b/static/41/images/streams-cache-and-commit-interval.png new file mode 100644 index 000000000..a663bc623 Binary files /dev/null and b/static/41/images/streams-cache-and-commit-interval.png differ diff --git a/static/41/images/streams-concepts-topology.jpg b/static/41/images/streams-concepts-topology.jpg new file mode 100644 index 000000000..832f6d43a 
Binary files /dev/null and b/static/41/images/streams-concepts-topology.jpg differ diff --git a/static/41/images/streams-elastic-scaling-1.png b/static/41/images/streams-elastic-scaling-1.png new file mode 100644 index 000000000..7823ac1f8 Binary files /dev/null and b/static/41/images/streams-elastic-scaling-1.png differ diff --git a/static/41/images/streams-elastic-scaling-2.png b/static/41/images/streams-elastic-scaling-2.png new file mode 100644 index 000000000..374b5ff40 Binary files /dev/null and b/static/41/images/streams-elastic-scaling-2.png differ diff --git a/static/41/images/streams-elastic-scaling-3.png b/static/41/images/streams-elastic-scaling-3.png new file mode 100644 index 000000000..0b4adafa0 Binary files /dev/null and b/static/41/images/streams-elastic-scaling-3.png differ diff --git a/static/41/images/streams-interactive-queries-01.png b/static/41/images/streams-interactive-queries-01.png new file mode 100644 index 000000000..d5d5031c3 Binary files /dev/null and b/static/41/images/streams-interactive-queries-01.png differ diff --git a/static/41/images/streams-interactive-queries-02.png b/static/41/images/streams-interactive-queries-02.png new file mode 100644 index 000000000..ea894b62b Binary files /dev/null and b/static/41/images/streams-interactive-queries-02.png differ diff --git a/static/41/images/streams-interactive-queries-03.png b/static/41/images/streams-interactive-queries-03.png new file mode 100644 index 000000000..403e3ae90 Binary files /dev/null and b/static/41/images/streams-interactive-queries-03.png differ diff --git a/static/41/images/streams-interactive-queries-api-01.png b/static/41/images/streams-interactive-queries-api-01.png new file mode 100644 index 000000000..2b4aaed59 Binary files /dev/null and b/static/41/images/streams-interactive-queries-api-01.png differ diff --git a/static/41/images/streams-interactive-queries-api-02.png b/static/41/images/streams-interactive-queries-api-02.png new file mode 100644 index 000000000..e5e7527a3 Binary files /dev/null and b/static/41/images/streams-interactive-queries-api-02.png differ diff --git a/static/41/images/streams-session-windows-01.png b/static/41/images/streams-session-windows-01.png new file mode 100644 index 000000000..2d711d876 Binary files /dev/null and b/static/41/images/streams-session-windows-01.png differ diff --git a/static/41/images/streams-session-windows-02.png b/static/41/images/streams-session-windows-02.png new file mode 100644 index 000000000..6c0382fae Binary files /dev/null and b/static/41/images/streams-session-windows-02.png differ diff --git a/static/41/images/streams-sliding-windows.png b/static/41/images/streams-sliding-windows.png new file mode 100644 index 000000000..fa6d5c3b7 Binary files /dev/null and b/static/41/images/streams-sliding-windows.png differ diff --git a/static/41/images/streams-stateful_operations.png b/static/41/images/streams-stateful_operations.png new file mode 100644 index 000000000..b0fe3de97 Binary files /dev/null and b/static/41/images/streams-stateful_operations.png differ diff --git a/static/41/images/streams-table-duality-01.png b/static/41/images/streams-table-duality-01.png new file mode 100644 index 000000000..4fa4d1bf8 Binary files /dev/null and b/static/41/images/streams-table-duality-01.png differ diff --git a/static/41/images/streams-table-duality-02.png b/static/41/images/streams-table-duality-02.png new file mode 100644 index 000000000..4e805c10f Binary files /dev/null and b/static/41/images/streams-table-duality-02.png differ diff --git 
a/static/41/images/streams-table-duality-03.png b/static/41/images/streams-table-duality-03.png new file mode 100644 index 000000000..b0b04f591 Binary files /dev/null and b/static/41/images/streams-table-duality-03.png differ diff --git a/static/41/images/streams-table-updates-01.png b/static/41/images/streams-table-updates-01.png new file mode 100644 index 000000000..3a2c35ef3 Binary files /dev/null and b/static/41/images/streams-table-updates-01.png differ diff --git a/static/41/images/streams-table-updates-02.png b/static/41/images/streams-table-updates-02.png new file mode 100644 index 000000000..a0a5b1ff5 Binary files /dev/null and b/static/41/images/streams-table-updates-02.png differ diff --git a/static/41/images/streams-time-windows-hopping.png b/static/41/images/streams-time-windows-hopping.png new file mode 100644 index 000000000..5fcb9d26d Binary files /dev/null and b/static/41/images/streams-time-windows-hopping.png differ diff --git a/static/41/images/streams-time-windows-tumbling.png b/static/41/images/streams-time-windows-tumbling.png new file mode 100644 index 000000000..571ab7942 Binary files /dev/null and b/static/41/images/streams-time-windows-tumbling.png differ diff --git a/static/41/images/streams-welcome.png b/static/41/images/streams-welcome.png new file mode 100644 index 000000000..63918c4b3 Binary files /dev/null and b/static/41/images/streams-welcome.png differ diff --git a/static/41/images/tracking_high_level.png b/static/41/images/tracking_high_level.png new file mode 100644 index 000000000..b6432306a Binary files /dev/null and b/static/41/images/tracking_high_level.png differ diff --git a/static/41/images/transforming_part_1-iframe-placeholder.png b/static/41/images/transforming_part_1-iframe-placeholder.png new file mode 100644 index 000000000..e959f0e97 Binary files /dev/null and b/static/41/images/transforming_part_1-iframe-placeholder.png differ diff --git a/static/41/images/transforming_part_2-iframe-placeholder.png b/static/41/images/transforming_part_2-iframe-placeholder.png new file mode 100644 index 000000000..008ec16bb Binary files /dev/null and b/static/41/images/transforming_part_2-iframe-placeholder.png differ diff --git a/static/41/javadoc/allclasses-index.html b/static/41/javadoc/allclasses-index.html new file mode 100644 index 000000000..6bf220f7f --- /dev/null +++ b/static/41/javadoc/allclasses-index.html @@ -0,0 +1,3862 @@ + + + + +All Classes and Interfaces (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+
+

All Classes and Interfaces

+
+
+
+
+
+
Class
+
Description
+ +
 
+ + + +
 
+ +
+
A convenient base class for configurations to extend.
+
+ +
 
+ +
+
Provides the current status for a connector or a task, along with an identifier for its Connect worker
+
+ +
+
Represents an access control entry.
+
+ +
+
Represents a filter which matches access control entries.
+
+ +
+
A callback interface that the user can implement to trigger custom actions when an acknowledgement completes.
+
+ +
+
The acknowledge type is used with KafkaShareConsumer.acknowledge(ConsumerRecord, AcknowledgeType) to indicate + whether the record was consumed successfully.
+
+ +
+
Represents a binding between a resource pattern and an access control entry.
+
+ +
+
A filter which can match AclBinding objects.
+
+ +
 
+ +
 
+ +
+
Delete result for each ACL binding that matched a delete filter.
+
+ +
+
Represents an operation which an ACL grants or denies permission to perform.
+
+ +
+
Represents whether an ACL grants or denies permissions.
+
+ +
 
+ + + + + +
+
The administrative client for Kafka, which supports managing and inspecting topics, brokers, configurations and ACLs.
+
+ +
+
The base class for in-built admin clients.
+
+ +
+
The AdminClient configuration class, which also contains constants for configuration entry names.
+
+ +
+
The Aggregator interface for aggregating values of the given key.
+
+ +
+
Indicates the operation tried to create an entity that already exists.
+
+ + + + + +
+
A class representing an alter configuration entry containing name, value and operation type.
+
+ +
 
+ +
+
An interface for enforcing a policy on alter configs requests.
+
+ +
+
Class containing the create request parameters.
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
Any API exception that is part of the public protocol and should be a subclass of this class and be part of this + package.
+
+ +
+
Indicates that the error is fatal to the producer, and the application + needs to restart the producer after handling the error.
+
+ +
+
A read-only metadata class representing the state of the application and the current rebalance.
+
+ +
+
Assignment related configs for the Kafka Streams TaskAssignor.
+
+ +
 
+ +
+
An object representing contextual information from the authentication session.
+
+ +
+
This exception indicates that SASL authentication has failed.
+
+ +
+
Request context interface that provides data from request header as well as connection + and authentication information to plugins.
+
+ +
 
+ +
 
+ +
+
Pluggable authorizer interface for Kafka brokers.
+
+ +
+
An exception that indicates that the authorizer is not ready to receive the request yet.
+
+ +
+
Runtime broker configuration metadata provided to authorizers during start up.
+
+ +
+
Sets the auto.offset.reset configuration when + adding a source processor + or when creating KStream or KTable via StreamsBuilder.
+
+ +
+
A SampledStat that maintains a simple average over its samples.
+
+ +
+
Interface for batching restoration of a StateStore + + It is expected that implementations of this class will not call the StateRestoreCallback.restore(byte[], byte[]) method.
+
+ +
 
+ +
 
+ +
+
The Branched class is used to define the optional parameters when building branches with + BranchedKStream.
+
+ +
+
BranchedKStream is an abstraction of a branched record stream of key-value pairs.
+
+ +
 
+ +
+
BrokerJwtValidator is an implementation of JwtValidator that is used + by the broker to perform more extensive validation of the JWT access token that is received + from the client, but ultimately from posting the client credentials to the OAuth/OIDC provider's + token endpoint.
+
+ +
 
+ +
 
+ +
+
Indicates that none of the specified brokers + could be found.
+
+ +
+
This exception is thrown if the producer cannot allocate memory for a record within max.block.ms due to the buffer + being too full.
+
+ +
+
Collection of builtin DslStoreSuppliers for Kafka Streams.
+
+ +
+
A DslStoreSuppliers that supplies all stores backed by an in-memory map
+
+ +
+
A DslStoreSuppliers that supplies all stores backed by RocksDB
+
+ +
 
+ +
 
+ +
 
+ +
+
ByteBufferSerializer always rewinds the position of the input buffer to zero for + serialization.
+
+ +
 
+ +
 
+ +
+
A callback interface that the user can implement to allow code to execute when the request is complete.
+
+ + + +
+
Checkpoint records emitted by MirrorCheckpointConnector.
+
+ +
+
A detailed description of a single classic group in the cluster.
+
+ +
+
The classic group state.
+
+ +
+
ClientCredentialsJwtRetriever is a JwtRetriever that performs the steps to request + a JWT from an OAuth/OIDC identity provider using the client_credentials grant type.
+
+ +
+
Encapsulates the client instance id used for metrics collection by + producers, consumers, and the admin client used by Kafka Streams.
+
+ +
+
ClientJwtValidator is an implementation of JwtValidator that is used + by the client to perform some rudimentary validation of the JWT access token that is received + as part of the response from posting the client credentials to the OAuth/OIDC provider's + token endpoint.
+
+ +
Deprecated.
+ +
+
Describes a configuration alteration to be made to a client quota entity.
+
+ +
 
+ +
+
Quota callback interface for brokers and controllers that enables customization of client quota computation.
+
+ +
+
Describes a client quota entity, which is a mapping of entity types to their names.
+
+ +
+
The metadata for an entity for which quota is configured.
+
+ +
+
Interface representing a quota configuration entity.
+
+ +
+ +
+ +
+
Describes a client quota entity filter.
+
+ +
+
Describes a component for applying a client quota filter.
+
+ +
+
Types of quotas that may be configured on brokers for client requests.
+
+ +
+
A MetricsReporter may implement this interface to indicate support for collecting client + telemetry on the server side.
+
+ +
+
A client telemetry payload as sent by the client to the telemetry receiver.
+
+ +
+
ClientTelemetryReceiver defines the behaviour for telemetry receiver on the broker side + which receives client telemetry metrics.
+
+ +
 
+ +
+
Enum to specify the group membership operation upon leaving group.
+
+ +
+
An immutable representation of a subset of the nodes, topics, and partitions in the Kafka cluster.
+
+ +
 
+ +
+
The ClusterResource class encapsulates metadata for a Kafka cluster.
+
+ +
+
A callback interface that users can implement when they wish to get notified about changes in the Cluster metadata.
+
+ +
+
CogroupedKStream is an abstraction of one or more grouped record streams of + key-value pairs.
+
+ +
+
Stores can register this callback to be notified upon successful commit.
+
+ +
+
This exception is raised when an offset commit with KafkaConsumer.commitSync() fails + with an unrecoverable error.
+
+ +
+
A compound stat is a stat where a single measurement and associated data structure feeds many metrics.
+
+ +
 
+ +
 
+ +
+
A configuration object containing the configuration entries for a resource.
+
+ +
 
+ +
+
A callback passed to ConfigProvider for subscribing to changes.
+
+ +
+
Configuration data from a ConfigProvider.
+
+ +
+
This class is used for specifying the set of expected configurations.
+
+ +
 
+ +
 
+ +
 
+ +
+
The importance level for a configuration
+
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
+
Validation logic for numeric ranges
+
+ +
+
This is used by the ConfigDef.validate(Map) to get valid values for a configuration given the current + configuration values in order to perform full configuration validation and visibility modification.
+
+ +
+
The type for a configuration value
+
+ +
+
Validation logic the user may provide to perform single configuration validation.
+
+ +
 
+ +
 
+ +
+
The width of a configuration value
+
+ +
+
A class representing a configuration entry containing name, value and additional metadata.
+
+ +
+
Source of configuration entries.
+
+ +
+
Class representing a configuration synonym of a ConfigEntry.
+
+ +
+
Data type of configuration entry.
+
+ +
+
Thrown if the user supplies an invalid configuration
+
+ +
+
A provider of configuration data, which may optionally support subscriptions to configuration changes.
+
+ +
+
A class representing resources that have configs.
+
+ +
+
Type of resource.
+
+ +
+
This class wraps a set of ConfigProvider instances and uses them to perform + transformations.
+
+ +
+
The result of a transformation from ConfigTransformer.
+
+ +
+
A Mix-in style interface for classes that are instantiated by reflection and need to take configuration parameters
+
+ +
 
+ +
+
Provides immutable Connect cluster information, such as the ID of the backing Kafka cluster.
+
+ +
+
Provides the ability to lookup connector metadata, including status and configurations, as well + as immutable cluster information such as Kafka cluster ID.
+
+ +
+
Provides a set of StoreBuilders that will be automatically added to the topology and connected to the + associated processor.
+
+ +
+
ConnectException is the top-level exception type generated by Kafka Connect and connector implementations.
+
+ +
+
A basic Headers implementation.
+
+ +
+
+ Connectors manage integration of Kafka Connect with another system, either as an input that ingests + data into Kafka or an output that passes data to an external system.
+
+ +
+
An interface for enforcing a policy on overriding of Kafka client configs via the connector configs.
+
+ +
 
+ +
 
+ +
+
ConnectorContext allows Connectors to proactively interact with the Kafka Connect runtime.
+
+ +
+
Provides basic health information about the connector and its tasks.
+
+ +
+
Describes the status, worker ID, and any errors associated with a connector.
+
+ +
+
An enum to represent the level of support for connector-defined transaction boundaries.
+
+ +
+
Enum definition that identifies the type of the connector.
+
+ +
+
Utilities that connector implementations might find useful.
+
+ +
+
+ Base class for records containing data to be copied to/from Kafka.
+
+ +
+
A plugin interface to allow registration of new JAX-RS resources like Filters, REST endpoints, providers, etc.
+
+ +
+
The interface provides the ability for ConnectRestExtension implementations to access the JAX-RS + Configurable and cluster state ConnectClusterState.
+
+ +
 
+ +
+
The Consumed class is used to define the optional parameters when using StreamsBuilder to + build instances of KStream, KTable, and GlobalKTable.
+
+ +
 
+ +
+
The consumer configuration keys
+
+ +
+
A detailed description of a single consumer group in the cluster.
+
+ +
Deprecated. +
Since 4.1.
+
+ +
+
A metadata struct containing the consumer group information.
+
+ +
+
Server-side partition assignor for consumer groups used by the GroupCoordinator.
+
+ +
Deprecated. +
Since 4.0.
+
+ +
+
A plugin interface that allows you to intercept (and possibly mutate) records received by the consumer.
+
+ +
+
This interface is used to define custom partition assignment for use in + KafkaConsumer.
+
+ +
 
+ +
 
+ +
 
+ +
+
The rebalance protocol defines partition assignment and revocation semantics.
+
+ +
 
+ +
+
A callback interface that the user can implement to trigger custom actions when the set of partitions assigned to the + consumer changes.
+
+ +
+
A key/value pair to be received from Kafka.
+
+ +
+
A container that holds the list ConsumerRecord per partition for a + particular topic.
+
+ +
+
An abstract implementation of FixedKeyProcessor that manages the FixedKeyProcessorContext instance.
+
+ +
+
An abstract implementation of Processor that manages the ProcessorContext instance.
+
+ +
 
+ +
+
The Converter interface provides support for translating between Kafka Connect's runtime data format + and byte[].
+
+ +
+
Abstract class that defines the configuration options for Converter and HeaderConverter instances.
+
+ +
+
The type of Converter and HeaderConverter.
+
+ +
+
A cooperative version of the AbstractStickyAssignor.
+
+ +
+
In the context of the group coordinator, the broker returns this error code for any coordinator request if + it is still loading the group metadata (e.g.
+
+ +
+
In the context of the group coordinator, the broker returns this error code for metadata or offset commit + requests if the group metadata topic has not been created yet.
+
+ +
+
This exception indicates a record has failed its internal CRC check, this generally indicates network or disk + corruption.
+
+ + + +
+
The result of the Admin.createAcls(Collection) call.
+
+ + + + + + + +
+
The result of the Admin.createPartitions(Map) call.
+
+ +
+
An interface for enforcing a policy on create topics requests.
+
+ +
+
Class containing the create request parameters.
+
+ + + + + +
 
+ +
+
A non-sampled version of WindowedCount maintained over all time.
+
+ +
+
An non-sampled cumulative total maintained over all time.
+
+ +
+
Base class for all Kafka Connect data API exceptions.
+
+ +
+
+ A date representing a calendar day with no time of day or timezone.
+
+ +
+
+ An arbitrary-precision signed decimal number.
+
+ +
+
A decoder is a method of turning byte arrays into objects.
+
+ +
+
The default implementation does nothing, just returns the same byte array it takes in.
+
+ +
+
DefaultJwtRetriever instantiates and delegates JwtRetriever API calls to an embedded implementation + based on configuration: + + + + If the value of sasl.oauthbearer.token.endpoint.url is set to a value that starts with the + file protocol (e.g.
+
+ +
+
This JwtValidator uses the delegation approach, instantiating and delegating calls to a + more concrete implementation.
+
+ +
+
ProductionExceptionHandler that always instructs streams to fail when an exception + happens while attempting to produce result records.
+
+ +
+
Default implementation of ReplicationPolicy which prepends the source cluster alias to + remote topic names.
+
+ +
+
A class representing a delegation token.
+
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
+
Options for the Admin.deleteAcls(Collection) call.
+
+ +
+
The result of the Admin.deleteAcls(Collection) call.
+
+ +
+
A class containing either the deleted ACL binding or an exception if the delete failed.
+
+ +
+
A class containing the results of the delete ACLs operation.
+
+ + + + + +
+ +
+ + + +
+
Represents information about deleted records
+
+ + + +
+
The result of the Admin.deleteRecords(Map) call.
+
+ + + + + + + + + + + + + + + + + + + +
+
The result of the Admin.deleteTopics(Collection) call.
+
+ + + +
+
The result of the Admin.describeAcls(AclBindingFilter) call.
+
+ + + + + + + + + +
+ +
+ +
+
The result of the Admin.describeCluster() call.
+
+ + + +
+
The result of the Admin.describeConfigs(Collection) call.
+
+ + + + + + + + + + + + + + + +
+
The result of the Admin.describeLogDirs(Collection) call.
+
+ + + + + + + +
 
+ +
 
+ + + + + +
 
+ + + + + + + + + + + +
+
The result of the Admin.describeTopics(Collection) call.
+
+ + + +
 
+ + + +
+
The result of the Admin.describeUserScramCredentials() call.
+
+ +
+
Interface that specifies how an exception from source node deserialization + (e.g., reading from Kafka) should be handled.
+
+ +
+
Enumeration that describes the response from the exception handler.
+
+ +
+
An interface for converting bytes to objects.
+
+ +
+
An implementation of ConfigProvider based on a directory of files.
+
+ +
+
Server disconnected before a request could be completed.
+
+ +
 
+ +
 
+ +
+
DslKeyValueParams is a wrapper class for all parameters that function + as inputs to DslStoreSuppliers.keyValueStore(DslKeyValueParams).
+
+ +
+
DslSessionParams is a wrapper class for all parameters that function + as inputs to DslStoreSuppliers.sessionStore(DslSessionParams).
+
+ +
+
DslStoreSuppliers defines a grouping of factories to construct + stores for each of the types of state store implementations in Kafka + Streams.
+
+ +
+
DslWindowParams is a wrapper class for all parameters that function + as inputs to DslStoreSuppliers.windowStore(DslWindowParams).
+
+ +
 
+ +
+
Exception thrown due to a request that illegally refers to the same resource twice + (for example, trying to both create and delete the same SCRAM credential for a particular user in a single request).
+
+ +
 
+ +
 
+ +
 
+ + + + + +
+
The result of Admin.electLeaders(ElectionType, Set, ElectLeadersOptions) + + The API of this class is evolving, see Admin for details.
+
+ +
 
+ +
+
This interface controls the strategy that can be used to control how we emit results in a processor.
+
+ +
 
+ +
+
Represents a broker endpoint.
+
+ +
+
Identifies the endpoint type, as specified by KIP-919.
+
+ +
+
An implementation of ConfigProvider based on environment variables.
+
+ +
+
Component that a SinkTask can use to report problematic records (and their corresponding problems) as it + writes them through SinkTask.put(java.util.Collection).
+
+ +
+
This interface allows user code to inspect the context of a record that has failed during processing.
+
+ +
+
An enum to represent the level of support for exactly-once semantics from a source connector.
+
+ + + + + +
+
Retrieves embedded metadata timestamps from Kafka messages.
+
+ +
+
This enumeration type captures the various top-level reasons that a particular + partition of a store would fail to execute a query.
+
+ +
+
Encapsulates details about finalized as well as supported features.
+
+ +
+
Encapsulates details about an update to a finalized feature.
+
+ +
 
+ +
 
+ +
 
+ +
+
The request contained a leader epoch which is smaller than that on the broker that received the + request.
+
+ +
 
+ +
+
Thrown when the share coordinator rejected the request because the share-group state epoch did not match.
+
+ + + +
+
The result of the Admin.fenceProducers(Collection) call.
+
+ +
 
+ +
 
+ +
+
+ A field in a Struct, consisting of a field name, index, and Schema for the field value.
+
+ +
+
An implementation of ConfigProvider that represents a Properties file.
+
+ +
+
FileJwtRetriever is an JwtRetriever that will load the contents + of a file, interpreting them as a JWT access key in the serialized form.
+
+ +
+
Represents a range of version levels supported by every broker in a cluster for some feature.
+
+ +
+
A processor of key-value pair records where keys are immutable.
+
+ +
+
Processor context interface for FixedKeyRecord.
+
+ +
+
A processor supplier that can create one or more FixedKeyProcessor instances.
+
+ +
+
A data class representing an incoming record with fixed key for processing in a FixedKeyProcessor + or a record to forward to downstream processors via FixedKeyProcessorContext.
+
+ +
 
+ +
 
+ +
+
The ForeachAction interface for performing an action on a key-value + pair.
+
+ +
Deprecated. +
Since 4.0 and should not be used any longer.
+
+ +
+
ForwardingAdmin is the default value of forwarding.admin.class in MirrorMaker.
+
+ +
+
A CompoundStat that represents a normalized distribution with a Frequency metric for each + bucketed value.
+
+ +
+
Definition of a frequency metric used in a Frequencies compound statistic.
+
+ +
+
A gauge metric is an instantaneous reading of a particular value.
+
+ +
+
GlobalKTable is an abstraction of a changelog stream from a primary-keyed table.
+
+ +
+
The partition assignment for a consumer group.
+
+ +
 
+ +
+
The class that is used to capture the key and value Serdes and set the part of name used for + repartition topics when performing KStream.groupBy(KeyValueMapper, Grouped), KStream.groupByKey(Grouped), or KTable.groupBy(KeyValueMapper, Grouped) operations.
+
+ +
 
+ +
+
A listing of a group in the cluster.
+
+ +
+
Indicates that a group is already at its configured maximum capacity and cannot accommodate more members
+
+ +
 
+ +
 
+ +
 
+ +
+
The group metadata specifications required to compute the target assignment.
+
+ +
+
The group state.
+
+ +
 
+ +
 
+ +
 
+ +
+
A Header is a key-value pair, and multiple headers can be included with the key, value, and timestamp in each Kafka message.
+
+ +
+
The HeaderConverter interface provides support for translating between Kafka Connect's runtime data format + and byte[].
+
+ +
 
+ +
+
A mutable ordered collection of Header objects.
+
+ +
+
A function to transform the supplied Header.
+
+ +
+
Heartbeat records emitted by MirrorHeartbeatConnector.
+
+ +
 
+ +
+
An algorithm for determining the bin in which a value is to be placed as well as calculating the upper end + of each bin.
+
+ +
+
A scheme for calculating the bins where the width of each bin is a constant determined by the range of values + and the number of bins.
+
+ +
+
A scheme for calculating the bins where the width of each bin is one more than the previous bin, and therefore + the bin widths are increasing at a linear rate.
+
+ +
+
Represents a user defined endpoint in a KafkaStreams application.
+
+ +
+
Alternative implementation of ReplicationPolicy that does not rename remote topics.
+
+ +
 
+ +
+
This exception indicates unexpected requests prior to SASL authentication.
+
+ +
+
Indicates that a method has been invoked illegally or at an invalid time by a connector or task.
+
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
+
The Initializer interface for creating an initial value in aggregations.
+
+ +
+
The integer decoder translates bytes into integers.
+
+ +
 
+ +
 
+ +
+
Annotation to inform users of how much to rely on a particular package, class or method not changing over time.
+
+ +
+
Compatibility may be broken at minor release (i.e.
+
+ +
+
Compatibility is maintained in major, minor and patch releases with one exception: compatibility may be broken + in a major release (i.e.
+
+ +
+
No guarantee is provided as to reliability or stability across any level of release granularity.
+
+ +
 
+ +
+
An unchecked wrapper for InterruptedException
+
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
+
An exception that may indicate the client's metadata is out of date
+
+ +
+
Thrown when the offset for a set of partitions is invalid (either undefined or out of range), + and no reset policy has been configured.
+
+ +
+
Thrown when the offset for a set of partitions is invalid (either undefined or out of range), + and no reset policy has been configured.
+
+ +
 
+ +
 
+ +
 
+ +
+
This exception indicates that the produce request sent to the partition leader + contains a non-matching producer epoch.
+
+ +
 
+ +
+
Thrown when the acknowledgement of delivery of a record could not be completed because the record + state is invalid.
+
+ +
+
Thrown when a broker registration request is considered invalid by the controller.
+
+ +
+
Thrown when a regular expression received in a request is not valid.
+
+ +
 
+ +
 
+ +
+
Thrown when a request breaks basic wire protocol rules.
+
+ +
 
+ +
 
+ +
+
Thrown when the share session epoch is invalid.
+
+ +
+
Indicates that there was a problem when trying to access a StateStore.
+
+ +
+
Indicates that the specific state store being queried via + StoreQueryParameters used a partitioning that is not assigned to this instance.
+
+ +
+
Indicate the timestamp of a record is invalid.
+
+ +
+
The client has attempted to perform an operation on an invalid topic.
+
+ +
 
+ +
+
The transaction coordinator returns this error code if the timeout received via the InitProducerIdRequest is larger than + the `transaction.max.timeout.ms` config value.
+
+ +
 
+ +
 
+ +
 
+ +
+
Register metrics in JMX as dynamic mbeans based on the metric names
+
+ +
+
The Joined class represents optional params that can be passed to + KStream#join(KTable,...) and + KStream#leftJoin(KTable,...) operations.
+
+ +
+
The window specifications used for joins.
+
+ +
+
JwtBearerJwtRetriever is a JwtRetriever that performs the steps to request + a JWT from an OAuth/OIDC identity provider using the urn:ietf:params:oauth:grant-type:jwt-bearer + grant type.
+
+ +
+
A JwtRetriever is the internal API by which the login module will + retrieve an access token for use in authorization by the broker.
+
+ +
+
A JwtRetrieverException is thrown in cases where the JWT cannot be retrieved.
+
+ +
+
An instance of JwtValidator acts as a function object that, given an access + token in base-64 encoded JWT format, can parse the data, perform validation, and construct an + OAuthBearerToken for use by the caller.
+
+ +
+
A JwtValidatorException is thrown in cases where the validity of a JWT cannot be + determined.
+
+ +
+
The default implementation of Admin.
+
+ +
+
KafkaClientSupplier can be used to provide custom Kafka clients to a KafkaStreams instance.
+
+ +
+
A client that consumes records from a Kafka cluster.
+
+ +
+
The base class of all other Kafka exceptions
+
+ +
+
A flexible future which supports call chaining and other asynchronous programming patterns.
+
+ +
+
A function which takes objects of type A and returns objects of type B.
+
+ +
+
A consumer of two different types of object.
+
+ +
 
+ +
+
An implementation of MetricsContext, it encapsulates required metrics context properties for Kafka services and clients
+
+ +
+
Principals in Kafka are defined by a type and a name.
+
+ +
+
Pluggable principal builder interface which supports both SSL authentication through + SslAuthenticationContext and SASL through SaslAuthenticationContext.
+
+ +
+
Serializer/Deserializer interface for KafkaPrincipal for the purpose of inter-broker forwarding.
+
+ +
+
A Kafka client that publishes records to the Kafka cluster.
+
+ +
+
A client that consumes records from a Kafka cluster using a share group.
+
+ +
+
Miscellaneous disk-related IOException occurred when handling a request.
+
+ +
+
A Kafka client that allows for performing continuous computation on input coming from one or more input topics and + sends output to zero, one, or more output topics.
+
+ +
+
Class that handles options passed in case of KafkaStreams instance scale down
+
+ +
+
Kafka Streams states are the possible state that a Kafka Streams instance can be in.
+
+ +
+
Listen to KafkaStreams.State change events.
+
+ +
+
A simple container class for the assignor to return the desired placement of active and standby tasks on + KafkaStreams clients.
+
+ +
 
+ +
 
+ +
+
A read-only metadata class representing the current state of each KafkaStreams client with at least one StreamThread participating in this rebalance
+
+ +
+
Interactive query for retrieving a single record based on its key.
+
+ +
+
Represents all the metadata related to a key, where a particular key resides in a KafkaStreams application.
+
+ +
+
A key-value pair defined for a single Kafka Streams record.
+
+ +
+
A store supplier that can be used to create one or more KeyValueStore<Bytes, byte[]> instances of type <Bytes, byte[]>.
+
+ +
+
Iterator interface of KeyValue.
+
+ +
+
The KeyValueMapper interface for mapping a key-value pair to a new value of arbitrary type.
+
+ +
+
A key-value store that supports put/get/delete and range queries.
+
+ +
+
KGroupedStream is an abstraction of a grouped record stream of key-value pairs.
+
+ +
+
KGroupedTable is an abstraction of a re-grouped changelog stream from a primary-keyed table, + on a different grouping key than the original primary key.
+
+ +
+
KStream is an abstraction of a record stream of key-value pairs, i.e., each record is + an independent entity/event in the real world.
+
+ +
+
KTable is an abstraction of a changelog stream from a primary-keyed table.
+
+ +
+
Encapsulates information about lag, at a store partition replica (active or standby).
+
+ +
+
There is no currently available leader for the given partition (either because a leadership election is in progress + or because all replicas are down).
+
+ +
Deprecated. +
Since 4.1.
+
+ +
Deprecated. +
Since 4.1.
+
+ + + +
+
The result of the Admin.listConfigResources() call.
+
+ + + + + +
+
Specification of consumer group offsets to list using Admin.listConsumerGroupOffsets(java.util.Map).
+
+ +
Deprecated. +
Since 4.1.
+
+ +
Deprecated. +
Since 4.1.
+
+ +
 
+ +
+
The leader does not have an endpoint corresponding to the listener on which metadata was requested.
+
+ +
+
Options for Admin.listGroups().
+
+ +
+
The result of the Admin.listGroups() call.
+
+ +
+ +
+ +
+
The result of the Admin.listOffsets(Map) call.
+
+ +
 
+ + + + + +
 
+ + + + + +
+
Specification of share group offsets to list using Admin.listShareGroupOffsets(Map, ListShareGroupOffsetsOptions).
+
+ + + + + +
+
Specification of streams group offsets to list using Admin.listStreamsGroupOffsets(Map, ListStreamsGroupOffsetsOptions).
+
+ +
+
Options for Admin.listTopics().
+
+ +
+
The result of the Admin.listTopics() call.
+
+ +
+ +
+ +
+
The result of the Admin.listTransactions() call.
+
+ +
+
Indicates that the state store directory lock could not be acquired because another thread holds the lock.
+
+ +
+
Deserialization handler that logs a deserialization exception and then + signals the processing pipeline to continue processing more records.
+
+ +
+
Processing exception handler that logs a processing exception and then + signals the processing pipeline to continue processing more records.
+
+ +
+
Deserialization handler that logs a deserialization exception and then + signals the processing pipeline to stop processing more records and fail.
+
+ +
+
Processing exception handler that logs a processing exception and then + signals the processing pipeline to stop processing more records and fail.
+
+ +
+
Retrieves embedded metadata timestamps from Kafka messages.
+
+ +
+
A description of a log directory on a particular broker.
+
+ +
+
Thrown when a request is made for a log directory that is not present on the broker
+
+ +
+
Login interface for authentication.
+
+ +
+
This class holds definitions for log level configurations related to Kafka's application logging.
+
+ +
+
This represents all the required data and indexes for a specific log segment that needs to be stored in the remote + storage.
+
+ +
+
In the event of an unclean leader election, the log will be truncated, + previously committed data will be lost, and new data will be written + over these offsets.
+
+ +
+
The long decoder translates bytes into longs.
+
+ +
 
+ +
 
+ +
+
Used to describe how a StateStore should be materialized.
+
+ +
 
+ +
+
A SampledStat that gives the max over its samples.
+
+ +
+
A measurable quantity that can be registered as a metric
+
+ +
+
A MeasurableStat is a Stat that is also Measurable (i.e.
+
+ +
+
A description of the assignments of a specific group member.
+
+ +
+
The partition assignment for a consumer group member.
+
+ +
+
A detailed description of a single group member in the cluster.
+
+ +
 
+ +
+
Interface representing the subscription metadata for a group member.
+
+ +
+
A struct containing information about the member to be removed.
+
+ +
+
The interface for merging aggregate values for SessionWindows with the given key.
+
+ +
+
This interface allows to define Formatters that can be used to parse and format records read by a + Consumer instance for display.
+
+ +
+
A compound stat that includes a rate metric and a cumulative total metric.
+
+ +
+
A metric tracked for monitoring purposes.
+
+ +
+
Configuration values for metrics
+
+ +
+
The MetricName class encapsulates a metric's name, logical group and its related attributes.
+
+ +
+
A template for a MetricName.
+
+ +
+
A registry of sensors and metrics.
+
+ +
+
MetricsContext encapsulates additional contextLabels about metrics exposed via a + MetricsReporter
+
+ +
+
A plugin interface to allow things to listen as new metrics are created so they can be reported.
+
+ +
+
Super-interface for Measurable or Gauge that provides + metric values.
+
+ +
+
A SampledStat that gives the min over its samples.
+
+ +
+
Client to interact with MirrorMaker internal topics (checkpoints, heartbeats) on a given cluster.
+
+ +
+
Configuration required for MirrorClient to talk to a given target cluster.
+
+ +
 
+ +
 
+ +
+
This connector provides support for mocking certain connector behaviors.
+
+ +
+
A mock of the Consumer interface you can use for testing code that uses Kafka.
+
+ +
+
MockProcessorContext is a mock of ProcessorContext for users to test their Processor + implementations.
+
+ +
Deprecated. +
Since 4.0.
+
+ +
 
+ +
 
+ +
+
MockProcessorContext.CapturedPunctuator holds captured punctuators, along with their scheduling information.
+
+ +
+
MockProcessorContext.CapturedPunctuator holds captured punctuators, along with their scheduling information.
+
+ +
+
A mock of the producer interface you can use for testing code that uses Kafka.
+
+ +
+
A mock of the ShareConsumer interface you can use for testing code that uses Kafka.
+
+ +
+
Mock sink implementation which delegates to MockConnector.
+
+ +
+
Task implementation for MockSinkConnector.
+
+ +
+
Mock source implementation which delegates to MockConnector.
+
+ +
+
Task implementation for MockSourceConnector.
+
+ +
+
Plugins can implement this interface to register their own metrics.
+
+ +
+
Interactive query for retrieving a set of records with the same specified key and different timestamps within the specified time range.
+
+ +
 
+ +
+
A misc.
+
+ +
 
+ +
+
A new partition reassignment, which can be applied via Admin.alterPartitionReassignments(Map, AlterPartitionReassignmentsOptions).
+
+ +
+
Describes new partitions for a particular topic in a call to Admin.createPartitions(Map).
+
+ +
+
A new topic to be created via Admin.createTopics(Collection).
+
+ +
+
Information about a Kafka node
+
+ +
+
Indicates that there is no stored offset for a partition and no defined offset + reset policy.
+
+ +
+
Thrown if a reassignment cannot be cancelled because none is in progress.
+
+ +
 
+ +
+
In the context of the group coordinator, the broker returns this error code if it receives an offset fetch + or commit request for a group it's not the coordinator of.
+
+ +
+
Number of insync replicas for the partition is lower than min.insync.replicas This exception is raised when the low + ISR size is discovered *after* the message was already appended to the log.
+
+ +
+
Number of insync replicas for the partition is lower than min.insync.replicas
+
+ +
+
Indicates that an operation attempted to modify or delete a connector or task that is not present on the worker.
+
+ +
+
Broker returns this error if a request could not be processed because the broker is not the leader + or follower for a topic partition.
+
+ +
+
A Callback for use by the SaslServer implementation when it + needs to validate the SASL extensions for the OAUTHBEARER mechanism + Callback handlers should use the OAuthBearerExtensionsValidatorCallback.valid(String) + method to communicate valid extensions back to the SASL server.
+
+ +
+
+ OAuthBearerLoginCallbackHandler is an AuthenticateCallbackHandler that + accepts OAuthBearerTokenCallback and SaslExtensionsCallback callbacks to + perform the steps to request a JWT from an OAuth/OIDC provider using the + client_credentials.
+
+ +
+
The LoginModule for the SASL/OAUTHBEARER mechanism.
+
+ +
+
The b64token value as defined in + RFC 6750 Section + 2.1 along with the token's specific scope and lifetime and principal + name.
+
+ +
+
A Callback for use by the SaslClient and Login + implementations when they require an OAuth 2 bearer token.
+
+ +
+
A Callback for use by the SaslServer implementation when it + needs to provide an OAuth 2 bearer token compact serialization for + validation.
+
+ +
+
+ OAuthBearerValidatorCallbackHandler is an AuthenticateCallbackHandler that + accepts OAuthBearerValidatorCallback and OAuthBearerExtensionsValidatorCallback + callbacks to implement OAuth/OIDC validation.
+
+ +
+
The Kafka offset commit API allows users to provide additional metadata (in the form of a string) + when an offset is committed.
+
+ +
+
A container class for offset and timestamp.
+
+ +
+
A callback interface that the user can implement to trigger custom actions when a commit request completes.
+
+ +
+
The client has tried to save its offset with associated metadata larger than the maximum size allowed by the server.
+
+ +
 
+ +
+
Indicates that the leader is not able to guarantee monotonically increasing offsets + due to the high watermark lagging behind the epoch start offset after a recent leader election
+
+ +
+
No reset policy has been defined, and the offsets for these partitions are either larger or smaller + than the range of offsets the server has for the given partition.
+
+ +
+
No reset policy has been defined, and the offsets for these partitions are either larger or smaller + than the range of offsets the server has for the given partition.
+
+ +
Deprecated. +
Since 4.0.
+
+ +
+
This class allows to specify the desired offsets when using KafkaAdminClient.listOffsets(Map, ListOffsetsOptions)
+
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
+
+ OffsetStorageReader provides access to the offset storage used by sources.
+
+ +
+
Indicates that the broker did not attempt to execute this operation.
+
+ +
+
This exception indicates that the broker received an unexpected sequence number from the producer, + which means that data may have been lost.
+
+ +
+
Server-side partition assignor used by the GroupCoordinator.
+
+ + + +
+
Partitioner Interface +
+ Implement Monitorable to enable the partitioner to register metrics.
+
+ +
+
This is used to describe per-partition state in the MetadataResponse.
+
+ +
+
A partition reassignment, which has been listed via Admin.listPartitionReassignments().
+
+ +
+
Resource pattern type.
+
+ +
 
+ +
+
A compound stat that reports one or more percentiles
+
+ +
 
+ +
 
+ +
 
+ +
 
+ +
+
This allows plugins to register metrics and sensors.
+
+ +
+
Exception thrown if a create topics request does not satisfy the configured policy for a topic.
+
+ +
+
A representation of a position vector with respect to a set of topic partitions.
+
+ +
+
A class bounding the processing state Position during queries.
+
+ +
 
+ +
+
A predicate on records.
+
+ +
+
The Predicate interface represents a predicate (boolean-valued function) of a KeyValue pair.
+
+ +
 
+ +
+
Class containing the state of a transaction after it has been prepared for a two-phase commit.
+
+ +
+
Exception used to indicate a kafka principal deserialization failure during request forwarding.
+
+ +
+
An object to define the options used when printing a KStream.
+
+ +
+
A simple wrapper around UUID that abstracts a Process ID
+
+ +
+
Processor context interface.
+
+ +
+
An interface that allows user code to inspect a record that has failed processing
+
+ +
 
+ +
+
A processor of key-value pair records.
+
+ +
+
Processor context interface for Record.
+
+ +
+
Processor context interface.
+
+ +
+
Indicates a processor state operation (e.g.
+
+ +
+
A processor supplier that can create one or more Processor instances.
+
+ +
+
Wrapper class that can be used to inject custom wrappers around the processors of their application topology.
+
+ +
+
This class is used to provide the optional parameters when producing to new topics + using KStream.to(String, Produced).
+
+ +
+
The interface for the KafkaProducer
+
+ +
+
Configuration for the Kafka Producer.
+
+ +
+
This fatal exception indicates that another producer with the same transactional.id has been + started.
+
+ +
+
A plugin interface that allows you to intercept (and possibly mutate) the records received by the producer before + they are published to the Kafka cluster.
+
+ +
+
A key/value pair to be sent to Kafka.
+
+ +
 
+ +
+
Interface that specifies how an exception when attempting to produce a result to + Kafka should be handled.
+
+ +
 
+ +
 
+ +
+
Controls what notion of time is used for punctuation scheduled via + schedule: + + STREAM_TIME - uses "stream time", which is advanced by the processing of messages + in accordance with the timestamp as extracted by the TimestampExtractor in use.
+
+ +
+
A functional interface used as an argument to + ProcessingContext.schedule(Duration, PunctuationType, Punctuator).
+
+ +
+
Marker interface that all interactive queries must implement (see KafkaStreams.query(StateQueryRequest)).
+
+ +
+
Used to enable querying of custom StateStore types via the KafkaStreams API.
+
+ +
+
Provides access to the QueryableStoreTypes provided with KafkaStreams.
+
+ +
 
+ +
 
+ +
 
+ +
+
Runtime configuration parameters
+
+ +
+
Container for a single partition's result when executing a StateQueryRequest.
+
+ +
+
This class is used to describe the state of the quorum received in DescribeQuorumResponse.
+
+ +
 
+ +
 
+ +
+
An upper or lower bound for metrics
+
+ +
+
Thrown when a sensor records a value that causes a metric to go outside the bounds configured as its quota
+
+ +
+
An endpoint for a raft quorum voter.
+
+ +
+
The range assignor works on a per-topic basis.
+
+ +
+
Interactive query for issuing range queries and scans over KeyValue stores.
+
+ +
+
The rate of the given quantity.
+
+ +
+
A key-value store that only supports read operations.
+
+ +
+
A session store that only supports read operations.
+
+ +
+
A window store that only supports read operations.
+
+ +
+
Thrown if a request cannot be completed because a partition reassignment is in progress.
+
+ +
 
+ +
 
+ +
+
Interface for reconfigurable classes that support dynamic configuration.
+
+ +
+
A data class representing an incoming record for processing in a Processor + or a record to forward to downstream processors via ProcessorContext.
+
+ +
+
This record batch is larger than the maximum allowable size
+
+ +
+
The context associated with the current record being processed by + a Processor
+
+ +
+
This exception is raised for any error that occurs while deserializing records received by the consumer using + the configured Deserializer.
+
+ +
 
+ +
+
The metadata for a record that has been acknowledged by the server
+
+ +
 
+ +
+
Typical implementations of this interface convert data from an `InputStream` received via `readRecords` into a + iterator of `ProducerRecord` instance.
+
+ +
+
Describe records to delete in a call to Admin.deleteRecords(Map)
+
+ +
+
This record is larger than the maximum allowable size
+
+ +
+
The Reducer interface for combining two values of the same type into a new value.
+
+ +
+
Indicates that an operation failed due to outdated or invalid metadata, + requiring a refresh (e.g., refreshing producer metadata) before retrying the request.
+
+ +
+
Convenience tool for multi-cluster environments.
+
+ +
+
Base class for remote log metadata objects like RemoteLogSegmentMetadata, RemoteLogSegmentMetadataUpdate, + and RemotePartitionDeleteMetadata.
+
+ +
+
This interface provides storing and fetching remote log segment metadata with strongly consistent semantics.
+
+ +
+
This class represents a universally unique identifier associated to a topic partition's log segment.
+
+ +
+
It describes the metadata about a topic partition's remote log segment in the remote storage.
+
+ +
+
Custom metadata from a RemoteStorageManager plugin.
+
+ +
+
It describes the metadata update about the log segment in the remote storage.
+
+ +
+
This enum indicates the state of the remote log segment.
+
+ +
+
This class represents the metadata about the remote partition.
+
+ +
+
This enum indicates the deletion state of the remote topic partition.
+
+ +
+
Exception thrown when a resource is not found on the remote storage.
+
+ +
+
Exception thrown when there is a remote storage error.
+
+ +
+
This interface provides the lifecycle of remote log segments that includes copy, fetch, and delete from remote + storage.
+
+ +
+
Type of the index file.
+
+ +
+
This class contains the metrics related to tiered storage feature, which is to have a centralized + place to store them, so that we can verify all of them easily.
+
+ + + + + + + + + + + + + +
+
This class is used to provide the optional parameters for internal repartition topics.
+
+ +
+
A description of a replica on a particular broker.
+
+ +
+
The replica is not available for the requested topic partition.
+
+ +
+
An interface used by the MirrorMaker connectors to manage topics names between source and target clusters.
+
+ +
+
Represents a cluster resource with a tuple of (type, name).
+
+ +
+
Exception thrown due to a request for a resource that does not exist.
+
+ +
+
Represents a pattern that is used by ACLs to match zero or more + Resources.
+
+ +
+
Represents a filter that can match ResourcePattern.
+
+ +
+
Represents a type of resource which an ACL can be applied to.
+
+ +
 
+ +
 
+ +
+
A retriable exception is a transient exception that if retried may succeed.
+
+ +
+
An exception that indicates the operation can be reattempted.
+
+ +
+
An interface to that allows developers to customize the RocksDB settings for a given Store.
+
+ +
+
The round robin assignor lays out all the available partitions and all the available consumers.
+
+ +
+
The "Round-Robin" partitioner + + This partitioning strategy can be used when user wants + to distribute the writes to all partitions equally.
+
+ +
+
A SampledStat records a single scalar value measured over one or more samples.
+
+ +
 
+ +
+
This exception indicates that SASL authentication has failed.
+
+ +
 
+ +
+
A simple immutable value object class holding customizable SASL extensions.
+
+ +
+
Optional callback used for SASL mechanisms if any extensions need to be set + in the SASL exchange.
+
+ +
+
+ Definition of an abstract data type.
+
+ +
+
The type of a schema.
+
+ +
+
A composite containing a Schema and associated value
+
+ +
+
+ SchemaBuilder provides a fluent API for constructing Schema objects.
+
+ +
+
Indicates an error while building a schema via SchemaBuilder
+
+ +
+
+ SchemaProjector is a utility to project a value between compatible schemas and throw exceptions + when non compatible schemas are provided.
+
+ +
+
Indicates an error while projecting a schema via SchemaProjector
+
+ +
+
A simple source connector that is capable of producing static data with + Struct schemas.
+
+ +
+
Task implementation for SchemaSourceConnector.
+
+ +
+
SCRAM credential class that encapsulates the credential data persisted for each user that is + accessible to the server.
+
+ +
+
Callback used for SCRAM mechanisms.
+
+ +
+
Mechanism and iterations for a SASL/SCRAM credential associated with a user.
+
+ +
+
Optional callback used for SCRAM mechanisms if any extensions need to be set + in the SASL/SCRAM exchange.
+
+ +
 
+ +
+
Representation of a SASL/SCRAM Mechanism.
+
+ +
+
Contains the common security config for SSL and SASL
+
+ +
+
An error indicating that security is disabled on the broker.
+
+ +
 
+ +
+
An interface for generating security providers.
+
+ +
+
A sensor applies a continuous sequence of numerical values to a set of associated metrics.
+
+ +
 
+ +
+
The interface for wrapping a serializer and deserializer for the given data type.
+
+ +
+
Factory for creating serializers / deserializers.
+
+ +
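A brief sketch of how the Serde wrapper and the Serdes factory listed above fit together (the topic name is arbitrary, since serializers only use it for context):

```java
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;

public class SerdeExample {
    public static void main(String[] args) {
        // A Serde bundles a Serializer and a Deserializer for one type.
        Serde<String> stringSerde = Serdes.String();

        byte[] bytes = stringSerde.serializer().serialize("example-topic", "hello");
        String roundTripped = stringSerde.deserializer().deserialize("example-topic", bytes);

        System.out.println(roundTripped); // hello
    }
}
```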
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
 
+ +
+
Any exception during serialization in the producer
+
+ +
+
An interface for converting objects to bytes.
+
+ +
+
A store supplier that can be used to create one or more SessionStore<Byte, byte[]> instances.
+
+ +
+
Interface for storing the aggregated values of sessions.
+
+ +
+
Same as a SessionWindowedKStream, however, for multiple co-grouped KStreams.
+
+ +
 
+ +
+
SessionWindowedKStream is an abstraction of a windowed record stream of key-value pairs.
+
+ +
 
+ +
+
A session based window specification used for aggregating events into sessions.
+
+ +
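For orientation, a session window specification is usually combined with a grouped stream. A minimal sketch, assuming a five-minute inactivity gap and default serdes configured elsewhere; the topic name is illustrative:

```java
import java.time.Duration;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.SessionWindows;

public class SessionWindowExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> clicks = builder.stream("clicks"); // illustrative topic

        // Count events per key; a session closes after 5 minutes of inactivity.
        clicks.groupByKey()
              .windowedBy(SessionWindows.ofInactivityGapWithNoGrace(Duration.ofMinutes(5)))
              .count();
    }
}
```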
+
A client that consumes records from a Kafka cluster using a share group.
+
+ +
+
A detailed description of a single share group in the cluster.
+
+ +
+
Server-side partition assignor for share groups used by the GroupCoordinator.
+
+ +
+
A description of the assignments of a specific share group member.
+
+ +
+
A detailed description of a single share group member in the cluster.
+
+ +
+
Indicates that a new share session could not be opened because the limit of share sessions has been reached.
+
+ +
+
Thrown when the share session was not found.
+
+ +
 
+ +
 
+ +
+
A HeaderConverter that serializes header values as strings and that deserializes header values to the most appropriate + numeric, boolean, array, or map representation.
+
+ +
+
A simple rate: the rate is incrementally calculated based on the elapsed time between the earliest reading and now.
+
+ +
+
SinkConnectors implement the Connector interface to send Kafka data to another system.
+
+ +
+
A context to allow a SinkConnector to interact with the Kafka Connect runtime.
+
+ +
+
SinkRecord is a ConnectRecord that has been read from Kafka and includes the original Kafka record's + topic, partition and offset (before any transformations have been applied) + in addition to the standard fields.
+
+ +
+
SinkTask is a Task that takes records loaded from Kafka and sends them to another system.
+
+ +
+
Context passed to SinkTasks, allowing them to access utilities in the Kafka Connect runtime.
+
+ +
+
A sliding window used for aggregating events.
+
+ +
 
+ +
+
Directional pair of clusters, where source is mirrored to target.
+
+ +
+
SourceConnectors implement the connector interface to pull data from another system and send + it to Kafka.
+
+ +
+
A context to allow a SourceConnector to interact with the Kafka Connect runtime.
+
+ +
+
+ SourceRecords are generated by SourceTasks and passed to Kafka Connect for storage in + Kafka.
+
+ +
+
SourceTask is a Task that pulls records from another system for storage in Kafka.
+
+ +
+
Represents the permitted values for the SourceTask.TRANSACTION_BOUNDARY_CONFIG property.
+
+ +
+
SourceTaskContext is provided to SourceTasks to allow them to interact with the underlying + runtime.
+
+ +
 
+ +
+
This exception indicates that SSL handshake has failed.
+
+ +
+
Describes whether the server should require or request client authentication.
+
+ +
 
+ +
+
Plugin interface for allowing creation of SSLEngine object in a custom way.
+
+ +
 
+ +
+
The StaleMemberEpochException is used in the context of the new + consumer group protocol (KIP-848).
+
+ +
 
+ +
 
+ +
+
A Stat is a quantity, such as an average or a maximum, that is computed off the stream of updates to a sensor.
+
+ +
+
The request object for Interactive Queries.
+
+ +
+
A progressive builder interface for creating StoreQueryRequests.
+
+ +
+
The response object for interactive queries.
+
+ +
+
Restoration logic for log-backed state stores upon restart, + it takes one record at a time from the logs to apply to the restoring state.
+
+ +
+
Class for listening to various states of the restoration process of a StateStore.
+
+ +
+
Factory for creating serializers / deserializers for state stores in Kafka Streams.
+
+ +
+
A storage engine for managing state maintained by a stream processor.
+
+ +
+
State store context interface.
+
+ +
+
Indicates that the state store being queried is closed although the Kafka Streams state is + RUNNING or + REBALANCING.
+
+ +
+
Indicates that the state store being queried is already closed.
+
+ +
+
The sticky assignor serves two purposes.
+
+ +
 
+ +
+
Build a StateStore wrapped with optional caching and logging.
+
+ +
+
StoreQueryParameters allows you to pass a variety of parameters when fetching a store for interactive query.
+
+ +
+
Factory for creating state stores in Kafka Streams.
+
+ +
+
A state store supplier which can create one or more StateStore instances.
+
+ +
+
Class used to configure the name of the join processor, the repartition topic name, + state stores or state store names in Stream-Stream join.
+
+ +
+
Determine how records are distributed among the partitions in a Kafka topic.
+
+ +
+
StreamsBuilder provides the high-level Kafka Streams DSL to specify a Kafka Streams topology.
+
+ +
+
Configuration for a KafkaStreams instance.
+
+ +
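To tie StreamsBuilder and StreamsConfig together, a minimal pass-through topology might look like the following sketch; the application id, bootstrap servers, and topic names are placeholders:

```java
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

public class StreamsExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-app");       // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic").to("output-topic"); // copy records from one topic to another

        KafkaStreams streams = new KafkaStreams(builder.build(), props);
        streams.start();
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
}
```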
 
+ +
+
StreamsException is the top-level exception type generated by Kafka Streams, and indicates errors have + occurred during a StreamThread's processing.
+
+ +
+
A detailed description of a single streams group in the cluster.
+
+ +
+
A description of the assignments of a specific group member.
+
+ +
+
All tasks for one subtopology of a member.
+
+ +
+
A detailed description of a single streams groups member in the cluster.
+
+ +
+
The user-defined endpoint for the member.
+
+ +
+
The cumulative offset for one task.
+
+ +
+
A detailed description of a subtopology in a streams group.
+
+ +
+
Information about a topic.
+
+ +
 
+ +
 
+ +
+
Metadata of a Kafka Streams client.
+
+ +
+
The Kafka Streams metrics interface for adding metric sensors and collecting metric values.
+
+ +
+
Indicates that Kafka Streams is in state CREATED and thus state stores cannot be queried yet.
+
+ +
+
Indicates that Kafka Streams is in state REBALANCING and thus + cannot be queried by default.
+
+ +
+
Indicates that Kafka Streams is in a terminating or terminal state, such as KafkaStreams.State.PENDING_SHUTDOWN, KafkaStreams.State.PENDING_ERROR, KafkaStreams.State.NOT_RUNNING, or KafkaStreams.State.ERROR.
+
+ +
 
+ +
 
+ +
+
Enumeration that describes the response from the exception handler.
+
+ +
+
Converter and HeaderConverter implementation that only supports serializing to strings.
+
+ +
+
Configuration options for StringConverter instances.
+
+ +
+
The string decoder translates bytes into strings.
+
+ +
+
String encoding defaults to UTF8 and can be customized by setting the property key.deserializer.encoding, + value.deserializer.encoding or deserializer.encoding.
+
+ +
+
String encoding defaults to UTF8 and can be customized by setting the property key.serializer.encoding, + value.serializer.encoding or serializer.encoding.
+
+ +
+
+ A structured record containing a set of named fields with values, each field using an independent Schema.
+
+ +
+
The subscribed topic describer is used by the PartitionAssignor + to obtain topic and partition metadata of the subscribed topics.
+
+ +
+
Represents a regular expression compatible with Google RE2/J, used to subscribe to topics.
+
+ +
+
The subscription type followed by a consumer group.
+
+ +
+
Represents a range of versions that a particular broker supports for some feature.
+
+ +
 
+ +
 
+ +
+
Marker interface for a buffer configuration that will strictly enforce size constraints + (bytes and/or number of records) on the buffer, so it is suitable for reducing duplicate + results downstream, but does not promise to eliminate them entirely.
+
+ +
+
Marker interface for a buffer configuration that is "strict" in the sense that it will strictly + enforce the time bound and never emit early.
+
+ +
+
The TableJoined class represents optional parameters that can be passed to + KTable#join(KTable,Function,...) and + KTable#leftJoin(KTable,Function,...) + operations, for foreign key joins.
+
+ +
+
+ Tasks contain the code that actually copies data to/from another system.
+
+ +
+
Indicates a run time error incurred while trying to assign + stream tasks to + threads.
+
+ +
+
A set of utilities to help implement task assignment via the TaskAssignor
+
+ +
 
+ +
+
A simple config container for necessary parameters and optional overrides to apply when + running the active or standby task rack-aware optimizations.
+
+ +
+
A TaskAssignor is responsible for creating a TaskAssignment from a given + ApplicationState.
+
+ +
+
NONE: no error detected
ACTIVE_TASK_ASSIGNED_MULTIPLE_TIMES: multiple KafkaStreams clients assigned with the same active task
INVALID_STANDBY_TASK: stateless task assigned as a standby task
MISSING_PROCESS_ID: ProcessId present in the input ApplicationState was not present in the output TaskAssignment
UNKNOWN_PROCESS_ID: unrecognized ProcessId not matching any of the participating consumers
UNKNOWN_TASK_ID: unrecognized TaskId not matching any of the tasks to be assigned
+
+ +
+
Wrapper class for the final assignment of active and standbys tasks to individual + KafkaStreams clients.
+
+ +
+
Indicates that a specific task is corrupted and needs to be re-initialized.
+
+ +
+
The task ID representation composed as subtopology plus the assigned partition ID.
+
+ +
+
Indicates a run time error incurred while trying to parse the task id from the read string.
+
+ +
+
A simple container class corresponding to a given TaskId.
+
+ +
+
Metadata of a task.
+
+ +
+
Indicates that all tasks belonging to the thread have migrated to another thread.
+
+ +
+
Describes the state, IDs, and any errors of a connector task.
+
+ +
+
This is a simple container class used during the assignment process to distinguish the type of TopicPartitions.
+
+ +
+
This exception indicates that the size of the telemetry metrics data is too large.
+
+ + + +
+ +
+ +
+
TestInputTopic is used to pipe records to topic in TopologyTestDriver.
+
+ +
+
TestOutputTopic is used to read records from a topic in TopologyTestDriver.
+
+ +
+
A key/value pair, including timestamp and record headers, to be sent to or received from TopologyTestDriver.
+
+ +
+
Metadata of a stream thread.
+
+ +
+
Exception thrown if an operation on a resource exceeds the throttling quota.
+
+ +
+
+ A time representing a specific point in a day, not tied to any specific date.
+
+ +
+
Indicates that a request timed out.
+
+ +
+
+ A timestamp representing an absolute time, without timezone information.
+
+ +
 
+ +
+
Interactive query for retrieving a single record based on its key from TimestampedKeyValueStore
+
+ +
+
A key-(value/timestamp) store that supports put/get/delete and range queries.
+
+ +
+
Interactive query for issuing range queries and scans over TimestampedKeyValueStore
+
+ +
+
Interface for storing the aggregated values of fixed-size time windows.
+
+ +
+
An interface that allows the Kafka Streams framework to extract a timestamp from an instance of ConsumerRecord.
+
+ +
+
Same as a TimeWindowedKStream, however, for multiple co-grouped KStreams.
+
+ +
 
+ +
+
TimeWindowedKStream is an abstraction of a windowed record stream of key-value pairs.
+
+ +
 
+ +
+
The fixed-size time-based window specifications used for aggregations.
+
+ +
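A small sketch of a fixed-size window specification, assuming five-minute windows that advance every minute (hopping); the topic name is illustrative:

```java
import java.time.Duration;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.TimeWindows;

public class HoppingWindowExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> events = builder.stream("events"); // illustrative topic

        // Five-minute windows advancing every minute; omit advanceBy for tumbling windows.
        events.groupByKey()
              .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(5))
                                     .advanceBy(Duration.ofMinutes(1)))
              .count();
    }
}
```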
+
This class is used to provide the optional parameters when sending output records to a downstream processor using ProcessorContext.forward(Object, Object, To).
+
+ +
+
The TokenBucket is a MeasurableStat implementing a token bucket algorithm + that is usable within a Sensor.
+
+ +
+
A class representing a delegation token details.
+
+ +
 
+ +
+
A class used to represent a collection of topics.
+
+ +
+
A class used to represent a collection of topics defined by their topic ID.
+
+ +
+
A class used to represent a collection of topics defined by their topic name.
+
+ +
+
Keys that can be used to configure a topic.
+
+ +
 
+ +
+
A detailed description of a single topic in the cluster.
+
+ +
 
+ +
+
This represents a universally unique identifier with a topic id for a topic partition.
+
+ +
+
A listing of a topic in the cluster.
+
+ +
+
An interface for dynamically determining the name of the Kafka topic to which records are sent at the sink node of the topology.
+
+ +
+
A topic name and partition number
+
+ +
+
A class containing leadership, replicas and ISR information for a topic partition.
+
+ +
+
The topic name, partition number and the brokerId of the replica
+
+ +
+
A logical representation of a ProcessorTopology.
+
+ +
Deprecated. +
Since 4.0.
+
+ +
+
Streams configs that apply at the topology level.
+
+ +
 
+ +
+
A meta representation of a topology.
+
+ +
+
Represents a global store.
+
+ +
+
A node of a topology.
+
+ +
+
A processor node of a topology.
+
+ +
+
A sink node of a topology.
+
+ +
+
A source node of a topology.
+
+ +
+
A connected sub-graph of a Topology.
+
+ +
+
Indicates a pre-runtime error that occurred while parsing the logical topology to construct the physical processor topology.
+
+ +
+
This class makes it easier to write tests to verify the behavior of topologies created with Topology or + StreamsBuilder.
+
+ +
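As an illustration of how TopologyTestDriver works with the TestInputTopic and TestOutputTopic helpers listed earlier, here is a sketch that tests a tiny uppercase topology; the topology, topic names, and config values are assumptions:

```java
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TestOutputTopic;
import org.apache.kafka.streams.TopologyTestDriver;

public class TopologyTestExample {
    public static void main(String[] args) {
        // Topology under test: uppercase every value.
        StreamsBuilder builder = new StreamsBuilder();
        builder.<String, String>stream("input").mapValues(v -> v.toUpperCase()).to("output");

        Properties props = new Properties();
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        // No real broker is needed; the driver runs the topology in-process.
        try (TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
            TestInputTopic<String, String> in =
                    driver.createInputTopic("input", new StringSerializer(), new StringSerializer());
            TestOutputTopic<String, String> out =
                    driver.createOutputTopic("output", new StringDeserializer(), new StringDeserializer());

            in.pipeInput("k", "hello");
            System.out.println(out.readKeyValue()); // KeyValue(k, HELLO)
        }
    }
}
```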
 
+ +
+
This is the exception thrown when aborting any undrained batches during a transaction that is aborted without any underlying cause, which likely means that the user chose to abort.
+
+ +
 
+ +
 
+ +
+
Provided to source tasks to allow them to define their own producer transaction boundaries when + exactly-once support is enabled.
+
+ +
 
+ +
 
+ +
 
+ +
 
+ +
+
Single message transformation for Kafka Connect record types.
+
+ +
Deprecated. +
Since 4.0.
+
+ +
Deprecated. +
Since 4.0.
+
+ +
+
Exception thrown when attempting to define a credential that does not meet the criteria for acceptability + (for example, attempting to create a SCRAM credential with an empty username or password or too few/many iterations).
+
+ +
 
+ +
+
The request contained a leader epoch which is larger than that on the broker that received the + request.
+
+ +
 
+ +
+
This exception is raised by the broker if it could not locate the producer metadata associated with the producerId + in question.
+
+ +
+
An error occurred on the server for which the client doesn't have a corresponding error code.
+
+ +
+
Indicates that the state store being queried is unknown, i.e., the state store either does not exist in your topology or is not queryable.
+
+ +
+
This exception indicates that the client sent an invalid or outdated SubscriptionId
+
+ +
 
+ +
+
This topic/partition doesn't exist.
+
+ +
+
Indicates that the NamedTopology being + looked up does not exist in this application
+
+ +
+
The unlimited window specifications used for aggregations.
+
+ + + + + +
 
+ +
+
Exception thrown when there are unstable offsets for the requested topic partitions.
+
+ +
 
+ +
+
Authentication mechanism does not support the requested function.
+
+ +
+
The requesting client does not support the compression type of given partition.
+
+ +
 
+ +
+
The message format version does not support the requested function.
+
+ +
+
This exception indicates that the SASL mechanism requested by the client + is not enabled on the broker.
+
+ +
+
Indicates that a request API or version needed by the client is not supported by the broker.
+
+ + + + + +
+
Retrieves embedded metadata timestamps from Kafka messages.
+
+ +
+
A request to alter a user's SASL/SCRAM credentials.
+
+ +
+
A request to delete a SASL/SCRAM credential for a user.
+
+ +
+
Representation of all SASL/SCRAM credentials associated with a user that can be retrieved, or an exception indicating + why credentials could not be retrieved.
+
+ +
+
A request to update/insert a SASL/SCRAM credential for a user.
+
+ +
+
This class defines an immutable universally unique identifier (UUID).
+
+ +
+
We are converting the byte array to String before deserializing to UUID.
+
+ +
+
We are converting UUID to String before serializing.
+
+ +
+
An instantaneous value.
+
+ +
+
Combines a value from a KeyValue with a timestamp.
+
+ +
+
The ValueJoiner interface for joining two values into a new value of arbitrary type.
+
+ +
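For context, a ValueJoiner merges the two sides of a join into one result. A minimal sketch (the order/customer semantics are invented for illustration):

```java
import org.apache.kafka.streams.kstream.ValueJoiner;

public class ValueJoinerExample {
    public static void main(String[] args) {
        // Combine an order total (left) and a customer name (right) into one string.
        ValueJoiner<Long, String, String> joiner =
                (orderTotal, customerName) -> customerName + " spent " + orderTotal;
        System.out.println(joiner.apply(42L, "alice")); // alice spent 42
    }
}
```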
+
The ValueJoinerWithKey interface for joining two values into a new value of arbitrary type.
+
+ +
+
The ValueMapper interface for mapping a value to a new value of arbitrary type.
+
+ +
+
The ValueMapperWithKey interface for mapping a value to a new value of arbitrary type.
+
+ +
+
Utility for converting from one Connect value to a different form.
+
+ +
Deprecated. +
Since 4.0.
+
+ +
Deprecated. +
Since 4.0.
+
+ +
+
The ValueTransformerWithKey interface for stateful mapping of a value to a new value (with possible new type).
+
+ +
+
A ValueTransformerWithKeySupplier interface which can create one or more ValueTransformerWithKey instances.
+
+ +
+
A connector primarily intended for system tests.
+
+ +
+
Counterpart to VerifiableSourceTask that consumes records and logs information about each to stdout.
+
+ +
+
A connector primarily intended for system tests.
+
+ +
+
A connector primarily intended for system tests.
+
+ +
+
Connect requires that some components implement this interface to define a version string.
+
+ +
+
A representation of a versioned key-value store as a KeyValueStore of type <Bytes, byte[]>.
+
+ +
+
A store supplier that can be used to create one or more versioned key-value stores, + specifically, VersionedBytesStore instances.
+
+ +
+
Interactive query for retrieving a single record from a versioned state store based on its key and timestamp.
+
+ +
+
A key-value store that stores multiple record versions per key, and supports timestamp-based + retrieval operations to return the latest record (per key) as of a specified timestamp.
+
+ +
+
Combines a value (from a key-value record) with a timestamp, for use as the return type + from VersionedKeyValueStore.get(Object, long) and related methods.
+
+ +
+
Iterator interface of VersionedRecord.
+
+ +
 
+ +
 
+ +
 
+ +
+
Exception used to indicate preemption of a blocking operation by an external thread.
+
+ +
+
Retrieves current wall clock timestamps as System.currentTimeMillis().
+
+ +
+
A single window instance, defined by its start and end timestamp.
+
+ +
+
A store supplier that can be used to create one or more WindowStore<Byte, byte[]> instances of type <Byte, byte[]>.
+
+ +
+
The result key type of a windowed stream aggregation.
+
+ +
+
A SampledStat that maintains a simple count of what it has seen.
+
+ +
 
+ +
 
+ +
 
+ +
+
A SampledStat that maintains the sum of what it has seen.
+
+ +
 
+ +
 
+
Windows<W extends Window>
+
+
The window specification for fixed size windows that is used to define window boundaries and grace period.
+
+ +
+
Interface for storing the aggregated values of fixed-size time windows.
+
+ +
+
Iterator interface of KeyValue with key typed Long, used for WindowStore.fetch(Object, long, long) and WindowStore.fetch(Object, Instant, Instant). Users must call its close method explicitly when finished in order to release resources, or use a try-with-resources statement (available since JDK7) for this Closeable class.
+
+ +
+
Marker interface for classes implementing FixedKeyProcessorSupplier + that have been wrapped via a ProcessorWrapper.
+
+ +
+
Marker interface for classes implementing ProcessorSupplier + that have been wrapped via a ProcessorWrapper.
+
+
+
+
+
+
+
+ + diff --git a/static/41/javadoc/allpackages-index.html b/static/41/javadoc/allpackages-index.html new file mode 100644 index 000000000..a7bb3d478 --- /dev/null +++ b/static/41/javadoc/allpackages-index.html @@ -0,0 +1,275 @@ + + + + +All Packages (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+
+

All Packages

+
+
Package Summary
+
+
Package
+
Description
+ +
+
Provides a Kafka client for performing administrative operations (such as creating topics and configuring brokers) on a Kafka cluster.
+
+ +
+
Provides a Kafka client for consuming records from topics and/or partitions in a Kafka cluster.
+
+ +
+
Provides a Kafka client for producing records to topics and/or partitions in a Kafka cluster.
+
+ +
+
Provides shared functionality for Kafka clients and servers.
+
+ +
+
Provides classes representing Access Control Lists for authorization of clients
+
+ +
+
Provides annotations used on Kafka APIs.
+
+ +
+
Provides common mechanisms for defining, parsing, validating, and documenting user-configurable parameters.
+
+ +
+
Provides a pluggable interface and some implementations for late-binding in configuration values.
+
+ +
+
Provides common exception classes.
+
+ +
+
Provides API for application-defined metadata attached to Kafka records.
+
+ +
+
Provides the API used by Kafka clients to emit metrics, which are then exposed using the MetricsReporter interface.
+
+ +
+
Provides methods of statistically aggregating metrics upon emission.
+
+ +
+
Provides mechanisms for enforcing resource quotas.
+
+ +
+
Provides client handles representing logical resources in a Kafka cluster.
+
+ +
+
Provides pluggable interfaces for implementing Kafka authentication mechanisms.
+
+ +
+
Provides a LoginModule for using OAuth Bearer Token authentication with Kafka clusters.
+
+ +
+
Provides implementation to use plaintext credentials authentication for securing Kafka clusters.
+
+ +
+
Provides adaptor to use the Salted Challenge Response Authentication Mechanism for securing Kafka clusters.
+
+ +
+
Provides mechanism for delegating authorization to a distinct Principal for securing Kafka clusters.
+
+ +
+
Provides interface and some implementations of serialization/deserialization routines for various objects.
+
+ +
+
Provides common interfaces used to describe pluggable components.
+
+ +
+
Provides interfaces for Connector and Task implementations.
+
+ +
+
Provides pluggable interfaces for policies controlling how users can configure connectors.
+
+ +
+
Provides classes for representing data and schemas handled by Connect.
+
+ +
+
Provides common exception classes for Connect, used by the framework and plugins to communicate failures.
+
+ +
+
Provides an API for application-defined metadata attached to Connect records.
+
+ +
+
Provides an API for describing the state of a running Connect cluster to + ConnectRestExtension instances.
+
+ +
+
Provides APIs for the MirrorMaker connectors and utilities to manage MirrorMaker resources.
+
+ +
+
Provides a pluggable interface for altering the behavior of the Connect REST API.
+
+ +
+
Provides an API for implementing sink connectors which write Kafka records to external applications.
+
+ +
+
Provides an API for implementing source connectors which read data from external applications into Kafka.
+
+ +
+
Provides pluggable interfaces and some implementations for (de)serializing data to and from Kafka
+
+ +
+
Provides source and sink connector implementations used for testing
+
+ +
+
Provides a pluggable interface for altering data which is being moved by Connect.
+
+ +
+
Provides a pluggable interface for describing when a Transformation should be applied to a record.
+
+ +
+
Provides common utilities that can be used in component implementations.
+
+ +
+
Provides the core functionality and metadata management for consumer group partition assignment.
+
+ +
+
Provides pluggable interface for performing authorization on a Kafka server.
+
+ +
+
Provides a pluggable API for defining remote storage and retrieval of Kafka log segments.
+
+ +
+
Provides pluggable interfaces for expressing policies on topics and configs.
+
+ +
+
Provides pluggable interface for enforcing client quotas from a Kafka server.
+
+ +
+
Provides pluggable interface for capturing client telemetry metrics.
+
+ +
+
Provides the Kafka Streams library for building streaming data applications.
+
+ +
+
Provides common exception classes for Streams applications.
+
+ +
+
Provides a high-level programming model (DSL) to express a (stateful) data flow computation over input streams and tables.
+
+ +
+
Provides a low-level programming model (Processor API, aka, PAPI) to express a (stateful) data flow computation over input topics.
+
+ +
+
Provides a low-level programming model (Processor API, aka, PAPI) to express a (stateful) data flow computation over input topics.
+
+ +
+
Provides classes and interfaces used to manage and assign tasks within Kafka Streams applications.
+
+ +
+
Provides classes for assigning tasks to stream threads.
+
+ +
+
Provides a query API (aka Interactive Queries) over state stores, for extracting data from a stateful Kafka Streams application.
+
+ +
+
Provides interfaces for managing the intermediate state of a stateful streams application.
+
+ +
+
Provides classes for testing Kafka Streams applications with mocked inputs.
+
+ +
+
Provides interfaces for writing plugins for Kafka tools.
+
+
+
+
+
+ + diff --git a/static/41/javadoc/constant-values.html b/static/41/javadoc/constant-values.html new file mode 100644 index 000000000..0b4287437 --- /dev/null +++ b/static/41/javadoc/constant-values.html @@ -0,0 +1,2368 @@ + + + + +Constant Field Values (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+
+

Constant Field Values

+
+

Contents

+ +
+
+
+

org.apache.*

+ + + +
    +
  • +
    org.apache.kafka.common.config.AbstractConfig
    +
    +
    Modifier and Type
    +
    Constant Field
    +
    Value
    +
    public static final String
    + +
    "org.apache.kafka.automatic.config.providers"
    +
    public static final String
    + +
    "config.providers"
    +
    +
  • +
  • +
    org.apache.kafka.common.config.LogLevelConfig
    +
    +
    Modifier and Type
    +
    Constant Field
    +
    Value
    +
    public static final String
    + +
    "DEBUG"
    +
    public static final String
    + +
    "ERROR"
    +
    public static final String
    + +
    "FATAL"
    +
    public static final String
    + +
    "INFO"
    +
    public static final String
    + +
    "TRACE"
    +
    public static final String
    + +
    "WARN"
    +
    +
  • +
  • +
    org.apache.kafka.common.config.SaslConfigs
    +
    +
    Modifier and Type
    +
    Constant Field
    +
    Value
    +
    public static final String
    + +
    "/usr/bin/kinit"
    +
    public static final long
    + +
    60000L
    +
    public static final double
    + +
    0.05
    +
    public static final double
    + +
    0.8
    +
    public static final short
    + +
    300
    +
    public static final short
    + +
    60
    +
    public static final double
    + +
    0.8
    +
    public static final double
    + +
    0.05
    +
    public static final long
    + +
    10000L
    +
    public static final long
    + +
    100L
    +
    public static final String
    + +
    "GSSAPI"
    +
    public static final String
    + +
    "RS256"
    +
    public static final int
    + +
    300
    +
    public static final boolean
    + +
    false
    +
    public static final int
    + +
    60
    +
    public static final int
    + +
    30
    +
    public static final boolean
    + +
    false
    +
    public static final long
    + +
    3600000L
    +
    public static final long
    + +
    10000L
    +
    public static final long
    + +
    100L
    +
    public static final String
    + +
    "org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever"
    +
    public static final String
    + +
    "org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator"
    +
    public static final String
    + +
    "scope"
    +
    public static final String
    + +
    "sub"
    +
    public static final String
    + +
    "GSSAPI"
    +
    public static final String
    + +
    "sasl.client.callback.handler.class"
    +
    public static final String
    + +
    "The fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface."
    +
    public static final String
    + +
    "sasl.jaas.config"
    +
    public static final String
    + +
    "JAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described <a href=\"https://docs.oracle.com/javase/8/docs/technotes/guides/security/jgss/tutorials/LoginConfigFile.html\">here</a>. The format for the value is: <code>loginModuleClass controlFlag (optionName=optionValue)*;</code>. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;"
    +
    public static final String
    + +
    "sasl.kerberos.kinit.cmd"
    +
    public static final String
    + +
    "Kerberos kinit command path."
    +
    public static final String
    + +
    "sasl.kerberos.min.time.before.relogin"
    +
    public static final String
    + +
    "Login thread sleep time between refresh attempts."
    +
    public static final String
    + +
    "sasl.kerberos.service.name"
    +
    public static final String
    + +
    "The Kerberos principal name that Kafka runs as. This can be defined either in Kafka\'s JAAS config or in Kafka\'s config."
    +
    public static final String
    + +
    "sasl.kerberos.ticket.renew.jitter"
    +
    public static final String
    + +
    "Percentage of random jitter added to the renewal time."
    +
    public static final String
    + +
    "sasl.kerberos.ticket.renew.window.factor"
    +
    public static final String
    + +
    "Login thread will sleep until the specified window factor of time from last refresh to ticket\'s expiry has been reached, at which time it will try to renew the ticket."
    +
    public static final String
    + +
    "sasl.login.callback.handler.class"
    +
    public static final String
    + +
    "The fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler"
    +
    public static final String
    + +
    "sasl.login.class"
    +
    public static final String
    + +
    "The fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin"
    +
    public static final String
    + +
    "sasl.login.connect.timeout.ms"
    +
    public static final String
    + +
    "The (optional) value in milliseconds for the external authentication provider connection timeout. Currently applies only to OAUTHBEARER."
    +
    public static final String
    + +
    "sasl.login.read.timeout.ms"
    +
    public static final String
    + +
    "The (optional) value in milliseconds for the external authentication provider read timeout. Currently applies only to OAUTHBEARER."
    +
    public static final String
    + +
    "sasl.login.refresh.buffer.seconds"
    +
    public static final String
    + +
    "The amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER."
    +
    public static final String
    + +
    "sasl.login.refresh.min.period.seconds"
    +
    public static final String
    + +
    "The desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER."
    +
    public static final String
    + +
    "sasl.login.refresh.window.factor"
    +
    public static final String
    + +
    "Login refresh thread will sleep until the specified window factor relative to the credential\'s lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER."
    +
    public static final String
    + +
    "sasl.login.refresh.window.jitter"
    +
    public static final String
    + +
    "The maximum amount of random jitter relative to the credential\'s lifetime that is added to the login refresh thread\'s sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER."
    +
    public static final String
    + +
    "sasl.login.retry.backoff.max.ms"
    +
    public static final String
    + +
    "The (optional) value in milliseconds for the maximum wait between login attempts to the external authentication provider. Login uses an exponential backoff algorithm with an initial wait based on the sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.login.retry.backoff.max.ms setting. Currently applies only to OAUTHBEARER."
    +
    public static final String
    + +
    "sasl.login.retry.backoff.ms"
    +
    public static final String
    + +
    "The (optional) value in milliseconds for the initial wait between login attempts to the external authentication provider. Login uses an exponential backoff algorithm with an initial wait based on the sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.login.retry.backoff.max.ms setting. Currently applies only to OAUTHBEARER."
    +
    public static final String
    + +
    "sasl.mechanism"
    +
    public static final String
    + +
    "SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism."
    +
    public static final String
    + +
    "sasl.oauthbearer.assertion.algorithm"
    +
    public static final String
    + +
    "<p>The algorithm the Apache Kafka client should use to sign the assertion sent to the identity provider. It is also used as the value of the OAuth <code>alg</code> (Algorithm) header in the JWT assertion.</p><p><em>Note</em>: If a value for <code>sasl.oauthbearer.assertion.file</code> is provided, this configuration will be ignored.</p>"
    +
    public static final String
    + +
    "sasl.oauthbearer.assertion.claim.aud"
    +
    public static final String
    + +
    "<p>The JWT <code>aud</code> (Audience) claim which will be included in the client JWT assertion created locally.</p><p><em>Note</em>: If a value for <code>sasl.oauthbearer.assertion.file</code> is provided, this configuration will be ignored.</p>"
    +
    public static final String
    + +
    "sasl.oauthbearer.assertion.claim.exp.seconds"
    +
    public static final String
    + +
    "<p>The number of seconds <em>in the future</em> for which the JWT is valid. The value is used to determine the JWT <code>exp</code> (Expiration) claim based on the current system time when the JWT is created.</p><p>The formula to generate the <code>exp</code> claim is very simple:</p><pre>Let:\n\n x = the current timestamp in seconds, on client\n y = the value of this configuration\n\nThen:\n\n exp = x + y\n</pre><p><em>Note</em>: If a value for <code>sasl.oauthbearer.assertion.file</code> is provided, this configuration will be ignored.</p>"
    +
    public static final String
    + +
    "sasl.oauthbearer.assertion.claim.iss"
    +
    public static final String
    + +
    "<p>The value to be used as the <code>iss</code> (Issuer) claim which will be included in the client JWT assertion created locally.</p><p><em>Note</em>: If a value for <code>sasl.oauthbearer.assertion.file</code> is provided, this configuration will be ignored.</p>"
    +
    public static final String
    + +
    "sasl.oauthbearer.assertion.claim.jti.include"
    +
    public static final String
    + +
    "<p>Flag that determines if the JWT assertion should generate a unique ID for the JWT and include it in the <code>jti</code> (JWT ID) claim.</p><p><em>Note</em>: If a value for <code>sasl.oauthbearer.assertion.file</code> is provided, this configuration will be ignored.</p>"
    +
    public static final String
    + +
    "sasl.oauthbearer.assertion.claim.nbf.seconds"
    +
    public static final String
    + +
    "<p>The number of seconds <em>in the past</em> from which the JWT is valid. The value is used to determine the JWT <code>nbf</code> (Not Before) claim based on the current system time when the JWT is created.</p><p>The formula to generate the <code>nbf</code> claim is very simple:</p><pre>Let:\n\n x = the current timestamp in seconds, on client\n y = the value of this configuration\n\nThen:\n\n nbf = x - y\n</pre><p><em>Note</em>: If a value for <code>sasl.oauthbearer.assertion.file</code> is provided, this configuration will be ignored.</p>"
    +
    public static final String
    + +
    "sasl.oauthbearer.assertion.claim.sub"
    +
    public static final String
    + +
    "<p>The value to be used as the <code>sub</code> (Subject) claim which will be included in the client JWT assertion created locally.</p><p><em>Note</em>: If a value for <code>sasl.oauthbearer.assertion.file</code> is provided, this configuration will be ignored.</p>"
    +
    public static final String
    + +
    "sasl.oauthbearer.assertion.file"
    +
    public static final String
    + +
    "<p>File that contains a <em>pre-generated</em> JWT assertion.</p><p>The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect whenthe file changes to allow for the file to be reloaded on modifications. This allows for &quot;live&quot; assertion rotation without restarting the Kafka client.</p><p>The file contains the assertion in the serialized, three part JWT format:</p><ol><li>The <em>header</em> section is a base 64-encoded JWT header that contains values like <code>alg</code> (Algorithm), <code>typ</code> (Type, always the literal value <code>JWT</code>), etc.</li><li>The <em>payload</em> section includes the base 64-encoded set of JWT claims, such as <code>aud</code> (Audience), <code>iss</code> (Issuer), <code>sub</code> (Subject), etc.</li><li>The <em>signature</em> section is the concatenated <em>header</em> and <em>payload</em> sections that was signed using a private key</li></ol><p>See <a href=\"https://datatracker.ietf.org/doc/html/rfc7519\">RFC 7519</a> and <a href=\"https://datatracker.ietf.org/doc/html/rfc7515\">RFC 7515</a> for more details on the JWT and JWS formats.</p><p><em>Note</em>: If a value for <code>sasl.oauthbearer.assertion.file</code> is provided, all other <code>sasl.oauthbearer.assertion.</code>* configurations are ignored.</p>"
    +
    public static final String
    + +
    "sasl.oauthbearer.assertion.private.key.file"
    +
    public static final String
    + +
    "<p>File that contains a private key in the standard PEM format which is used to sign the JWT assertion sent to the identity provider.</p><p>The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when the file changes to allow for the file to be reloaded on modifications. This allows for &quot;live&quot; private key rotation without restarting the Kafka client.</p><p><em>Note</em>: If a value for <code>sasl.oauthbearer.assertion.file</code> is provided, this configuration will be ignored.</p>"
    +
    public static final String
    + +
    "sasl.oauthbearer.assertion.private.key.passphrase"
    +
    public static final String
    + +
    "<p>The optional passphrase to decrypt the private key file specified by <code>sasl.oauthbearer.assertion.private.key.file</code>.</p><p><em>Note</em>: If the file referred to by <code>sasl.oauthbearer.assertion.private.key.file</code> is modified on the file system at runtime and it was created with a <em>different</em> passphrase than it was previously, the client will not be able to access the private key file because the passphrase is now out of date. For that reason, when using private key passphrases, either use the same passphrase each time, or&mdash;for improved security&mdash;restart the Kafka client using the new passphrase configuration.</p><p><em>Note</em>: If a value for <code>sasl.oauthbearer.assertion.file</code> is provided, this configuration will be ignored.</p>"
    +
    public static final String
    + +
    "sasl.oauthbearer.assertion.template.file"
    +
    public static final String
    + +
    "<p>This optional configuration specifies the file containing the JWT headers and/or payload claims to be used when creating the JWT assertion.</p><p>Not all identity providers require the same set of claims; some may require a given claim while others may prohibit it. In order to provide the most flexibility, this configuration allows the user to provide the static header values and claims that are to be included in the JWT.</p><p><em>Note</em>: If a value for <code>sasl.oauthbearer.assertion.file</code> is provided, this configuration will be ignored.</p>"
    +
    public static final String
    + +
    "sasl.oauthbearer.client.credentials.client.id"
    +
    public static final String
    + +
    "<p>The ID (defined in/by the OAuth identity provider) to identify the client requesting the token.</p><p>The client ID was previously stored as part of the <code>sasl.jaas.config</code> configuration with the key <code>clientId</code>. For backward compatibility, the <code>clientId</code> JAAS option can still be used, but it is deprecated and will be removed in a future version.</p><p>Order of precedence:</p><ul><li><code>sasl.oauthbearer.client.credentials.client.id</code> from configuration</li><li><code>clientId</code> from JAAS</li></ul>"
    +
    public static final String
    + +
    "sasl.oauthbearer.client.credentials.client.secret"
    +
    public static final String
    + +
    "<p>The secret (defined by either the user or preassigned, depending on the identity provider) of the client requesting the token.</p><p>The client secret was previously stored as part of the <code>sasl.jaas.config</code> configuration with the key <code>clientSecret</code>. For backward compatibility, the <code>clientSecret</code> JAAS option can still be used, but it is deprecated and will be removed in a future version.</p><p>Order of precedence:</p><ul><li><code>sasl.oauthbearer.client.credentials.client.secret</code> from configuration</li><li><code>clientSecret</code> from JAAS</li></ul>"
    +
    public static final String
    + +
    "sasl.oauthbearer.clock.skew.seconds"
    +
    public static final String
    + +
    "The (optional) value in seconds to allow for differences between the time of the OAuth/OIDC identity provider and the broker."
    +
    public static final String
    + +
    "sasl.oauthbearer.expected.audience"
    +
    public static final String
    + +
    "The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. The JWT will be inspected for the standard OAuth \"aud\" claim and if this value is set, the broker will match the value from JWT\'s \"aud\" claim to see if there is an exact match. If there is no match, the broker will reject the JWT and authentication will fail."
    +
    public static final String
    + +
    "sasl.oauthbearer.expected.issuer"
    +
    public static final String
    + +
    "The (optional) setting for the broker to use to verify that the JWT was created by the expected issuer. The JWT will be inspected for the standard OAuth \"iss\" claim and if this value is set, the broker will match it exactly against what is in the JWT\'s \"iss\" claim. If there is no match, the broker will reject the JWT and authentication will fail."
    +
    public static final String
    + +
    "sasl.oauthbearer.header.urlencode"
    +
    public static final String
    + +
    "The (optional) setting to enable the OAuth client to URL-encode the client_id and client_secret in the authorization header in accordance with RFC6749, see <a href=\"https://datatracker.ietf.org/doc/html/rfc6749#section-2.3.1\">here</a> for more details. The default value is set to \'false\' for backward compatibility"
    +
    public static final String
    + +
    "sasl.oauthbearer.jwks.endpoint.refresh.ms"
    +
    public static final String
    + +
    "The (optional) value in milliseconds for the broker to wait between refreshing its JWKS (JSON Web Key Set) cache that contains the keys to verify the signature of the JWT."
    +
    public static final String
    + +
    "sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms"
    +
    public static final String
    + +
    "The (optional) value in milliseconds for the maximum wait between attempts to retrieve the JWKS (JSON Web Key Set) from the external authentication provider. JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting."
    +
    public static final String
    + +
    "sasl.oauthbearer.jwks.endpoint.retry.backoff.ms"
    +
    public static final String
    + +
    "The (optional) value in milliseconds for the initial wait between JWKS (JSON Web Key Set) retrieval attempts from the external authentication provider. JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting."
    +
    public static final String
    + +
    "sasl.oauthbearer.jwks.endpoint.url"
    +
    public static final String
    + +
    "The OAuth/OIDC provider URL from which the provider\'s <a href=\"https://datatracker.ietf.org/doc/html/rfc7517#section-5\">JWKS (JSON Web Key Set)</a> can be retrieved. The URL can be HTTP(S)-based or file-based. If the URL is HTTP(S)-based, the JWKS data will be retrieved from the OAuth/OIDC provider via the configured URL on broker startup. All then-current keys will be cached on the broker for incoming requests. If an authentication request is received for a JWT that includes a \"kid\" header claim value that isn\'t yet in the cache, the JWKS endpoint will be queried again on demand. However, the broker polls the URL every sasl.oauthbearer.jwks.endpoint.refresh.ms milliseconds to refresh the cache with any forthcoming keys before any JWT requests that include them are received. If the URL is file-based, the broker will load the JWKS file from a configured location on startup. In the event that the JWT includes a \"kid\" header value that isn\'t in the JWKS file, the broker will reject the JWT and authentication will fail."
    +
    public static final String
    + +
    "sasl.oauthbearer.jwt.retriever.class"
    +
    public static final String
    + +
    "<p>The fully-qualified class name of a <code>JwtRetriever</code> implementation used to request tokens from the identity provider.</p><p>The default configuration value represents a class that maintains backward compatibility with previous versions of Apache Kafka. The default implementation uses the configuration to determine which concrete implementation to create.<p>Other implementations that are provided include:</p><ul><li><code>org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever</code></li><li><code>org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever</code></li><li><code>org.apache.kafka.common.security.oauthbearer.FileJwtRetriever</code></li><li><code>org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever</code></li></ul>"
    +
    public static final String
    + +
    "sasl.oauthbearer.jwt.validator.class"
    +
    public static final String
    + +
    "<p>The fully-qualified class name of a <code>JwtValidator</code> implementation used to validate the JWT from the identity provider.</p><p>The default validator (<code>org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator</code>) maintains backward compatibility with previous versions of Apache Kafka. The default validator uses configuration to determine which concrete implementation to create.<p>The built-in <code>JwtValidator</code> implementations are:</p><ul><li><code>org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator</code></li><li><code>org.apache.kafka.common.security.oauthbearer.ClientJwtValidator</code></li><li><code>org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator</code></li></ul>"
    +
    public static final String
    + +
    "sasl.oauthbearer.scope"
    +
    public static final String
    + +
    "sasl.oauthbearer.scope.claim.name"
    +
    public static final String
    + +
    "The OAuth claim for the scope is often named \"scope\", but this (optional) setting can provide a different name to use for the scope included in the JWT payload\'s claims if the OAuth/OIDC provider uses a different name for that claim."
    +
    public static final String
    + +
    "<p>This is the level of access a client application is granted to a resource or API which is included in the token request. If provided, it should match one or more scopes configured in the identity provider.</p><p>The scope was previously stored as part of the <code>sasl.jaas.config</code> configuration with the key <code>scope</code>. For backward compatibility, the <code>scope</code> JAAS option can still be used, but it is deprecated and will be removed in a future version.</p><p>Order of precedence:</p><ul><li><code>sasl.oauthbearer.scope</code> from configuration</li><li><code>scope</code> from JAAS</li></ul>"
    +
    public static final String
    + +
    "sasl.oauthbearer.sub.claim.name"
    +
    public static final String
    + +
    "The OAuth claim for the subject is often named \"sub\", but this (optional) setting can provide a different name to use for the subject included in the JWT payload\'s claims if the OAuth/OIDC provider uses a different name for that claim."
    +
    public static final String
    + +
    "sasl.oauthbearer.token.endpoint.url"
    +
    public static final String
    + +
    "The URL for the OAuth/OIDC identity provider. If the URL is HTTP(S)-based, it is the issuer\'s token endpoint URL to which requests will be made to login based on the configuration in <code>sasl.oauthbearer.jwt.retriever.class</code>. If the URL is file-based, it specifies a file containing an access token (in JWT serialized form) issued by the OAuth/OIDC identity provider to use for authorization."
    +
    +
  • +
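To show how a few of the SaslConfigs keys above fit together in practice, here is a hedged sketch of client properties for SASL/PLAIN over TLS; the broker address, username, and password are placeholders:

```java
import java.util.Properties;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.common.config.SaslConfigs;

public class SaslClientConfigExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        // SASL/PLAIN over TLS; all values below are placeholders.
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "broker.example.com:9093");
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
        props.put(SaslConfigs.SASL_JAAS_CONFIG,
                "org.apache.kafka.common.security.plain.PlainLoginModule required "
                        + "username=\"alice\" password=\"alice-secret\";");
        // These props would then be passed to a KafkaProducer, KafkaConsumer, or Admin client.
    }
}
```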
  • +
    org.apache.kafka.common.config.SecurityConfig
    +
    +
    Modifier and Type
    +
    Constant Field
    +
    Value
    +
    public static final String
    + +
    "security.providers"
    +
    public static final String
    + +
    "A list of configurable creator classes each returning a provider implementing security algorithms. These classes should implement the <code>org.apache.kafka.common.security.auth.SecurityProviderCreator</code> interface."
    +
    +
  • +
  • +
    org.apache.kafka.common.config.SslConfigs
    +
    +
    Modifier and Type
    +
    Constant Field
    +
    Value
    +
    public static final String
    + +
    "TLSv1.2,TLSv1.3"
    +
    public static final String
    + +
    "https"
    +
    public static final String
    + +
    "JKS"
    +
    public static final String
    + +
    "TLSv1.3"
    +
    public static final String
    + +
    "JKS"
    +
    public static final String
    + +
    "ssl.cipher.suites"
    +
    public static final String
    + +
    "A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported."
    +
    public static final String
    + +
    "ssl.enabled.protocols"
    +
    public static final String
    + +
    "The list of protocols enabled for SSL connections. The default is \'TLSv1.2,TLSv1.3\'. This means that clients and servers will prefer TLSv1.3 if both support it and fallback to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most use cases. Also see the config documentation for `ssl.protocol` to understand how it can impact the TLS version negotiation behavior."
    +
    public static final String
    + +
    "ssl.endpoint.identification.algorithm"
    +
    public static final String
    + +
    "The endpoint identification algorithm to validate server hostname using server certificate. "
    +
    public static final String
    + +
    "ssl.engine.factory.class"
    +
    public static final String
    + +
    "The class of type org.apache.kafka.common.security.auth.SslEngineFactory to provide SSLEngine objects. Default value is org.apache.kafka.common.security.ssl.DefaultSslEngineFactory. Alternatively, setting this to org.apache.kafka.common.security.ssl.CommonNameLoggingSslEngineFactory will log the common name of expired SSL certificates used by clients to authenticate at any of the brokers with log level INFO. Note that this will cause a tiny delay during establishment of new connections from mTLS clients to brokers due to the extra code for examining the certificate chain provided by the client. Note further that the implementation uses a custom truststore based on the standard Java truststore and thus might be considered a security risk due to not being as mature as the standard one."
    +
    public static final String
    + +
    "ssl.key.password"
    +
    public static final String
    + +
    "The password of the private key in the key store file or the PEM key specified in \'ssl.keystore.key\'."
    +
    public static final String
    + +
    "ssl.keymanager.algorithm"
    +
    public static final String
    + +
    "The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine."
    +
    public static final String
    + +
    "ssl.keystore.certificate.chain"
    +
    public static final String
    + +
    "Certificate chain in the format specified by \'ssl.keystore.type\'. Default SSL engine factory supports only PEM format with a list of X.509 certificates"
    +
    public static final String
    + +
    "ssl.keystore.key"
    +
    public static final String
    + +
    "Private key in the format specified by \'ssl.keystore.type\'. Default SSL engine factory supports only PEM format with PKCS#8 keys. If the key is encrypted, key password must be specified using \'ssl.key.password\'"
    +
    public static final String
    + +
    "ssl.keystore.location"
    +
    public static final String
    + +
    "The location of the key store file. This is optional for client and can be used for two-way authentication for client."
    +
    public static final String
    + +
    "ssl.keystore.password"
    +
    public static final String
    + +
    "The store password for the key store file. This is optional for client and only needed if \'ssl.keystore.location\' is configured. Key store password is not supported for PEM format."
    +
    public static final String
    + +
    "ssl.keystore.type"
    +
    public static final String
    + +
    "The file format of the key store file. This is optional for client. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM]."
    +
    public static final String
    + +
    "ssl.protocol"
    +
    public static final String
    + +
    "The SSL protocol used to generate the SSLContext. The default is \'TLSv1.3\', which should be fine for most use cases. A typical alternative to the default is \'TLSv1.2\'. Allowed values for this config are dependent on the JVM. Clients using the defaults for this config and \'ssl.enabled.protocols\' will downgrade to \'TLSv1.2\' if the server does not support \'TLSv1.3\'. If this config is set to \'TLSv1.2\', however, clients will not use \'TLSv1.3\' even if it is one of the values in `ssl.enabled.protocols` and the server only supports \'TLSv1.3\'."
    +
    public static final String
    + +
    "ssl.provider"
    +
    public static final String
    + +
    "The name of the security provider used for SSL connections. Default value is the default security provider of the JVM."
    +
    public static final String
    + +
    "ssl.secure.random.implementation"
    +
    public static final String
    + +
    "The SecureRandom PRNG implementation to use for SSL cryptography operations. "
    +
    public static final String
    + +
    "ssl.trustmanager.algorithm"
    +
    public static final String
    + +
    "The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine."
    +
    public static final String
    + +
    "ssl.truststore.certificates"
    +
    public static final String
    + +
    "Trusted certificates in the format specified by \'ssl.truststore.type\'. Default SSL engine factory supports only PEM format with X.509 certificates."
    +
    public static final String
    + +
    "ssl.truststore.location"
    +
    public static final String
    + +
    "The location of the trust store file."
    +
    public static final String
    + +
    "ssl.truststore.password"
    +
    public static final String
    + +
    "The password for the trust store file. If a password is not set, trust store file configured will still be used, but integrity checking is disabled. Trust store password is not supported for PEM format."
    +
    public static final String
    + +
    "ssl.truststore.type"
    +
    public static final String
    + +
    "The file format of the trust store file. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM]."
    +
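The string values above are the SSL configuration keys and their documentation as published on this constant values page. As a hedged illustration (the broker address, file paths and passwords below are placeholders, and `bootstrap.servers`/`security.protocol` are standard client settings not part of this table), a client can be pointed at a TLS listener by setting these keys directly:

```java
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.serialization.StringSerializer;

public class SslClientExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker1:9093");   // placeholder TLS listener
        props.put("security.protocol", "SSL");
        // The keys below correspond to the constant values listed above.
        props.put("ssl.truststore.location", "/etc/kafka/client.truststore.jks"); // placeholder path
        props.put("ssl.truststore.password", "changeit");                         // placeholder secret
        props.put("ssl.enabled.protocols", "TLSv1.2,TLSv1.3");
        props.put("ssl.endpoint.identification.algorithm", "https");

        try (KafkaProducer<String, String> producer =
                 new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
            // producer.send(...) as usual; the TLS handshake uses the settings above.
        }
    }
}
```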
    +
  • +
  • +
    org.apache.kafka.common.config.TopicConfig
    +
    +
    Modifier and Type
    +
    Constant Field
    +
    Value
    +
    public static final String
    + +
    "compact"
    +
    public static final String
    + +
    "cleanup.policy"
    +
    public static final String
    + +
    "delete"
    +
    public static final String
    + +
    "This config designates the retention policy to use on log segments. The \"delete\" policy (which is the default) will discard old segments when their retention time or size limit has been reached. The \"compact\" policy will enable <a href=\"#compaction\">log compaction</a>, which retains the latest value for each key. It is also possible to specify both policies in a comma-separated list (e.g. \"delete,compact\"). In this case, old segments will be discarded per the retention time and size configuration, while retained segments will be compacted."
    +
    public static final String
    + +
    "compression.gzip.level"
    +
    public static final String
    + +
    "The compression level to use if compression.type is set to <code>gzip</code>."
    +
    public static final String
    + +
    "compression.lz4.level"
    +
    public static final String
    + +
    "The compression level to use if compression.type is set to <code>lz4</code>."
    +
    public static final String
    + +
    "compression.type"
    +
    public static final String
    + +
    "Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (\'gzip\', \'snappy\', \'lz4\', \'zstd\'). It additionally accepts \'uncompressed\' which is equivalent to no compression; and \'producer\' which means retain the original compression codec set by the producer."
    +
    public static final String
    + +
    "compression.zstd.level"
    +
    public static final String
    + +
    "The compression level to use if compression.type is set to <code>zstd</code>."
    +
    public static final String
    + +
    "delete.retention.ms"
    +
    public static final String
    + +
    "The amount of time to retain delete tombstone markers for <a href=\"#compaction\">log compacted</a> topics. This setting also gives a bound on the time in which a consumer must complete a read if they begin from offset 0 to ensure that they get a valid snapshot of the final stage (otherwise delete tombstones may be collected before they complete their scan)."
    +
    public static final String
    + +
    "file.delete.delay.ms"
    +
    public static final String
    + +
    "The time to wait before deleting a file from the filesystem"
    +
    public static final String
    + +
    "flush.messages"
    +
    public static final String
    + +
    "This setting allows specifying an interval at which we will force an fsync of data written to the log. For example if this was set to 1 we would fsync after every message; if it were 5 we would fsync after every five messages. In general we recommend you not set this and use replication for durability and allow the operating system\'s background flush capabilities as it is more efficient. This setting can be overridden on a per-topic basis (see <a href=\"#topicconfigs\">the per-topic configuration section</a>)."
    +
    public static final String
    + +
    "flush.ms"
    +
    public static final String
    + +
    "This setting allows specifying a time interval at which we will force an fsync of data written to the log. For example if this was set to 1000 we would fsync after 1000 ms had passed. Note that this setting depends on the broker-level configuration <code>log.flush.scheduler.interval.ms</code>, which controls how frequently the flush check occurs. In general we recommend you not set this and use replication for durability and allow the operating system\'s background flush capabilities as it is more efficient."
    +
    public static final String
    + +
    "index.interval.bytes"
    +
    public static final String
    + +
    "This setting controls how frequently Kafka adds entries to its offset index and, conditionally, to its time index. The default setting ensures that we index a message roughly every 4096 bytes. More frequent indexing allows reads to jump closer to the exact position in the log but results in larger index files. You probably don\'t need to change this.<p> Note: the time index will be inserted only when the timestamp is greater than the last indexed timestamp.</p>"
    +
    public static final String
    + +
    "local.retention.bytes"
    +
    public static final String
    + +
    "The maximum size of local log segments that can grow for a partition before it deletes the old segments. Default value is -2, it represents `retention.bytes` value to be used. The effective value should always be less than or equal to `retention.bytes` value."
    +
    public static final String
    + +
    "local.retention.ms"
    +
    public static final String
    + +
    "The number of milliseconds to keep the local log segment before it gets deleted. Default value is -2, it represents `retention.ms` value is to be used. The effective value should always be less than or equal to `retention.ms` value."
    +
    public static final String
    + +
    "max.compaction.lag.ms"
    +
    public static final String
    + +
    "The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted."
    +
    public static final String
    + +
    "max.message.bytes"
    +
    public static final String
    + +
    "The largest record batch size allowed by Kafka (after compression if compression is enabled)."
    +
    public static final String
    + +
    "message.downconversion.enable"
    +
    public static final String
    + +
    "Down-conversion is not possible in Apache Kafka 4.0 and newer, hence this configuration is no-op and it is deprecated for removal in Apache Kafka 5.0."
    +
    public static final String
    + +
    "message.timestamp.after.max.ms"
    +
    public static final String
    + +
    "This configuration sets the allowable timestamp difference between the message timestamp and the broker\'s timestamp. The message timestamp can be later than or equal to the broker\'s timestamp, with the maximum allowable difference determined by the value set in this configuration. If message.timestamp.type=CreateTime, the message will be rejected if the difference in timestamps exceeds this specified threshold. This configuration is ignored if message.timestamp.type=LogAppendTime."
    +
    public static final String
    + +
    "message.timestamp.before.max.ms"
    +
    public static final String
    + +
    "This configuration sets the allowable timestamp difference between the broker\'s timestamp and the message timestamp. The message timestamp can be earlier than or equal to the broker\'s timestamp, with the maximum allowable difference determined by the value set in this configuration. If message.timestamp.type=CreateTime, the message will be rejected if the difference in timestamps exceeds this specified threshold. This configuration is ignored if message.timestamp.type=LogAppendTime."
    +
    public static final String
    + +
    "message.timestamp.type"
    +
    public static final String
    + +
    "Define whether the timestamp in the message is message create time or log append time."
    +
    public static final String
    + +
    "min.cleanable.dirty.ratio"
    +
    public static final String
    + +
    "This configuration controls how frequently the log compactor will attempt to clean the log (assuming <a href=\"#compaction\">log compaction</a> is enabled). By default we will avoid cleaning a log where more than 50% of the log has been compacted. This ratio bounds the maximum space wasted in the log by duplicates (at 50% at most 50% of the log could be duplicates). A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log. If the max.compaction.lag.ms or the min.compaction.lag.ms configurations are also specified, then the log compactor considers the log to be eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the min.compaction.lag.ms duration, or (ii) if the log has had dirty (uncompacted) records for at most the max.compaction.lag.ms period."
    +
    public static final String
    + +
    "min.compaction.lag.ms"
    +
    public static final String
    + +
    "The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted."
    +
    public static final String
    + +
    "min.insync.replicas"
    +
    public static final String
    + +
    "Specifies the <i>minimum</i> number of in-sync replicas (including the leader) required for a write to succeed when a producer sets <code>acks</code> to \"all\" (or \"-1\"). In the <code>acks=all</code> case, every in-sync replica must acknowledge a write for it to be considered successful. E.g., if a topic has <code>replication.factor</code> of 3 and the ISR set includes all three replicas, then all three replicas must acknowledge an <code>acks=all</code> write for it to succeed, even if <code>min.insync.replicas</code> happens to be less than 3. If <code>acks=all</code> and the current ISR set contains fewer than <code>min.insync.replicas</code> members, then the producer will raise an exception (either <code>NotEnoughReplicas</code> or <code>NotEnoughReplicasAfterAppend</code>).<br> Regardless of the <code>acks</code> setting, the messages will not be visible to the consumers until they are replicated to all in-sync replicas and the <code>min.insync.replicas</code> condition is met.<br> When used together, <code>min.insync.replicas</code> and <code>acks</code> allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set <code>min.insync.replicas</code> to 2, and produce with <code>acks</code> of \"all\". This ensures that a majority of replicas must persist a write before it\'s considered successful by the producer and it\'s visible to consumers.<p>Note that when the Eligible Leader Replicas feature is enabled, the semantics of this config changes. Please refer to <a href=\"#eligible_leader_replicas\">the ELR section</a> for more info.</p>"
    +
    public static final String
    + +
    "preallocate"
    +
    public static final String
    + +
    "True if we should preallocate the file on disk when creating a new log segment."
    +
    public static final String
    + +
    "remote.log.copy.disable"
    +
    public static final String
    + +
    "Determines whether tiered data for a topic should become read only, and no more data uploading on a topic. Once this config is set to true, the local retention configuration (i.e. local.retention.ms/bytes) becomes irrelevant, and all data expiration follows the topic-wide retention configuration(i.e. retention.ms/bytes)."
    +
    public static final String
    + +
    "remote.log.delete.on.disable"
    +
    public static final String
    + +
    "Determines whether tiered data for a topic should be deleted after tiered storage is disabled on a topic. This configuration should be enabled when trying to set `remote.storage.enable` from true to false"
    +
    public static final String
    + +
    "remote.storage.enable"
    +
    public static final String
    + +
    "To enable tiered storage for a topic, set this configuration to true. To disable tiered storage for a topic that has it enabled, set this configuration to false. When disabling, you must also set <code>remote.log.delete.on.disable</code> to true."
    +
    public static final String
    + +
    "retention.bytes"
    +
    public static final String
    + +
    "This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the \"delete\" retention policy. By default there is no size limit only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes. Additionally, retention.bytes configuration operates independently of \"segment.ms\" and \"segment.bytes\" configurations. Moreover, it triggers the rolling of new segment if the retention.bytes is configured to zero."
    +
    public static final String
    + +
    "retention.ms"
    +
    public static final String
    + +
    "This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space if we are using the \"delete\" retention policy. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied. Additionally, retention.ms configuration operates independently of \"segment.ms\" and \"segment.bytes\" configurations. Moreover, it triggers the rolling of new segment if the retention.ms condition is satisfied."
    +
    public static final String
    + +
    "segment.bytes"
    +
    public static final String
    + +
    "This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention."
    +
    public static final String
    + +
    "segment.index.bytes"
    +
    public static final String
    + +
    "This configuration controls the size of the index that maps offsets to file positions. We preallocate this index file and shrink it only after log rolls. You generally should not need to change this setting."
    +
    public static final String
    + +
    "segment.jitter.ms"
    +
    public static final String
    + +
    "The maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling"
    +
    public static final String
    + +
    "segment.ms"
    +
    public static final String
    + +
    "This configuration controls the period of time after which Kafka will force the log to roll even if the segment file isn\'t full to ensure that retention can delete or compact old data."
    +
    public static final String
    + +
    "unclean.leader.election.enable"
    +
    public static final String
    + +
    "Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss.<p>Note: In KRaft mode, when enabling this config dynamically, it needs to wait for the unclean leader electionthread to trigger election periodically (default is 5 minutes). Please run `kafka-leader-election.sh` with `unclean` option to trigger the unclean leader election immediately if needed.</p>"
    +
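The `org.apache.kafka.common.config.TopicConfig` constants above are typically passed as per-topic overrides when creating a topic with the Admin client. A minimal sketch, assuming a reachable cluster at a placeholder address and an illustrative topic name; the string values ("cleanup.policy", "retention.ms", "min.insync.replicas", ...) are exactly the constant values listed in the table:

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.config.TopicConfig;

public class CreateTopicExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            NewTopic topic = new NewTopic("orders", 3, (short) 3) // illustrative topic
                .configs(Map.of(
                    TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE,
                    TopicConfig.RETENTION_MS_CONFIG, "604800000",          // 7 days
                    TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2"));
            admin.createTopics(List.of(topic)).all().get();
        }
    }
}
```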
    +
    +
  • +
    org.apache.kafka.common.security.auth.KafkaPrincipal
    +
    +
    Modifier and Type
    +
    Constant Field
    +
    Value
    +
    public static final String
    + +
    "User"
    +
    +
  • +
+
    +
  • +
    org.apache.kafka.common.security.oauthbearer.ClientJwtValidator
    +
    +
    Modifier and Type
    +
    Constant Field
    +
    Value
    +
    public static final String
    + +
    "exp"
    +
    public static final String
    + +
    "iat"
    +
    +
  • +
  • +
    org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler
    +
    +
    Modifier and Type
    +
    Constant Field
    +
    Value
    +
    public static final String
    + +
    "clientId"
    +
    public static final String
    + +
    "The OAuth/OIDC identity provider-issued client ID to uniquely identify the service account to use for authentication for this client. The value must be paired with a corresponding clientSecret value and is provided to the OAuth provider using the OAuth clientcredentials grant type."
    +
    public static final String
    + +
    "clientSecret"
    +
    public static final String
    + +
    "The OAuth/OIDC identity provider-issued client secret serves a similar function as a password to the clientId account and identifies the service account to use for authentication for this client. The value must be paired with a corresponding clientId value and is provided to the OAuth provider using the OAuth clientcredentials grant type."
    +
    public static final String
    + +
    "scope"
    +
    public static final String
    + +
    "The (optional) HTTP/HTTPS login request to the token endpoint (sasl.oauthbearer.token.endpoint.url) may need to specify an OAuth \"scope\". If so, the scope is used to provide the value to include with the login request."
    +
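The clientId, clientSecret and scope options documented above are supplied as JAAS options of OAuthBearerLoginModule (listed below). A sketch of the corresponding client properties, assuming a hypothetical identity provider token endpoint and placeholder credentials:

```java
import java.util.Properties;

public class OAuthClientConfigExample {
    public static Properties oauthProps() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker1:9093");            // placeholder
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.mechanism", "OAUTHBEARER");
        props.put("sasl.oauthbearer.token.endpoint.url",
                  "https://idp.example.com/oauth2/token");          // placeholder IdP endpoint
        props.put("sasl.login.callback.handler.class",
                  "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler");
        // clientId / clientSecret / scope are the JAAS options documented above (placeholder values).
        props.put("sasl.jaas.config",
                  "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required "
                  + "clientId=\"my-service\" clientSecret=\"my-secret\" scope=\"kafka\";");
        return props;
    }
}
```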
    +
  • +
  • +
    org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule
    +
    +
    Modifier and Type
    +
    Constant Field
    +
    Value
    +
    public static final String
    + +
    "OAUTHBEARER"
    +
    +
    +
  • +
    org.apache.kafka.connect.data.Date
    +
    +
    Modifier and Type
    +
    Constant Field
    +
    Value
    +
    public static final String
    + +
    "org.apache.kafka.connect.data.Date"
    +
    +
  • +
  • +
    org.apache.kafka.connect.data.Decimal
    +
    +
    Modifier and Type
    +
    Constant Field
    +
    Value
    +
    public static final String
    + +
    "org.apache.kafka.connect.data.Decimal"
    +
    public static final String
    + +
    "scale"
    +
    +
  • +
  • +
    org.apache.kafka.connect.data.Time
    +
    +
    Modifier and Type
    +
    Constant Field
    +
    Value
    +
    public static final String
    + +
    "org.apache.kafka.connect.data.Time"
    +
    +
  • +
  • +
    org.apache.kafka.connect.data.Timestamp
    +
    +
    Modifier and Type
    +
    Constant Field
    +
    Value
    +
    public static final String
    + +
    "org.apache.kafka.connect.data.Timestamp"
    +
    +
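The logical-name constants above (org.apache.kafka.connect.data.Date, Decimal with its scale parameter, Time, Timestamp) identify Connect's logical types when attached to a schema. A small sketch of how a connector or converter might declare and populate them; the schema and field names are illustrative:

```java
import java.math.BigDecimal;
import org.apache.kafka.connect.data.Decimal;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.data.Timestamp;

public class LogicalTypesExample {
    public static Struct sampleRecord() {
        Schema schema = SchemaBuilder.struct()
            .name("com.example.Order")                 // hypothetical schema name
            .field("amount", Decimal.schema(2))        // logical name org.apache.kafka.connect.data.Decimal, scale = 2
            .field("created", Timestamp.SCHEMA)        // logical name org.apache.kafka.connect.data.Timestamp
            .build();
        return new Struct(schema)
            .put("amount", new BigDecimal("9.99").setScale(2))
            .put("created", new java.util.Date());
    }
}
```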
+
+
+
diff --git a/static/41/javadoc/deprecated-list.html b/static/41/javadoc/deprecated-list.html
new file mode 100644
index 000000000..739be7590
--- /dev/null
+++ b/static/41/javadoc/deprecated-list.html
@@ -0,0 +1,506 @@
+Deprecated List (kafka 4.1.0 API)
diff --git a/static/41/javadoc/element-list b/static/41/javadoc/element-list
new file mode 100644
index 000000000..ea41a3dda
--- /dev/null
+++ b/static/41/javadoc/element-list
@@ -0,0 +1,53 @@
+org.apache.kafka.clients.admin
+org.apache.kafka.clients.consumer
+org.apache.kafka.clients.producer
+org.apache.kafka.common
+org.apache.kafka.common.acl
+org.apache.kafka.common.annotation
+org.apache.kafka.common.config
+org.apache.kafka.common.config.provider
+org.apache.kafka.common.errors
+org.apache.kafka.common.header
+org.apache.kafka.common.metrics
+org.apache.kafka.common.metrics.stats
+org.apache.kafka.common.quota
+org.apache.kafka.common.resource
+org.apache.kafka.common.security.auth
+org.apache.kafka.common.security.oauthbearer
+org.apache.kafka.common.security.plain
+org.apache.kafka.common.security.scram
+org.apache.kafka.common.security.token.delegation
+org.apache.kafka.common.serialization
+org.apache.kafka.connect.components
+org.apache.kafka.connect.connector
+org.apache.kafka.connect.connector.policy
+org.apache.kafka.connect.data
+org.apache.kafka.connect.errors
+org.apache.kafka.connect.header
+org.apache.kafka.connect.health
+org.apache.kafka.connect.mirror
+org.apache.kafka.connect.rest
+org.apache.kafka.connect.sink
+org.apache.kafka.connect.source
+org.apache.kafka.connect.storage
+org.apache.kafka.connect.tools
+org.apache.kafka.connect.transforms
+org.apache.kafka.connect.transforms.predicates
+org.apache.kafka.connect.util
+org.apache.kafka.coordinator.group.api.assignor
+org.apache.kafka.server.authorizer
+org.apache.kafka.server.log.remote.storage
+org.apache.kafka.server.policy
+org.apache.kafka.server.quota
+org.apache.kafka.server.telemetry
+org.apache.kafka.streams
+org.apache.kafka.streams.errors
+org.apache.kafka.streams.kstream
+org.apache.kafka.streams.processor
+org.apache.kafka.streams.processor.api
+org.apache.kafka.streams.processor.assignment
+org.apache.kafka.streams.processor.assignment.assignors
+org.apache.kafka.streams.query
+org.apache.kafka.streams.state
+org.apache.kafka.streams.test
+org.apache.kafka.tools.api
diff --git a/static/41/javadoc/help-doc.html b/static/41/javadoc/help-doc.html
new file mode 100644
index 000000000..b26e04993
--- /dev/null
+++ b/static/41/javadoc/help-doc.html
@@ -0,0 +1,191 @@
+API Help (kafka 4.1.0 API)
+ +
+
+

JavaDoc Help

+ +
+
+

Navigation

+Starting from the Overview page, you can browse the documentation using the links in each page, and in the navigation bar at the top of each page. The Index and Search box allow you to navigate to specific declarations and summary pages, including: All Packages, All Classes and Interfaces + +
+
+
+

Kinds of Pages

+The following sections describe the different kinds of pages in this collection. +
+

Overview

+

The Overview page is the front page of this API document and provides a list of all packages with a summary for each. This page can also contain an overall description of the set of packages.

+
+
+

Package

+

Each package has a page that contains a list of its classes and interfaces, with a summary for each. These pages may contain the following categories:

+
    +
  • Interfaces
  • +
  • Classes
  • +
  • Enum Classes
  • +
  • Exceptions
  • +
  • Errors
  • +
  • Annotation Interfaces
  • +
+
+
+

Class or Interface

+

Each class, interface, nested class and nested interface has its own separate page. Each of these pages has three sections consisting of a declaration and description, member summary tables, and detailed member descriptions. Entries in each of these sections are omitted if they are empty or not applicable.

+
    +
  • Class Inheritance Diagram
  • +
  • Direct Subclasses
  • +
  • All Known Subinterfaces
  • +
  • All Known Implementing Classes
  • +
  • Class or Interface Declaration
  • +
  • Class or Interface Description
  • +
+
+
    +
  • Nested Class Summary
  • +
  • Enum Constant Summary
  • +
  • Field Summary
  • +
  • Property Summary
  • +
  • Constructor Summary
  • +
  • Method Summary
  • +
  • Required Element Summary
  • +
  • Optional Element Summary
  • +
+
+
    +
  • Enum Constant Details
  • +
  • Field Details
  • +
  • Property Details
  • +
  • Constructor Details
  • +
  • Method Details
  • +
  • Element Details
  • +
+

Note: Annotation interfaces have required and optional elements, but not methods. Only enum classes have enum constants. The components of a record class are displayed as part of the declaration of the record class. Properties are a feature of JavaFX.

+

The summary entries are alphabetical, while the detailed descriptions are in the order they appear in the source code. This preserves the logical groupings established by the programmer.

+
+
+

Other Files

+

Packages and modules may contain pages with additional information related to the declarations nearby.

+
+
+

Tree (Class Hierarchy)

+

There is a Class Hierarchy page for all packages, plus a hierarchy for each package. Each hierarchy page contains a list of classes and a list of interfaces. Classes are organized by inheritance structure starting with java.lang.Object. Interfaces do not inherit from java.lang.Object.

+
    +
  • When viewing the Overview page, clicking on TREE displays the hierarchy for all packages.
  • +
  • When viewing a particular package, class or interface page, clicking on TREE displays the hierarchy for only that package.
  • +
+
+
+

Deprecated API

+

The Deprecated API page lists all of the API that have been deprecated. A deprecated API is not recommended for use, generally due to shortcomings, and a replacement API is usually given. Deprecated APIs may be removed in future implementations.

+
+
+

Constant Field Values

+

The Constant Field Values page lists the static final fields and their values.

+
+
+

Serialized Form

+

Each serializable or externalizable class has a description of its serialization fields and methods. This information is of interest to those who implement rather than use the API. While there is no link in the navigation bar, you can get to this information by going to any serialized class and clicking "Serialized Form" in the "See Also" section of the class description.

+
+
+

All Packages

+

The All Packages page contains an alphabetic index of all packages contained in the documentation.

+
+
+

All Classes and Interfaces

+

The All Classes and Interfaces page contains an alphabetic index of all classes and interfaces contained in the documentation, including annotation interfaces, enum classes, and record classes.

+
+
+

Index

+

The Index contains an alphabetic index of all classes, interfaces, constructors, methods, and fields in the documentation, as well as summary pages such as All Packages, All Classes and Interfaces.

+
+
+
+This help file applies to API documentation generated by the standard doclet.
+
+
diff --git a/static/41/javadoc/index-all.html b/static/41/javadoc/index-all.html
new file mode 100644
index 000000000..66abdf191
--- /dev/null
+++ b/static/41/javadoc/index-all.html
@@ -0,0 +1,23433 @@
+Index (kafka 4.1.0 API)
+ +
+
+
+

Index

+
+A B C D E F G H I J K L M N O P Q R S T U V W Z 
All Classes and Interfaces|All Packages|Constant Field Values|Serialized Form +

A

+
+
abort() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule
+
 
+
abort() - Method in class org.apache.kafka.common.security.plain.PlainLoginModule
+
 
+
abort() - Method in class org.apache.kafka.common.security.scram.ScramLoginModule
+
 
+
abortTransaction() - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
Aborts the ongoing transaction.
+
+
abortTransaction() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
abortTransaction() - Method in interface org.apache.kafka.clients.producer.Producer
+
+ +
+
abortTransaction() - Method in interface org.apache.kafka.connect.source.TransactionContext
+
+
Requests a transaction abort after the next batch of records from SourceTask.poll().
+
+
abortTransaction(AbortTransactionSpec) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Forcefully abort a transaction which is open on a topic partition.
+
+
abortTransaction(AbortTransactionSpec, AbortTransactionOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Forcefully abort a transaction which is open on a topic partition.
+
+
abortTransaction(AbortTransactionSpec, AbortTransactionOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
abortTransaction(AbortTransactionSpec, AbortTransactionOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
abortTransaction(SourceRecord) - Method in interface org.apache.kafka.connect.source.TransactionContext
+
+
Requests a transaction abort after a source record is processed.
+
+
abortTransactionException - Variable in class org.apache.kafka.clients.producer.MockProducer
+
 
+
AbortTransactionOptions - Class in org.apache.kafka.clients.admin
+
 
+
AbortTransactionOptions() - Constructor for class org.apache.kafka.clients.admin.AbortTransactionOptions
+
 
+
AbortTransactionResult - Class in org.apache.kafka.clients.admin
+
+ +
+
AbortTransactionSpec - Class in org.apache.kafka.clients.admin
+
 
+
AbortTransactionSpec(TopicPartition, long, short, int) - Constructor for class org.apache.kafka.clients.admin.AbortTransactionSpec
+
 
+
AbstractConfig - Class in org.apache.kafka.common.config
+
+
A convenient base class for configurations to extend.
+
+
AbstractConfig(ConfigDef, Map<?, ?>) - Constructor for class org.apache.kafka.common.config.AbstractConfig
+
+
Construct a configuration with a ConfigDef and the configuration properties, + which can include properties for zero or more ConfigProvider + that will be used to resolve variables in configuration property values.
+
+
AbstractConfig(ConfigDef, Map<?, ?>, boolean) - Constructor for class org.apache.kafka.common.config.AbstractConfig
+
+
Construct a configuration with a ConfigDef and the configuration properties, + which can include properties for zero or more ConfigProvider + that will be used to resolve variables in configuration property values.
+
+
AbstractConfig(ConfigDef, Map<?, ?>, Map<String, ?>, boolean) - Constructor for class org.apache.kafka.common.config.AbstractConfig
+
+
Construct a configuration with a ConfigDef and the configuration properties, which can include properties + for zero or more ConfigProvider that will be used to resolve variables in configuration property + values.
+
+
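The AbstractConfig constructors indexed above take a ConfigDef plus the raw property map. A minimal sketch of a plugin configuration class built on them; the config key, default value and accessor are hypothetical:

```java
import java.util.Map;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;

// Illustrative custom configuration class; key names and defaults are not from the Kafka API.
public class MyPluginConfig extends AbstractConfig {
    static final ConfigDef CONFIG_DEF = new ConfigDef()
        .define("endpoint.url", ConfigDef.Type.STRING, "http://localhost:8080",
                ConfigDef.Importance.HIGH, "Endpoint the plugin talks to.");

    public MyPluginConfig(Map<?, ?> originals) {
        super(CONFIG_DEF, originals);   // AbstractConfig(ConfigDef, Map<?, ?>) parses and validates the values
    }

    public String endpointUrl() {
        return getString("endpoint.url");
    }
}
```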
AbstractOptions<T extends AbstractOptions> - Class in org.apache.kafka.clients.admin
+
 
+
AbstractOptions() - Constructor for class org.apache.kafka.clients.admin.AbstractOptions
+
 
+
AbstractState - Class in org.apache.kafka.connect.health
+
+
Provides the current status for a connector or a task, along with an identifier for its Connect worker
+
+
AbstractState(String, String, String) - Constructor for class org.apache.kafka.connect.health.AbstractState
+
+
Construct a state for a connector or task.
+
+
accept(A, B) - Method in interface org.apache.kafka.common.KafkaFuture.BiConsumer
+
 
+
ACCEPT - Enum constant in enum class org.apache.kafka.clients.consumer.AcknowledgeType
+
+
The record was consumed successfully.
+
+
acceptable(double) - Method in class org.apache.kafka.common.metrics.Quota
+
 
+
ACCEPTABLE_RECOVERY_LAG_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
acceptable.recovery.lag
+
+
acceptableRecoveryLag() - Method in class org.apache.kafka.streams.processor.assignment.AssignmentConfigs
+
+
The configured acceptable recovery lag according to + StreamsConfig.ACCEPTABLE_RECOVERY_LAG_CONFIG
+
+
accepts(StateStore) - Method in interface org.apache.kafka.streams.state.QueryableStoreType
+
+
Called when searching for StateStores to see if they + match the type expected by implementors of this interface.
+
+
accepts(StateStore) - Method in class org.apache.kafka.streams.state.QueryableStoreTypes.SessionStoreType
+
 
+
AccessControlEntry - Class in org.apache.kafka.common.acl
+
+
Represents an access control entry.
+
+
AccessControlEntry(String, String, AclOperation, AclPermissionType) - Constructor for class org.apache.kafka.common.acl.AccessControlEntry
+
+
Create an instance of an access control entry with the provided parameters.
+
+
AccessControlEntryFilter - Class in org.apache.kafka.common.acl
+
+
Represents a filter which matches access control entries.
+
+
AccessControlEntryFilter(String, String, AclOperation, AclPermissionType) - Constructor for class org.apache.kafka.common.acl.AccessControlEntryFilter
+
+
Create an instance of an access control entry filter with the provided parameters.
+
+
acknowledge(ConsumerRecord<K, V>) - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Acknowledge successful delivery of a record returned on the last KafkaShareConsumer.poll(Duration) call.
+
+
acknowledge(ConsumerRecord<K, V>) - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
acknowledge(ConsumerRecord<K, V>) - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
acknowledge(ConsumerRecord<K, V>, AcknowledgeType) - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Acknowledge delivery of a record returned on the last KafkaShareConsumer.poll(Duration) call indicating whether + it was processed successfully.
+
+
acknowledge(ConsumerRecord<K, V>, AcknowledgeType) - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
acknowledge(ConsumerRecord<K, V>, AcknowledgeType) - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
AcknowledgementCommitCallback - Interface in org.apache.kafka.clients.consumer
+
+
A callback interface that the user can implement to trigger custom actions when an acknowledgement completes.
+
+
AcknowledgeType - Enum Class in org.apache.kafka.clients.consumer
+
+
The acknowledge type is used with KafkaShareConsumer.acknowledge(ConsumerRecord, AcknowledgeType) to indicate + whether the record was consumed successfully.
+
+
ACKS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
acks
+
+
aclBinding() - Method in class org.apache.kafka.server.authorizer.AclDeleteResult.AclBindingDeleteResult
+
+
Returns ACL binding that matched the delete filter.
+
+
AclBinding - Class in org.apache.kafka.common.acl
+
+
Represents a binding between a resource pattern and an access control entry.
+
+
AclBinding(ResourcePattern, AccessControlEntry) - Constructor for class org.apache.kafka.common.acl.AclBinding
+
+
Create an instance of this class with the provided parameters.
+
+
AclBindingDeleteResult(AclBinding) - Constructor for class org.apache.kafka.server.authorizer.AclDeleteResult.AclBindingDeleteResult
+
 
+
AclBindingDeleteResult(AclBinding, ApiException) - Constructor for class org.apache.kafka.server.authorizer.AclDeleteResult.AclBindingDeleteResult
+
 
+
aclBindingDeleteResults() - Method in class org.apache.kafka.server.authorizer.AclDeleteResult
+
+
Returns delete result for each matching ACL binding.
+
+
AclBindingFilter - Class in org.apache.kafka.common.acl
+
+
A filter which can match AclBinding objects.
+
+
AclBindingFilter(ResourcePatternFilter, AccessControlEntryFilter) - Constructor for class org.apache.kafka.common.acl.AclBindingFilter
+
+
Create an instance of this filter with the provided parameters.
+
+
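AclBinding pairs a ResourcePattern with an AccessControlEntry, and the Admin client accepts collections of them. A hedged sketch that grants a hypothetical principal read access to a hypothetical topic:

```java
import java.util.List;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;

public class AclExample {
    public static void grantRead(Admin admin) throws Exception {
        AclBinding binding = new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "orders", PatternType.LITERAL),            // placeholder topic
            new AccessControlEntry("User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW)); // placeholder principal
        admin.createAcls(List.of(binding)).all().get();
    }
}
```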
aclCount() - Method in interface org.apache.kafka.server.authorizer.Authorizer
+
+
Get the current number of ACLs, for the purpose of metrics.
+
+
AclCreateResult - Class in org.apache.kafka.server.authorizer
+
 
+
AclCreateResult(ApiException) - Constructor for class org.apache.kafka.server.authorizer.AclCreateResult
+
 
+
AclDeleteResult - Class in org.apache.kafka.server.authorizer
+
 
+
AclDeleteResult(Collection<AclDeleteResult.AclBindingDeleteResult>) - Constructor for class org.apache.kafka.server.authorizer.AclDeleteResult
+
 
+
AclDeleteResult(ApiException) - Constructor for class org.apache.kafka.server.authorizer.AclDeleteResult
+
 
+
AclDeleteResult.AclBindingDeleteResult - Class in org.apache.kafka.server.authorizer
+
+
Delete result for each ACL binding that matched a delete filter.
+
+
AclOperation - Enum Class in org.apache.kafka.common.acl
+
+
Represents an operation which an ACL grants or denies permission to perform.
+
+
AclPermissionType - Enum Class in org.apache.kafka.common.acl
+
+
Represents whether an ACL grants or denies permissions.
+
+
acls(AclBindingFilter) - Method in interface org.apache.kafka.server.authorizer.Authorizer
+
+
Returns ACL bindings which match the provided filter.
+
+
Action - Class in org.apache.kafka.server.authorizer
+
 
+
Action(AclOperation, ResourcePattern, int, boolean, boolean) - Constructor for class org.apache.kafka.server.authorizer.Action
+
 
+
ACTIVE - Enum constant in enum class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask.Type
+
 
+
ACTIVE_TASK_ASSIGNED_MULTIPLE_TIMES - Enum constant in enum class org.apache.kafka.streams.processor.assignment.TaskAssignor.AssignmentError
+
 
+
activeHost() - Method in class org.apache.kafka.streams.KeyQueryMetadata
+
+
Get the active Kafka Streams instance for given key.
+
+
activeProducers() - Method in class org.apache.kafka.clients.admin.DescribeProducersResult.PartitionProducerState
+
 
+
activeTasks() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberAssignment
+
+
Active tasks for this client.
+
+
activeTasks() - Method in interface org.apache.kafka.streams.ThreadMetadata
+
+
Metadata of the active tasks assigned to the stream thread.
+
+
add(String, byte[]) - Method in interface org.apache.kafka.common.header.Headers
+
+
Creates and adds a header, to the end, returning if the operation succeeded.
+
+
add(String, Object, Schema) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
add(String, Object, Schema) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and value.
+
+
add(String, SchemaAndValue) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
add(String, SchemaAndValue) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and value.
+
+
add(Header) - Method in interface org.apache.kafka.common.header.Headers
+
+
Adds a header (key inside), to the end, returning if the operation succeeded.
+
+
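Headers.add(String, byte[]) appends a header to a record's header collection; on the producer side the collection is reached through ProducerRecord.headers(). A small sketch with an illustrative header key and value:

```java
import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.producer.ProducerRecord;

public class HeaderExample {
    public static ProducerRecord<String, String> withTraceHeader(String topic, String key, String value) {
        ProducerRecord<String, String> record = new ProducerRecord<>(topic, key, value);
        // Headers.add(String, byte[]) appends the header to the end of the collection.
        record.headers().add("trace-id", "abc-123".getBytes(StandardCharsets.UTF_8)); // illustrative header
        return record;
    }
}
```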
add(MetricName, MeasurableStat) - Method in class org.apache.kafka.common.metrics.Sensor
+
+
Register a metric with this sensor
+
+
add(MetricName, MeasurableStat, MetricConfig) - Method in class org.apache.kafka.common.metrics.Sensor
+
+
Register a metric with this sensor
+
+
add(CompoundStat) - Method in class org.apache.kafka.common.metrics.Sensor
+
+
Register a compound statistic with this sensor with no config override
+
+
add(CompoundStat, MetricConfig) - Method in class org.apache.kafka.common.metrics.Sensor
+
+
Register a compound statistic with this sensor which yields multiple measurable quantities (like a histogram)
+
+
add(Header) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
add(Header) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add the given Header to this collection.
+
+
addBoolean(String, boolean) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addBoolean(String, boolean) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and value.
+
+
addByte(String, byte) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addByte(String, byte) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and value.
+
+
addBytes(String, byte[]) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addBytes(String, byte[]) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and value.
+
+
addClientSaslSupport(ConfigDef) - Static method in class org.apache.kafka.common.config.SaslConfigs
+
 
+
addClientSslSupport(ConfigDef) - Static method in class org.apache.kafka.common.config.SslConfigs
+
 
+
addDate(String, Date) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addDate(String, Date) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and Date value.
+
+
addDecimal(String, BigDecimal) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addDecimal(String, BigDecimal) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and Decimal value.
+
+
addDouble(String, double) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addDouble(String, double) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and value.
+
+
addedMetrics() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
addedMetrics() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
addErrorMessage(String) - Method in class org.apache.kafka.common.config.ConfigValue
+
 
+
addExecutionInfo(String) - Method in interface org.apache.kafka.streams.query.QueryResult
+
+
Used by stores to add detailed execution information (if requested) during query execution.
+
+
addFloat(String, float) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addFloat(String, float) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and value.
+
+
addGlobalStore(StoreBuilder<?>, String, Consumed<KIn, VIn>, ProcessorSupplier<KIn, VIn, Void, Void>) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Adds a global StateStore to the topology.
+
+
addGlobalStore(StoreBuilder<S>, String, Deserializer<K>, Deserializer<V>, String, String, ProcessorSupplier<K, V, Void, Void>) - Method in class org.apache.kafka.streams.Topology
+
+
Adds a global state store to the topology.
+
+
addGlobalStore(StoreBuilder<S>, String, TimestampExtractor, Deserializer<K>, Deserializer<V>, String, String, ProcessorSupplier<K, V, Void, Void>) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addingReplicas() - Method in class org.apache.kafka.clients.admin.PartitionReassignment
+
+
The brokers that we are adding this partition to as part of a reassignment.
+
+
addInt(String, int) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addInt(String, int) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and value.
+
+
addLatencyRateTotalSensor(String, String, String, Sensor.RecordingLevel, String...) - Method in interface org.apache.kafka.streams.StreamsMetrics
+
+
Add a latency, rate and total sensor for a specific operation, which will include the following metrics: + + average latency + max latency + invocation rate (num.operations / seconds) + total invocation count + + Whenever a user records this sensor via Sensor.record(double) etc., it will be counted as one invocation + of the operation, and hence the rate / count metrics will be updated accordingly; and the recorded latency value + will be used to update the average / max latency as well.
+
+
addList(String, List<?>, Schema) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addList(String, List<?>, Schema) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and value.
+
+
addLong(String, long) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addLong(String, long) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and value.
+
+
addMap(String, Map<?, ?>, Schema) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addMap(String, Map<?, ?>, Schema) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and value.
+
+
addMetric(MetricName, Measurable) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Add a metric to monitor an object that implements measurable.
+
+
addMetric(MetricName, MetricConfig, Measurable) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Add a metric to monitor an object that implements Measurable.
+
+
addMetric(MetricName, MetricConfig, MetricValueProvider<?>) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Add a metric to monitor an object that implements MetricValueProvider.
+
+
addMetric(MetricName, MetricValueProvider<?>) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Add a metric to monitor an object that implements MetricValueProvider.
+
+
addMetric(MetricName, MetricValueProvider<?>) - Method in interface org.apache.kafka.common.metrics.PluginMetrics
+
+
Add a metric to monitor an object that implements MetricValueProvider.
+
+
addMetricIfAbsent(MetricName, MetricConfig, MetricValueProvider<?>) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Create or get an existing metric to monitor an object that implements MetricValueProvider.
+
+
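The addMetric and Sensor.add entries above are the core of the client metrics API: a Sensor groups related statistics, and recorded values update all of them. A sketch under the assumption that the sensor and group names are free-form strings chosen by the caller:

```java
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;

public class SensorExample {
    public static void main(String[] args) {
        Metrics metrics = new Metrics();
        Sensor latency = metrics.sensor("request-latency");                  // illustrative sensor name
        latency.add(metrics.metricName("latency-avg", "my-group"), new Avg()); // illustrative metric/group names
        latency.add(metrics.metricName("latency-max", "my-group"), new Max());
        latency.record(42.0);   // one observation; the Avg and Max stats update accordingly
        metrics.close();
    }
}
```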
addProcessor(String, ProcessorSupplier<KIn, VIn, KOut, VOut>, String...) - Method in class org.apache.kafka.streams.Topology
+
+
Add a processor that receives and processed records from one or more parent processors or + sources.
+
+
addRaftVoter(int, Uuid, Set<RaftVoterEndpoint>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Add a new voter node to the KRaft metadata quorum.
+
+
addRaftVoter(int, Uuid, Set<RaftVoterEndpoint>, AddRaftVoterOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Add a new voter node to the KRaft metadata quorum.
+
+
addRaftVoter(int, Uuid, Set<RaftVoterEndpoint>, AddRaftVoterOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
addRaftVoter(int, Uuid, Set<RaftVoterEndpoint>, AddRaftVoterOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
AddRaftVoterOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
AddRaftVoterOptions() - Constructor for class org.apache.kafka.clients.admin.AddRaftVoterOptions
+
 
+
AddRaftVoterResult - Class in org.apache.kafka.clients.admin
+
+ +
+
addRateTotalSensor(String, String, String, Sensor.RecordingLevel, String...) - Method in interface org.apache.kafka.streams.StreamsMetrics
+
+
Add a rate and a total sensor for a specific operation, which will include the following metrics: + + invocation rate (num.operations / time unit) + total invocation count + + Whenever a user records this sensor via Sensor.record(double) etc., + it will be counted as one invocation of the operation, and hence the rate / count metrics will be updated accordingly.
+
+
addReadOnlyStateStore(StoreBuilder<S>, String, Deserializer<K>, Deserializer<V>, String, String, ProcessorSupplier<K, V, Void, Void>) - Method in class org.apache.kafka.streams.Topology
+
+
Adds a read-only state store to the topology.
+
+
addReadOnlyStateStore(StoreBuilder<S>, String, TimestampExtractor, Deserializer<K>, Deserializer<V>, String, String, ProcessorSupplier<K, V, Void, Void>) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addRecord(ConsumerRecord<K, V>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
addRecord(ConsumerRecord<K, V>) - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
addRemoteLogSegmentMetadata(RemoteLogSegmentMetadata) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager
+
+
This method is used to add RemoteLogSegmentMetadata asynchronously with the containing RemoteLogSegmentId into RemoteLogMetadataManager.
+
+
addReporter(MetricsReporter) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Add a MetricReporter
+
+
addResult(int, QueryResult<R>) - Method in class org.apache.kafka.streams.query.StateQueryResult
+
+
Set the result for a partitioned store query.
+
+
addSensor(String) - Method in interface org.apache.kafka.common.metrics.PluginMetrics
+
+
Create a Sensor with the given unique name.
+
+
addSensor(String, Sensor.RecordingLevel) - Method in interface org.apache.kafka.streams.StreamsMetrics
+
+
Generic method to create a sensor.
+
+
addSensor(String, Sensor.RecordingLevel, Sensor...) - Method in interface org.apache.kafka.streams.StreamsMetrics
+
+
Generic method to create a sensor with parent sensors.
+
+
addShort(String, short) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addShort(String, short) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and value.
+
+
addSink(String, String, String...) - Method in class org.apache.kafka.streams.Topology
+
+
Add a sink that sends records from upstream + processors or + sources to the named Kafka topic.
+
+
addSink(String, String, Serializer<K>, Serializer<V>, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSink(String, String, Serializer<K>, Serializer<V>, StreamPartitioner<? super K, ? super V>, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSink(String, String, StreamPartitioner<? super K, ? super V>, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSink(String, TopicNameExtractor<? super K, ? super V>, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSink(String, TopicNameExtractor<? super K, ? super V>, Serializer<K>, Serializer<V>, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSink(String, TopicNameExtractor<? super K, ? super V>, Serializer<K>, Serializer<V>, StreamPartitioner<? super K, ? super V>, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSink(String, TopicNameExtractor<? super K, ? super V>, StreamPartitioner<? super K, ? super V>, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(String, String...) - Method in class org.apache.kafka.streams.Topology
+
+
Add a source that consumes the named topics and forwards the records to child + processors and + sinks.
+
+
addSource(String, Pattern) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(String, Deserializer<K>, Deserializer<V>, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(String, Deserializer<K>, Deserializer<V>, Pattern) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(AutoOffsetReset, String, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(AutoOffsetReset, String, Pattern) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(AutoOffsetReset, String, Deserializer<K>, Deserializer<V>, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(AutoOffsetReset, String, Deserializer<K>, Deserializer<V>, Pattern) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(AutoOffsetReset, String, TimestampExtractor, Deserializer<K>, Deserializer<V>, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(AutoOffsetReset, String, TimestampExtractor, Deserializer<K>, Deserializer<V>, Pattern) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(AutoOffsetReset, TimestampExtractor, String, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(AutoOffsetReset, TimestampExtractor, String, Pattern) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(TimestampExtractor, String, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(TimestampExtractor, String, Pattern) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(Topology.AutoOffsetReset, String, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(Topology.AutoOffsetReset, String, Pattern) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(Topology.AutoOffsetReset, String, Deserializer<K>, Deserializer<V>, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(Topology.AutoOffsetReset, String, Deserializer<K>, Deserializer<V>, Pattern) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(Topology.AutoOffsetReset, String, TimestampExtractor, Deserializer<K>, Deserializer<V>, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(Topology.AutoOffsetReset, String, TimestampExtractor, Deserializer<K>, Deserializer<V>, Pattern) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(Topology.AutoOffsetReset, TimestampExtractor, String, String...) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addSource(Topology.AutoOffsetReset, TimestampExtractor, String, Pattern) - Method in class org.apache.kafka.streams.Topology
+
+ +
+
addStateStore(StoreBuilder<?>) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Adds a state store to the underlying Topology.
+
+
addStateStore(StoreBuilder<S>, String...) - Method in class org.apache.kafka.streams.Topology
+
+
Add a state store to the topology, and optionally connect it to one or more + processors.
+
+
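The addSource, addProcessor, addSink and addStateStore methods indexed here are the building blocks of the Processor API. A compact sketch wiring a source topic through one processor to a sink; the topic and node names are placeholders:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;

public class TopologyExample {
    public static Topology build() {
        Topology topology = new Topology();
        topology.addSource("Source",
            Serdes.String().deserializer(), Serdes.String().deserializer(), "input-topic"); // placeholder topic
        topology.addProcessor("Upper", () -> new Processor<String, String, String, String>() {
            private ProcessorContext<String, String> context;
            @Override public void init(ProcessorContext<String, String> context) { this.context = context; }
            @Override public void process(Record<String, String> record) {
                // forward the record downstream with an upper-cased value
                context.forward(record.withValue(record.value().toUpperCase()));
            }
        }, "Source");
        topology.addSink("Sink", "output-topic",
            Serdes.String().serializer(), Serdes.String().serializer(), "Upper");           // placeholder topic
        return topology;
    }
}
```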
addStateStore(S) - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
addStreamThread() - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Adds and starts a stream thread in addition to the stream threads that are already running in this + Kafka Streams client.
+
+
addString(String, String) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addString(String, String) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and value.
+
+
addStruct(String, Struct) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addStruct(String, Struct) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and value.
+
+
addTime(String, Date) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addTime(String, Date) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and Time value.
+
+
addTimestamp(String, Date) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
addTimestamp(String, Date) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Add to this collection a Header with the given key and Timestamp value.
+
+
Admin - Interface in org.apache.kafka.clients.admin
+
+
The administrative client for Kafka, which supports managing and inspecting topics, brokers, configurations and ACLs.
+
+
ADMIN - Enum constant in enum class org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest.ClientType
+
 
+
ADMIN_CLIENT_PREFIX - Static variable in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
 
+
ADMIN_CLIENT_PREFIX - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix used to isolate admin configs from other client configs.
+
+
AdminClient - Class in org.apache.kafka.clients.admin
+
+
The base class for in-built admin clients.
+
+
AdminClient() - Constructor for class org.apache.kafka.clients.admin.AdminClient
+
 
+
AdminClientConfig - Class in org.apache.kafka.clients.admin
+
+
The AdminClient configuration class, which also contains constants for configuration entry names.
+
+
AdminClientConfig(Map<?, ?>) - Constructor for class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
adminClientId() - Method in interface org.apache.kafka.streams.ThreadMetadata
+
+
Client ID of the admin client used by the stream thread.
+
+
adminClientPrefix(String) - Static method in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix a property with StreamsConfig.ADMIN_CLIENT_PREFIX.
+
+
adminConfig() - Method in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
+
Sub-config for Admin clients.
+
+
adminInstanceId() - Method in interface org.apache.kafka.streams.ClientInstanceIds
+
+
Returns the client instance id of the admin client.
+
+
advanceBy(Duration) - Method in class org.apache.kafka.streams.kstream.TimeWindows
+
+
Return a window definition with the original size, but advance ("hop") the window by the given interval, which specifies by how much a window moves forward relative to the previous one.
+
+
advanceMs - Variable in class org.apache.kafka.streams.kstream.TimeWindows
+
+
The size of the window's advance interval in milliseconds, i.e., by how much a window moves forward relative to the previous one.
+
+
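A short sketch of advanceBy in use, assuming a hopping-window aggregation is being configured with the current ofSizeWithNoGrace factory; the durations are illustrative.

```java
import java.time.Duration;
import org.apache.kafka.streams.kstream.TimeWindows;

// 5-minute windows that hop forward every minute, so consecutive windows overlap by 4 minutes.
TimeWindows hopping = TimeWindows
    .ofSizeWithNoGrace(Duration.ofMinutes(5))
    .advanceBy(Duration.ofMinutes(1));
```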
advanceTime(Duration) - Method in class org.apache.kafka.streams.TestInputTopic
+
+
Advances the internally tracked event time of this input topic.
+
+
advanceWallClockTime(Duration) - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Advances the internally mocked wall-clock time.
+
+
after(Duration) - Method in class org.apache.kafka.streams.kstream.JoinWindows
+
+
Changes the end window boundary to timeDifference but keep the start window boundary as is.
+
+
afterMs - Variable in class org.apache.kafka.streams.kstream.JoinWindows
+
+
Maximum time difference for tuples that are after the join tuple.
+
+
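A sketch of adjusting the join window boundaries with after, assuming an asymmetric stream-stream join window built from the ofTimeDifferenceWithNoGrace factory; the durations are illustrative.

```java
import java.time.Duration;
import org.apache.kafka.streams.kstream.JoinWindows;

// Accept matching records up to 10 minutes before and, after the call to after(),
// up to 5 minutes after the stream-side record's timestamp.
JoinWindows windows = JoinWindows
    .ofTimeDifferenceWithNoGrace(Duration.ofMinutes(10))
    .after(Duration.ofMinutes(5));
```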
aggregate(Initializer<V>) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedCogroupedKStream
+
+
Aggregate the values of records in this stream by the grouped key and defined windows.
+
+
aggregate(Initializer<V>, Materialized<K, V, WindowStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedCogroupedKStream
+
+
Aggregate the values of records in this stream by the grouped key and defined windows.
+
+
aggregate(Initializer<V>, Merger<? super K, V>) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedCogroupedKStream
+
+
Aggregate the values of records in these streams by the grouped key and defined sessions.
+
+
aggregate(Initializer<V>, Merger<? super K, V>, Materialized<K, V, SessionStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedCogroupedKStream
+
+
Aggregate the values of records in these streams by the grouped key and defined sessions.
+
+
aggregate(Initializer<V>, Merger<? super K, V>, Named) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedCogroupedKStream
+
+
Aggregate the values of records in these streams by the grouped key and defined sessions.
+
+
aggregate(Initializer<V>, Merger<? super K, V>, Named, Materialized<K, V, SessionStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedCogroupedKStream
+
+
Aggregate the values of records in these streams by the grouped key and defined sessions.
+
+
aggregate(Initializer<V>, Named) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedCogroupedKStream
+
+
Aggregate the values of records in this stream by the grouped key and defined windows.
+
+
aggregate(Initializer<V>, Named, Materialized<K, V, WindowStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedCogroupedKStream
+
+
Aggregate the values of records in this stream by the grouped key and defined windows.
+
+
aggregate(Initializer<VOut>) - Method in interface org.apache.kafka.streams.kstream.CogroupedKStream
+
+
Aggregate the values of records in these streams by the grouped key.
+
+
aggregate(Initializer<VOut>, Aggregator<? super K, ? super V, VOut>) - Method in interface org.apache.kafka.streams.kstream.KGroupedStream
+
+
Aggregate the values of records in this stream by the grouped key.
+
+
aggregate(Initializer<VOut>, Aggregator<? super K, ? super V, VOut>) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedKStream
+
+
Aggregate the values of records in this stream by the grouped key and defined windows.
+
+
aggregate(Initializer<VOut>, Aggregator<? super K, ? super V, VOut>, Materialized<K, VOut, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KGroupedStream
+
+
Aggregate the values of records in this stream by the grouped key.
+
+
aggregate(Initializer<VOut>, Aggregator<? super K, ? super V, VOut>, Materialized<K, VOut, WindowStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedKStream
+
+
Aggregate the values of records in this stream by the grouped key and defined windows.
+
+
aggregate(Initializer<VOut>, Aggregator<? super K, ? super V, VOut>, Merger<? super K, VOut>) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedKStream
+
+
Aggregate the values of records in this stream by the grouped key and defined sessions.
+
+
aggregate(Initializer<VOut>, Aggregator<? super K, ? super V, VOut>, Merger<? super K, VOut>, Materialized<K, VOut, SessionStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedKStream
+
+
Aggregate the values of records in this stream by the grouped key and defined sessions.
+
+
aggregate(Initializer<VOut>, Aggregator<? super K, ? super V, VOut>, Merger<? super K, VOut>, Named) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedKStream
+
+
Aggregate the values of records in this stream by the grouped key and defined sessions.
+
+
aggregate(Initializer<VOut>, Aggregator<? super K, ? super V, VOut>, Merger<? super K, VOut>, Named, Materialized<K, VOut, SessionStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedKStream
+
+
Aggregate the values of records in this stream by the grouped key and defined sessions.
+
+
aggregate(Initializer<VOut>, Aggregator<? super K, ? super V, VOut>, Named) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedKStream
+
+
Aggregate the values of records in this stream by the grouped key and defined windows.
+
+
aggregate(Initializer<VOut>, Aggregator<? super K, ? super V, VOut>, Named, Materialized<K, VOut, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KGroupedStream
+
+
Aggregate the values of records in this stream by the grouped key.
+
+
aggregate(Initializer<VOut>, Aggregator<? super K, ? super V, VOut>, Named, Materialized<K, VOut, WindowStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedKStream
+
+
Aggregate the values of records in this stream by the grouped key and defined windows.
+
+
aggregate(Initializer<VOut>, Materialized<K, VOut, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.CogroupedKStream
+
+
Aggregate the values of records in these streams by the grouped key.
+
+
aggregate(Initializer<VOut>, Named) - Method in interface org.apache.kafka.streams.kstream.CogroupedKStream
+
+
Aggregate the values of records in these streams by the grouped key.
+
+
aggregate(Initializer<VOut>, Named, Materialized<K, VOut, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.CogroupedKStream
+
+
Aggregate the values of records in these streams by the grouped key.
+
+
aggregate(Initializer<VR>, Aggregator<? super K, ? super V, VR>, Aggregator<? super K, ? super V, VR>) - Method in interface org.apache.kafka.streams.kstream.KGroupedTable
+
+
Aggregate the value of records of the original KTable that got mapped to the same key into a new instance of KTable using default serializers and deserializers.
+
+
aggregate(Initializer<VR>, Aggregator<? super K, ? super V, VR>, Aggregator<? super K, ? super V, VR>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KGroupedTable
+
+
Aggregate the value of records of the original KTable that got mapped to the same key into a new instance of KTable.
+
+
aggregate(Initializer<VR>, Aggregator<? super K, ? super V, VR>, Aggregator<? super K, ? super V, VR>, Named) - Method in interface org.apache.kafka.streams.kstream.KGroupedTable
+
+
Aggregate the value of records of the original KTable that got mapped to the same key into a new instance of KTable using default serializers and deserializers.
+
+
aggregate(Initializer<VR>, Aggregator<? super K, ? super V, VR>, Aggregator<? super K, ? super V, VR>, Named, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KGroupedTable
+
+
Aggregate the value of records of the original KTable that got mapped to the same key into a new instance of KTable.
+
+
Aggregator<K,V,VAgg> - Interface in org.apache.kafka.streams.kstream
+
+
The Aggregator interface for aggregating values of the given key.
+
+
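To illustrate how an Initializer, an Aggregator, and Materialized combine in one of these aggregate calls, a hedged sketch; the topic name, store name, and the "sum of value lengths" logic are assumptions for illustration.

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.state.KeyValueStore;

static Topology buildTotals() {
    StreamsBuilder builder = new StreamsBuilder();

    // Sum the length of all values seen per key.
    KTable<String, Long> totals = builder
        .<String, String>stream("input-topic")
        .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .aggregate(
            () -> 0L,                                              // Initializer: the starting aggregate
            (key, value, aggregate) -> aggregate + value.length(), // Aggregator: fold each record in
            Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("totals-store")
                .withValueSerde(Serdes.Long()));

    return builder.build();
}
```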
all() - Method in class org.apache.kafka.clients.admin.AbortTransactionResult
+
+
Get a future which completes when the transaction specified by AbortTransactionSpec in the respective call to Admin.abortTransaction(AbortTransactionSpec, AbortTransactionOptions) returns successfully or fails due to an error or timeout.
+
+
all() - Method in class org.apache.kafka.clients.admin.AddRaftVoterResult
+
+
Returns a future that completes when the voter has been added.
+
+
all() - Method in class org.apache.kafka.clients.admin.AlterClientQuotasResult
+
+
Returns a future which succeeds only if all quota alterations succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.AlterConfigsResult
+
+
Return a future which succeeds only if all the alter configs operations succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsResult
+
+
Return a future which succeeds if all the alter offsets succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.AlterPartitionReassignmentsResult
+
+
Return a future which succeeds only if all the reassignments were successfully initiated.
+
+
all() - Method in class org.apache.kafka.clients.admin.AlterReplicaLogDirsResult
+
+
Return a KafkaFuture which succeeds on KafkaFuture.get() if all the replica movements have succeeded.
+
+
all() - Method in class org.apache.kafka.clients.admin.AlterShareGroupOffsetsResult
+
+
Return a future which succeeds if all the alter offsets succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.AlterStreamsGroupOffsetsResult
+
+
Return a future which succeeds if all the alter offsets succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.AlterUserScramCredentialsResult
+
+
Return a future which succeeds only if all the user SCRAM credential alterations succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.CreateAclsResult
+
+
Return a future which succeeds only if all the ACL creations succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.CreatePartitionsResult
+
+
Return a future which succeeds if all the partition creations succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.CreateTopicsResult
+
+
Return a future which succeeds if all the topic creations succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.DeleteAclsResult
+
+
Return a future which succeeds only if all the ACLs deletions succeed, and which contains all the deleted ACLs.
+
+
all() - Method in class org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsResult
+
+
Return a future which succeeds only if all the deletions succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.DeleteConsumerGroupsResult
+
+
Return a future which succeeds only if all the consumer group deletions succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.DeleteRecordsResult
+
+
Return a future which succeeds only if all the records deletions succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.DeleteShareGroupOffsetsResult
+
+
Return a future which succeeds only if all the deletions succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.DeleteShareGroupsResult
+
+
Return a future which succeeds only if all the share group deletions succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.DeleteStreamsGroupOffsetsResult
+
+
Return a future which succeeds only if all the deletions succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.DeleteStreamsGroupsResult
+
+
Return a future which succeeds only if all the deletions succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.DeleteTopicsResult
+
 
+
all() - Method in class org.apache.kafka.clients.admin.DescribeClassicGroupsResult
+
+
Return a future which yields all ClassicGroupDescription objects, if all the describes succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.DescribeConfigsResult
+
+
Return a future which succeeds only if all the config descriptions succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.DescribeConsumerGroupsResult
+
+
Return a future which yields all ConsumerGroupDescription objects, if all the describes succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.DescribeProducersResult
+
 
+
all() - Method in class org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult
+
+
Return a future which succeeds if log directory information of all replicas is available.
+
+
all() - Method in class org.apache.kafka.clients.admin.DescribeShareGroupsResult
+
+
Return a future which yields all ShareGroupDescription objects, if all the describes succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.DescribeStreamsGroupsResult
+
+
Return a future which yields all StreamsGroupDescription objects, if all the describes succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.DescribeTransactionsResult
+
+
Get a future which returns a map of the transaction descriptions requested in the respective call to Admin.describeTransactions(Collection, DescribeTransactionsOptions).
+
+
all() - Method in class org.apache.kafka.clients.admin.DescribeUserScramCredentialsResult
+
 
+
all() - Method in class org.apache.kafka.clients.admin.ElectLeadersResult
+
+
Return a future which succeeds if all the topic elections succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.FenceProducersResult
+
+
Return a future which succeeds only if all the producer fencings succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.ListClientMetricsResourcesResult
+
+
Deprecated.
+
Returns a future that yields either an exception, or the full set of client metrics listings.
+
+
all() - Method in class org.apache.kafka.clients.admin.ListConfigResourcesResult
+
+
Returns a future that yields either an exception, or the full set of config resources.
+
+
all() - Method in class org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult
+
+
Return a future which yields all Map<String, Map<TopicPartition, OffsetAndMetadata>> objects, if requests for all the groups succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.ListConsumerGroupsResult
+
+
Deprecated.
+
Returns a future that yields either an exception, or the full set of consumer group listings.
+
+
all() - Method in class org.apache.kafka.clients.admin.ListGroupsResult
+
+
Returns a future that yields either an exception, or the full set of group listings.
+
+
all() - Method in class org.apache.kafka.clients.admin.ListOffsetsResult
+
+
Return a future which succeeds only if offsets for all specified partitions have been successfully retrieved.
+
+
all() - Method in class org.apache.kafka.clients.admin.ListShareGroupOffsetsResult
+
+
Return the future when the requests for all groups succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.ListStreamsGroupOffsetsResult
+
+
Return a future which yields all Map<String, Map<TopicPartition, OffsetAndMetadata>> objects, if requests for all the groups succeed.
+
+
all() - Method in class org.apache.kafka.clients.admin.ListTransactionsResult
+
+
Get all transaction listings.
+
+
all() - Method in class org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupResult
+
+
Returns a future which indicates whether the request was 100% success, i.e.
+
+
all() - Method in class org.apache.kafka.clients.admin.RemoveRaftVoterResult
+
+
Returns a future that completes when the voter has been removed.
+
+
all() - Method in class org.apache.kafka.clients.admin.UnregisterBrokerResult
+
+
Return a future which succeeds if the operation is successful.
+
+
all() - Method in class org.apache.kafka.clients.admin.UpdateFeaturesResult
+
+
Return a future which succeeds if all the feature updates succeed.
+
+
all() - Static method in class org.apache.kafka.common.quota.ClientQuotaFilter
+
+
Constructs and returns a quota filter that matches all configured entities.
+
+
all() - Static method in class org.apache.kafka.streams.processor.To
+
+
Forward the key/value pair to all downstream processors
+
+
all() - Method in interface org.apache.kafka.streams.state.ReadOnlyKeyValueStore
+
+
Return an iterator over all keys in this store.
+
+
all() - Method in interface org.apache.kafka.streams.state.ReadOnlyWindowStore
+
+
Gets all the key-value pairs in the existing windows.
+
+
ALL - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
ALL operation.
+
+
allByBrokerId() - Method in class org.apache.kafka.clients.admin.ListTransactionsResult
+
+
Get all transaction listings in a map which is keyed by the ID of the respective broker that is currently managing them.
+
+
allDescriptions() - Method in class org.apache.kafka.clients.admin.DescribeLogDirsResult
+
+
Return a future which succeeds only if all the brokers have responded without error.
+
+
allLocalStorePartitionLags() - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Returns LagInfo, for all store partitions (active or standby) local to this Streams instance.
+
+
allMetrics() - Static method in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
allOf(KafkaFuture<?>...) - Static method in class org.apache.kafka.common.KafkaFuture
+
+
Returns a new KafkaFuture that is completed when all the given futures have completed.
+
+
ALLOW - Enum constant in enum class org.apache.kafka.common.acl.AclPermissionType
+
+
Grants access.
+
+
ALLOW_AUTO_CREATE_TOPICS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
allow.auto.create.topics
+
+
ALLOWED - Enum constant in enum class org.apache.kafka.server.authorizer.AuthorizationResult
+
 
+
ALLOWED_PATHS_CONFIG - Static variable in class org.apache.kafka.common.config.provider.DirectoryConfigProvider
+
 
+
ALLOWED_PATHS_CONFIG - Static variable in class org.apache.kafka.common.config.provider.FileConfigProvider
+
 
+
ALLOWED_PATHS_DOC - Static variable in class org.apache.kafka.common.config.provider.DirectoryConfigProvider
+
 
+
ALLOWED_PATHS_DOC - Static variable in class org.apache.kafka.common.config.provider.FileConfigProvider
+
 
+
ALLOWLIST_PATTERN_CONFIG - Static variable in class org.apache.kafka.common.config.provider.EnvVarConfigProvider
+
 
+
ALLOWLIST_PATTERN_CONFIG_DOC - Static variable in class org.apache.kafka.common.config.provider.EnvVarConfigProvider
+
 
+
allowReplicationFactorChange() - Method in class org.apache.kafka.clients.admin.AlterPartitionReassignmentsOptions
+
+
A boolean indicating if the alter partition reassignments should be allowed to alter the replication factor of a partition.
+
+
allowReplicationFactorChange(boolean) - Method in class org.apache.kafka.clients.admin.AlterPartitionReassignmentsOptions
+
+
Set the option indicating if the alter partition reassignments call should be allowed to alter the replication factor of a partition.
+
+
allTasks() - Method in interface org.apache.kafka.streams.processor.assignment.ApplicationState
+
 
+
allTopicIds() - Method in class org.apache.kafka.clients.admin.DescribeTopicsResult
+
 
+
allTopicNames() - Method in class org.apache.kafka.clients.admin.DescribeTopicsResult
+
 
+
allWithName(String) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
allWithName(String) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Get the collection of Header objects whose keys all match the specified key.
+
+
AlreadyExistsException - Exception in org.apache.kafka.connect.errors
+
+
Indicates the operation tried to create an entity that already exists.
+
+
AlreadyExistsException(String) - Constructor for exception org.apache.kafka.connect.errors.AlreadyExistsException
+
 
+
AlreadyExistsException(String, Throwable) - Constructor for exception org.apache.kafka.connect.errors.AlreadyExistsException
+
 
+
AlreadyExistsException(Throwable) - Constructor for exception org.apache.kafka.connect.errors.AlreadyExistsException
+
 
+
ALTER - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
ALTER operation.
+
+
ALTER_CONFIGS - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
ALTER_CONFIGS operation.
+
+
alterClientQuotas(Collection<ClientQuotaAlteration>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Alters client quota configurations with the specified alterations.
+
+
alterClientQuotas(Collection<ClientQuotaAlteration>, AlterClientQuotasOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Alters client quota configurations with the specified alterations.
+
+
alterClientQuotas(Collection<ClientQuotaAlteration>, AlterClientQuotasOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
alterClientQuotas(Collection<ClientQuotaAlteration>, AlterClientQuotasOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
AlterClientQuotasOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
AlterClientQuotasOptions() - Constructor for class org.apache.kafka.clients.admin.AlterClientQuotasOptions
+
 
+
AlterClientQuotasResult - Class in org.apache.kafka.clients.admin
+
+ +
+
AlterClientQuotasResult(Map<ClientQuotaEntity, KafkaFuture<Void>>) - Constructor for class org.apache.kafka.clients.admin.AlterClientQuotasResult
+
+
Maps an entity to its alteration result.
+
+
AlterConfigOp - Class in org.apache.kafka.clients.admin
+
+
A class representing an alter configuration entry containing name, value and operation type.
+
+
AlterConfigOp(ConfigEntry, AlterConfigOp.OpType) - Constructor for class org.apache.kafka.clients.admin.AlterConfigOp
+
 
+
AlterConfigOp.OpType - Enum Class in org.apache.kafka.clients.admin
+
 
+
AlterConfigPolicy - Interface in org.apache.kafka.server.policy
+
+
An interface for enforcing a policy on alter configs requests.
+
+
AlterConfigPolicy.RequestMetadata - Class in org.apache.kafka.server.policy
+
+
Class containing the create request parameters.
+
+
AlterConfigsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
AlterConfigsOptions() - Constructor for class org.apache.kafka.clients.admin.AlterConfigsOptions
+
 
+
AlterConfigsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
alterConsumerGroupOffsets(String, Map<TopicPartition, OffsetAndMetadata>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Alters offsets for the specified group.
+
+
alterConsumerGroupOffsets(String, Map<TopicPartition, OffsetAndMetadata>, AlterConsumerGroupOffsetsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Alters offsets for the specified group.
+
+
alterConsumerGroupOffsets(String, Map<TopicPartition, OffsetAndMetadata>, AlterConsumerGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
alterConsumerGroupOffsets(String, Map<TopicPartition, OffsetAndMetadata>, AlterConsumerGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
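A sketch of alterConsumerGroupOffsets via the Admin client, assuming a broker at localhost:9092 and illustrative group, topic, and offset values.

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

static void resetGroupOffset() throws Exception {
    Properties props = new Properties();
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

    try (Admin admin = Admin.create(props)) {
        // Move the committed offset of orders-0 to 42 for group "my-group".
        admin.alterConsumerGroupOffsets(
                "my-group",
                Map.of(new TopicPartition("orders", 0), new OffsetAndMetadata(42L)))
            .all()
            .get(); // block until the brokers acknowledge the change (or an error is raised)
    }
}
```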
AlterConsumerGroupOffsetsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
AlterConsumerGroupOffsetsOptions() - Constructor for class org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsOptions
+
 
+
AlterConsumerGroupOffsetsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
alternativeString - Variable in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
alterOffsets(Map<String, String>, Map<Map<String, ?>, Map<String, ?>>) - Method in class org.apache.kafka.connect.source.SourceConnector
+
+
Invoked when users request to manually alter/reset the offsets for this connector via the Connect worker's REST API.
+
+
alterOffsets(Map<String, String>, Map<TopicPartition, Long>) - Method in class org.apache.kafka.connect.sink.SinkConnector
+
+
Invoked when users request to manually alter/reset the offsets for this connector via the Connect worker's REST API.
+
+
alterPartitionReassignments(Map<TopicPartition, Optional<NewPartitionReassignment>>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Change the reassignments for one or more partitions.
+
+
alterPartitionReassignments(Map<TopicPartition, Optional<NewPartitionReassignment>>, AlterPartitionReassignmentsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Change the reassignments for one or more partitions.
+
+
alterPartitionReassignments(Map<TopicPartition, Optional<NewPartitionReassignment>>, AlterPartitionReassignmentsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
alterPartitionReassignments(Map<TopicPartition, Optional<NewPartitionReassignment>>, AlterPartitionReassignmentsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
AlterPartitionReassignmentsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
AlterPartitionReassignmentsOptions() - Constructor for class org.apache.kafka.clients.admin.AlterPartitionReassignmentsOptions
+
 
+
AlterPartitionReassignmentsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
alterReplicaLogDirs(Map<TopicPartitionReplica, String>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Change the log directory for the specified replicas.
+
+
alterReplicaLogDirs(Map<TopicPartitionReplica, String>, AlterReplicaLogDirsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Change the log directory for the specified replicas.
+
+
alterReplicaLogDirs(Map<TopicPartitionReplica, String>, AlterReplicaLogDirsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
alterReplicaLogDirs(Map<TopicPartitionReplica, String>, AlterReplicaLogDirsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
AlterReplicaLogDirsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
AlterReplicaLogDirsOptions() - Constructor for class org.apache.kafka.clients.admin.AlterReplicaLogDirsOptions
+
 
+
AlterReplicaLogDirsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
alterShareGroupOffsets(String, Map<TopicPartition, Long>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Alters offsets for the specified group.
+
+
alterShareGroupOffsets(String, Map<TopicPartition, Long>, AlterShareGroupOffsetsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Alters offsets for the specified group.
+
+
alterShareGroupOffsets(String, Map<TopicPartition, Long>, AlterShareGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
alterShareGroupOffsets(String, Map<TopicPartition, Long>, AlterShareGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
AlterShareGroupOffsetsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
AlterShareGroupOffsetsOptions() - Constructor for class org.apache.kafka.clients.admin.AlterShareGroupOffsetsOptions
+
 
+
AlterShareGroupOffsetsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
alterStreamsGroupOffsets(String, Map<TopicPartition, OffsetAndMetadata>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Alters offsets for the specified group.
+
+
alterStreamsGroupOffsets(String, Map<TopicPartition, OffsetAndMetadata>, AlterStreamsGroupOffsetsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Alters offsets for the specified group.
+
+
alterStreamsGroupOffsets(String, Map<TopicPartition, OffsetAndMetadata>, AlterStreamsGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
alterStreamsGroupOffsets(String, Map<TopicPartition, OffsetAndMetadata>, AlterStreamsGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
AlterStreamsGroupOffsetsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
AlterStreamsGroupOffsetsOptions() - Constructor for class org.apache.kafka.clients.admin.AlterStreamsGroupOffsetsOptions
+
 
+
AlterStreamsGroupOffsetsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
alterUserScramCredentials(List<UserScramCredentialAlteration>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Alter SASL/SCRAM credentials for the given users.
+
+
alterUserScramCredentials(List<UserScramCredentialAlteration>, AlterUserScramCredentialsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Alter SASL/SCRAM credentials.
+
+
alterUserScramCredentials(List<UserScramCredentialAlteration>, AlterUserScramCredentialsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
alterUserScramCredentials(List<UserScramCredentialAlteration>, AlterUserScramCredentialsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
AlterUserScramCredentialsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
AlterUserScramCredentialsOptions() - Constructor for class org.apache.kafka.clients.admin.AlterUserScramCredentialsOptions
+
 
+
AlterUserScramCredentialsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
AlterUserScramCredentialsResult(Map<String, KafkaFuture<Void>>) - Constructor for class org.apache.kafka.clients.admin.AlterUserScramCredentialsResult
+
 
+
ANONYMOUS - Static variable in class org.apache.kafka.common.security.auth.KafkaPrincipal
+
 
+
ANY - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
In a filter, matches any AclOperation.
+
+
ANY - Enum constant in enum class org.apache.kafka.common.acl.AclPermissionType
+
+
In a filter, matches any AclPermissionType.
+
+
ANY - Enum constant in enum class org.apache.kafka.common.resource.PatternType
+
+
In a filter, matches any resource pattern type.
+
+
ANY - Enum constant in enum class org.apache.kafka.common.resource.ResourceType
+
+
In a filter, matches any ResourceType.
+
+
ANY - Enum constant in enum class org.apache.kafka.streams.query.ResultOrder
+
 
+
ANY - Static variable in class org.apache.kafka.common.acl.AccessControlEntryFilter
+
+
Matches any access control entry.
+
+
ANY - Static variable in class org.apache.kafka.common.acl.AclBindingFilter
+
+
A filter which matches any ACL binding.
+
+
ANY - Static variable in class org.apache.kafka.common.resource.ResourcePatternFilter
+
+
Matches any resource pattern.
+
+
ApiException - Exception in org.apache.kafka.common.errors
+
+
Any API exception that is part of the public protocol should be a subclass of this class and be part of this package.
+
+
ApiException() - Constructor for exception org.apache.kafka.common.errors.ApiException
+
 
+
ApiException(String) - Constructor for exception org.apache.kafka.common.errors.ApiException
+
 
+
ApiException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.ApiException
+
 
+
ApiException(Throwable) - Constructor for exception org.apache.kafka.common.errors.ApiException
+
 
+
appConfigs() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
appConfigs() - Method in interface org.apache.kafka.streams.processor.api.ProcessingContext
+
+
Returns all the application config properties as key/value pairs.
+
+
appConfigs() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
appConfigs() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return all the application config properties as key/value pairs.
+
+
appConfigs() - Method in interface org.apache.kafka.streams.processor.StateStoreContext
+
+
Returns all the application config properties as key/value pairs.
+
+
appConfigsWithPrefix(String) - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
appConfigsWithPrefix(String) - Method in interface org.apache.kafka.streams.processor.api.ProcessingContext
+
+
Return all the application config properties with the given key prefix, as key/value pairs stripping the prefix.
+
+
appConfigsWithPrefix(String) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
appConfigsWithPrefix(String) - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return all the application config properties with the given key prefix, as key/value pairs stripping the prefix.
+
+
appConfigsWithPrefix(String) - Method in interface org.apache.kafka.streams.processor.StateStoreContext
+
+
Returns all the application config properties with the given key prefix, as key/value pairs stripping the prefix.
+
+
APPEND - Enum constant in enum class org.apache.kafka.clients.admin.AlterConfigOp.OpType
+
+
(For list-type configuration entries only.) Add the specified values to the current value of the configuration entry.
+
+
appendDeserializerToConfig(Map<String, Object>, Deserializer<?>, Deserializer<?>) - Static method in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
APPLICATION_ID_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
application.id
+
+
APPLICATION_SERVER_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
application.server
+
+
applicationConfigs - Variable in class org.apache.kafka.streams.TopologyConfig
+
 
+
applicationId() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
applicationId() - Method in interface org.apache.kafka.streams.processor.api.ProcessingContext
+
+
Return the application id.
+
+
applicationId() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
applicationId() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return the application id.
+
+
applicationId() - Method in interface org.apache.kafka.streams.processor.StateStoreContext
+
+
Returns the application id.
+
+
ApplicationRecoverableException - Exception in org.apache.kafka.common.errors
+
+
Indicates that the error is fatal to the producer, and the application needs to restart the producer after handling the error.
+
+
ApplicationRecoverableException() - Constructor for exception org.apache.kafka.common.errors.ApplicationRecoverableException
+
 
+
ApplicationRecoverableException(String) - Constructor for exception org.apache.kafka.common.errors.ApplicationRecoverableException
+
 
+
ApplicationRecoverableException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.ApplicationRecoverableException
+
 
+
ApplicationRecoverableException(Throwable) - Constructor for exception org.apache.kafka.common.errors.ApplicationRecoverableException
+
 
+
ApplicationState - Interface in org.apache.kafka.streams.processor.assignment
+
+
A read-only metadata class representing the state of the application and the current rebalance.
+
+
apply() - Method in interface org.apache.kafka.streams.kstream.Initializer
+
+
Return the initial value for an aggregation.
+
+
apply(A) - Method in interface org.apache.kafka.common.KafkaFuture.BaseFunction
+
 
+
apply(String, Headers.HeaderTransform) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
apply(String, Headers.HeaderTransform) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Get all Headers with the given key, apply the transform to each and store the result in place of the original.
+
+
apply(K1, V1, V2) - Method in interface org.apache.kafka.streams.kstream.ValueJoinerWithKey
+
+
Return a joined value consisting of readOnlyKey, value1 and value2.
+
+
apply(K, V) - Method in interface org.apache.kafka.streams.kstream.ForeachAction
+
+
Perform an action for each record of a stream.
+
+
apply(K, V) - Method in interface org.apache.kafka.streams.kstream.KeyValueMapper
+
+
Map a record with the given key and value to a new value.
+
+
apply(K, V) - Method in interface org.apache.kafka.streams.kstream.ValueMapperWithKey
+
+
Map the given [key and ]value to a new value.
+
+
apply(K, V, V) - Method in interface org.apache.kafka.streams.kstream.Merger
+
+
Compute a new aggregate from the key and two aggregates.
+
+
apply(K, V, VAgg) - Method in interface org.apache.kafka.streams.kstream.Aggregator
+
+
Compute a new aggregate from the key and value of a record and the current aggregate of the same key.
+
+
apply(Header) - Method in interface org.apache.kafka.connect.header.Headers.HeaderTransform
+
+
Transform the given Header and return the updated Header.
+
+
apply(Headers.HeaderTransform) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
apply(Headers.HeaderTransform) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Get all Headers, apply the transform to each and store the result in place of the original.
+
+
apply(R) - Method in interface org.apache.kafka.connect.transforms.Transformation
+
+
Apply transformation to the record and return another record object (which may be the record itself) or null, corresponding to a map or filter operation respectively.
+
+
apply(V) - Method in interface org.apache.kafka.streams.kstream.ValueMapper
+
+
Map the given value to a new value.
+
+
apply(V1, V2) - Method in interface org.apache.kafka.streams.kstream.ValueJoiner
+
+
Return a joined value consisting of value1 and value2.
+
+
apply(V, V) - Method in interface org.apache.kafka.streams.kstream.Reducer
+
+
Aggregate the two given values into a single one.
+
+
approximateNumEntries() - Method in interface org.apache.kafka.streams.state.ReadOnlyKeyValueStore
+
+
Return an approximate count of key-value mappings in this store.
+
+
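A sketch of reading a ReadOnlyKeyValueStore through interactive queries, assuming `streams` is a running KafkaStreams instance and "counts" is a queryable store defined in its topology.

```java
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

static void dumpStore(KafkaStreams streams) {
    ReadOnlyKeyValueStore<String, Long> store = streams.store(
        StoreQueryParameters.fromNameAndType("counts", QueryableStoreTypes.<String, Long>keyValueStore()));

    System.out.println("approximate entries: " + store.approximateNumEntries());
    try (KeyValueIterator<String, Long> it = store.all()) {            // iterator over all keys in the store
        it.forEachRemaining(kv -> System.out.println(kv.key + " -> " + kv.value));
    }
}
```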
array(Schema) - Static method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
ARRAY - Enum constant in enum class org.apache.kafka.connect.data.Schema.Type
+
+
An ordered sequence of elements, each of which shares the same type.
+
+
as(String) - Static method in class org.apache.kafka.streams.kstream.Branched
+
+
Create an instance of Branched with provided branch name suffix.
+
+
as(String) - Static method in class org.apache.kafka.streams.kstream.Consumed
+
+
Create an instance of Consumed with provided processor name.
+
+
as(String) - Static method in class org.apache.kafka.streams.kstream.Grouped
+
+
Create a Grouped instance with the provided name used as part of the repartition topic if required.
+
+
as(String) - Static method in class org.apache.kafka.streams.kstream.Joined
+
+
Create an instance of Joined with base name for all components of the join; this may include any repartition topics created to complete the join.
+
+
as(String) - Static method in class org.apache.kafka.streams.kstream.Materialized
+
+
Materialize a StateStore with the given name.
+
+
as(String) - Static method in class org.apache.kafka.streams.kstream.Named
+
+
Create a Named instance with provided name.
+
+
as(String) - Static method in class org.apache.kafka.streams.kstream.Produced
+
+
Create an instance of Produced with provided processor name.
+
+
as(String) - Static method in class org.apache.kafka.streams.kstream.Repartitioned
+
+
Create a Repartitioned instance with the provided name used as part of the repartition topic.
+
+
as(String) - Static method in class org.apache.kafka.streams.kstream.StreamJoined
+
+
Creates a StreamJoined instance using the provided name for the state stores and hence the changelog topics for the join stores.
+
+
as(String) - Static method in class org.apache.kafka.streams.kstream.TableJoined
+
+
Create an instance of TableJoined with base name for all components of the join, including internal topics created to complete the join.
+
+
as(DslStoreSuppliers) - Static method in class org.apache.kafka.streams.kstream.Materialized
+
+
Materialize a StateStore with the given DslStoreSuppliers.
+
+
as(KeyValueBytesStoreSupplier) - Static method in class org.apache.kafka.streams.kstream.Materialized
+
+
Materialize a KeyValueStore using the provided KeyValueBytesStoreSupplier.
+
+
as(SessionBytesStoreSupplier) - Static method in class org.apache.kafka.streams.kstream.Materialized
+
+
Materialize a SessionStore using the provided SessionBytesStoreSupplier.
+
+
as(WindowBytesStoreSupplier) - Static method in class org.apache.kafka.streams.kstream.Materialized
+
+
Materialize a WindowStore using the provided WindowBytesStoreSupplier.
+
+
ASCENDING - Enum constant in enum class org.apache.kafka.streams.query.ResultOrder
+
 
+
asOf(Instant) - Method in class org.apache.kafka.streams.query.VersionedKeyQuery
+
+
Specifies the timestamp for the key query.
+
+
asOfTimestamp() - Method in class org.apache.kafka.streams.query.VersionedKeyQuery
+
+
The timestamp of the query, if specified.
+
+
assign(Collection<TopicPartition>) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
assign(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Manually assign a list of partitions to this consumer.
+
+
assign(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
assign(Map<String, Integer>, Map<String, ConsumerPartitionAssignor.Subscription>) - Method in class org.apache.kafka.clients.consumer.RangeAssignor
+
 
+
assign(Map<String, Integer>, Map<String, ConsumerPartitionAssignor.Subscription>) - Method in class org.apache.kafka.clients.consumer.RoundRobinAssignor
+
 
+
assign(Cluster, ConsumerPartitionAssignor.GroupSubscription) - Method in interface org.apache.kafka.clients.consumer.ConsumerPartitionAssignor
+
+
Perform the group assignment given the member subscriptions and current cluster metadata.
+
+
assign(GroupSpec, SubscribedTopicDescriber) - Method in interface org.apache.kafka.coordinator.group.api.assignor.PartitionAssignor
+
+
Assigns partitions to group members based on the given assignment specification and topic metadata.
+
+
assign(ApplicationState) - Method in class org.apache.kafka.streams.processor.assignment.assignors.StickyTaskAssignor
+
 
+
assign(ApplicationState) - Method in interface org.apache.kafka.streams.processor.assignment.TaskAssignor
+
 
+
ASSIGN_FROM_SUBSCRIBED_ASSIGNORS - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
AssignedTask(TaskId, KafkaStreamsAssignment.AssignedTask.Type) - Constructor for class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask
+
 
+
ASSIGNING - Enum constant in enum class org.apache.kafka.common.ConsumerGroupState
+
+
Deprecated.
+
ASSIGNING - Enum constant in enum class org.apache.kafka.common.GroupState
+
 
+
assignment() - Method in class org.apache.kafka.clients.admin.MemberDescription
+
+
The assignment of the group member.
+
+
assignment() - Method in class org.apache.kafka.clients.admin.ShareMemberDescription
+
+
The assignment of the group member.
+
+
assignment() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
The current assignment.
+
+
assignment() - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
assignment() - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get the set of partitions currently assigned to this consumer.
+
+
assignment() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
assignment() - Method in interface org.apache.kafka.connect.sink.SinkTaskContext
+
+
Get the current set of assigned TopicPartitions for this task.
+
+
assignment() - Method in class org.apache.kafka.streams.processor.assignment.TaskAssignor.TaskAssignment
+
 
+
Assignment(List<TopicPartition>) - Constructor for class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment
+
 
+
Assignment(List<TopicPartition>, ByteBuffer) - Constructor for class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment
+
 
+
ASSIGNMENT_LISTENER - Static variable in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
assignmentConfigs() - Method in interface org.apache.kafka.streams.processor.assignment.ApplicationState
+
 
+
AssignmentConfigs - Class in org.apache.kafka.streams.processor.assignment
+
+
Assignment related configs for the Kafka Streams TaskAssignor.
+
+
AssignmentConfigs(long, int, int, long, List<String>, int, int, String) - Constructor for class org.apache.kafka.streams.processor.assignment.AssignmentConfigs
+
 
+
AssignmentConfigs(long, int, int, long, List<String>, OptionalInt, OptionalInt, String) - Constructor for class org.apache.kafka.streams.processor.assignment.AssignmentConfigs
+
 
+
AssignmentConfigs(Long, Integer, Integer, Long, List<String>) - Constructor for class org.apache.kafka.streams.processor.assignment.AssignmentConfigs
+
 
+
assignments() - Method in class org.apache.kafka.clients.admin.NewPartitions
+
+
The replica assignments for the new partitions, or null if the assignment will be done by the controller.
+
+
assignPartitions(Map<String, List<PartitionInfo>>, Map<String, ConsumerPartitionAssignor.Subscription>) - Method in class org.apache.kafka.clients.consumer.CooperativeStickyAssignor
+
 
+
assignPartitions(Map<String, List<PartitionInfo>>, Map<String, ConsumerPartitionAssignor.Subscription>) - Method in class org.apache.kafka.clients.consumer.RangeAssignor
+
+
Performs range assignment of the specified partitions for the consumers with the provided subscriptions.
+
+
assignTask(KafkaStreamsAssignment.AssignedTask) - Method in class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment
+
 
+
asWrapped(ProcessorSupplier<KIn, VIn, KOut, VOut>) - Static method in interface org.apache.kafka.streams.processor.api.ProcessorWrapper
+
+
Use to convert a ProcessorSupplier instance into a WrappedProcessorSupplier
+
+
asWrappedFixedKey(FixedKeyProcessorSupplier<KIn, VIn, VOut>) - Static method in interface org.apache.kafka.streams.processor.api.ProcessorWrapper
+
+ +
+
at(Position) - Static method in class org.apache.kafka.streams.query.PositionBound
+
+
Creates a new PositionBound representing a specific position.
+
+
AT_LEAST_ONCE - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "processing.guarantee" for at-least-once processing guarantees.
+
+
atLeast(Number) - Static method in class org.apache.kafka.common.config.ConfigDef.Range
+
+
A numeric range that checks only the lower bound
+
+
atMostOfSize(int) - Static method in class org.apache.kafka.common.config.ConfigDef.ListSize
+
 
+
AuthenticateCallbackHandler - Interface in org.apache.kafka.common.security.auth
+
 
+
authenticated() - Method in class org.apache.kafka.common.security.plain.PlainAuthenticateCallback
+
+
Returns true if client password matches expected password, false otherwise.
+
+
authenticated(boolean) - Method in class org.apache.kafka.common.security.plain.PlainAuthenticateCallback
+
+
Sets the authenticated state.
+
+
AuthenticationContext - Interface in org.apache.kafka.common.security.auth
+
+
An object representing contextual information from the authentication session.
+
+
AuthenticationException - Exception in org.apache.kafka.common.errors
+
+
This exception indicates that SASL authentication has failed.
+
+
AuthenticationException(String) - Constructor for exception org.apache.kafka.common.errors.AuthenticationException
+
 
+
AuthenticationException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.AuthenticationException
+
 
+
AuthenticationException(Throwable) - Constructor for exception org.apache.kafka.common.errors.AuthenticationException
+
 
+
AuthorizableRequestContext - Interface in org.apache.kafka.server.authorizer
+
+
Request context interface that provides data from request header as well as connection and authentication information to plugins.
+
+
AuthorizationException - Exception in org.apache.kafka.common.errors
+
 
+
AuthorizationException(String) - Constructor for exception org.apache.kafka.common.errors.AuthorizationException
+
 
+
AuthorizationException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.AuthorizationException
+
 
+
AuthorizationResult - Enum Class in org.apache.kafka.server.authorizer
+
 
+
authorize(AuthorizableRequestContext, List<Action>) - Method in interface org.apache.kafka.server.authorizer.Authorizer
+
+
Authorizes the specified action.
+
+
authorizeByResourceType(AuthorizableRequestContext, AclOperation, ResourceType) - Method in interface org.apache.kafka.server.authorizer.Authorizer
+
+
Check if the caller is authorized to perform the given ACL operation on at least one resource of the given type.
+
+
authorizedOperations() - Method in class org.apache.kafka.clients.admin.ClassicGroupDescription
+
+
authorizedOperations for this group, or null if that information is not known.
+
+
authorizedOperations() - Method in class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+
authorizedOperations for this group, or null if that information is not known.
+
+
authorizedOperations() - Method in class org.apache.kafka.clients.admin.DescribeClusterResult
+
+
Returns a future which yields authorized operations.
+
+
authorizedOperations() - Method in class org.apache.kafka.clients.admin.ShareGroupDescription
+
+
authorizedOperations for this group, or null if that information is not known.
+
+
authorizedOperations() - Method in class org.apache.kafka.clients.admin.StreamsGroupDescription
+
+
authorizedOperations for this group, or null if that information is not known.
+
+
authorizedOperations() - Method in class org.apache.kafka.clients.admin.TopicDescription
+
+
authorized operations for this topic, or null if this is not known.
+
+
Authorizer - Interface in org.apache.kafka.server.authorizer
+
+
Pluggable authorizer interface for Kafka brokers.
+
+
AuthorizerNotReadyException - Exception in org.apache.kafka.common.errors
+
+
An exception that indicates that the authorizer is not ready to receive the request yet.
+
+
AuthorizerNotReadyException() - Constructor for exception org.apache.kafka.common.errors.AuthorizerNotReadyException
+
 
+
AuthorizerServerInfo - Interface in org.apache.kafka.server.authorizer
+
+
Runtime broker configuration metadata provided to authorizers during start up.
+
+
AUTO_COMMIT_INTERVAL_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
auto.commit.interval.ms
+
+
AUTO_OFFSET_RESET_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
auto.offset.reset
+
+
AUTO_OFFSET_RESET_DOC - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
AUTOMATIC_CONFIG_PROVIDERS_PROPERTY - Static variable in class org.apache.kafka.common.config.AbstractConfig
+
 
+
AutoOffsetReset - Class in org.apache.kafka.streams
+
+
Sets the auto.offset.reset configuration when adding a source processor or when creating KStream or KTable via StreamsBuilder.
+
+
availablePartitionsForTopic(String) - Method in class org.apache.kafka.common.Cluster
+
+
Get the list of available partitions for this topic
+
+
Avg - Class in org.apache.kafka.common.metrics.stats
+
+
A SampledStat that maintains a simple average over its samples.
+
+
Avg() - Constructor for class org.apache.kafka.common.metrics.stats.Avg
+
 
+
+

B

+
+
backwardAll() - Method in interface org.apache.kafka.streams.state.ReadOnlyWindowStore
+
+
Gets all the key-value pairs in the existing windows in backward order with respect to time (from end to beginning of time).
+
+
backwardFetch(K) - Method in interface org.apache.kafka.streams.state.ReadOnlySessionStore
+
+
Retrieve all aggregated sessions for the provided key.
+
+
backwardFetch(K, long, long) - Method in interface org.apache.kafka.streams.state.WindowStore
+
 
+
backwardFetch(K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.ReadOnlyWindowStore
+
+
Get all the key-value pairs with the given key and the time range from all the existing windows in backward order with respect to time (from end to beginning of time).
+
+
backwardFetch(K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.WindowStore
+
 
+
backwardFetch(K, K) - Method in interface org.apache.kafka.streams.state.ReadOnlySessionStore
+
+
Retrieve all aggregated sessions for the given range of keys.
+
+
backwardFetch(K, K, long, long) - Method in interface org.apache.kafka.streams.state.WindowStore
+
 
+
backwardFetch(K, K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.ReadOnlyWindowStore
+
+
Get all the key-value pairs in the given key range and time range from all the existing windows in backward order with respect to time (from end to beginning of time).
+
+
backwardFetch(K, K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.WindowStore
+
 
+
backwardFetchAll(long, long) - Method in interface org.apache.kafka.streams.state.WindowStore
+
 
+
backwardFetchAll(Instant, Instant) - Method in interface org.apache.kafka.streams.state.ReadOnlyWindowStore
+
+
Gets all the key-value pairs that belong to the windows within the given time range in backward order with respect to time (from end to beginning of time).
+
+
backwardFetchAll(Instant, Instant) - Method in interface org.apache.kafka.streams.state.WindowStore
+
 
+
backwardFindSessions(K, long, long) - Method in interface org.apache.kafka.streams.state.ReadOnlySessionStore
+
+
Fetch any sessions with the matching key whose session end is ≥ earliestSessionEndTime and whose session start is ≤ latestSessionStartTime, iterating from latest to earliest.
+
+
backwardFindSessions(K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.ReadOnlySessionStore
+
+
Fetch any sessions with the matching key whose session end is ≥ earliestSessionEndTime and whose session start is ≤ latestSessionStartTime, iterating from latest to earliest.
+
+
backwardFindSessions(K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.SessionStore
+
 
+
backwardFindSessions(K, K, long, long) - Method in interface org.apache.kafka.streams.state.ReadOnlySessionStore
+
+
Fetch any sessions in the given range of keys whose session end is ≥ earliestSessionEndTime and whose session start is ≤ latestSessionStartTime, iterating from latest to earliest.
+
+
backwardFindSessions(K, K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.ReadOnlySessionStore
+
+
Fetch any sessions in the given range of keys whose session end is ≥ earliestSessionEndTime and whose session start is ≤ latestSessionStartTime, iterating from latest to earliest.
+
+
backwardFindSessions(K, K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.SessionStore
+
 
+
BATCH_SIZE_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
batch.size
+
+
BatchingStateRestoreCallback - Interface in org.apache.kafka.streams.processor
+
+
Interface for batching restoration of a StateStore. It is expected that implementations of this class will not call the StateRestoreCallback.restore(byte[], byte[]) method.
+
+
before(Duration) - Method in class org.apache.kafka.streams.kstream.JoinWindows
+
+
Changes the start window boundary to timeDifference but keep the end window boundary as is.
+
+
beforeMs - Variable in class org.apache.kafka.streams.kstream.JoinWindows
+
+
Maximum time difference for tuples that are before the join tuple.
+
+
beforeOffset() - Method in class org.apache.kafka.clients.admin.RecordsToDelete
+
+
The offset before which all records will be deleted
+
+
beforeOffset(long) - Static method in class org.apache.kafka.clients.admin.RecordsToDelete
+
+
Delete all the records before the given offset
+
+
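RecordsToDelete.beforeOffset(long) is typically paired with Admin.deleteRecords to truncate a partition. A minimal sketch, assuming a local broker and a topic named "events" (both placeholders):

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.TopicPartition;

public class DeleteRecordsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            // Truncate partition 0 of "events" up to (but not including) offset 1000.
            TopicPartition tp = new TopicPartition("events", 0);
            admin.deleteRecords(Map.of(tp, RecordsToDelete.beforeOffset(1000L)))
                 .lowWatermarks().get(tp).get();
        }
    }
}
```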
beginningOffsets(Collection<TopicPartition>) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
beginningOffsets(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get the first offset for the given partitions.
+
+
beginningOffsets(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
beginningOffsets(Collection<TopicPartition>, Duration) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
beginningOffsets(Collection<TopicPartition>, Duration) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get the first offset for the given partitions.
+
+
beginningOffsets(Collection<TopicPartition>, Duration) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
beginTransaction() - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
Should be called before the start of each new transaction.
+
+
beginTransaction() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
beginTransaction() - Method in interface org.apache.kafka.clients.producer.Producer
+
+ +
+
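A minimal transactional-producer sketch showing where beginTransaction() fits relative to initTransactions(), commitTransaction(), and abortTransaction(); the bootstrap address, transactional id, and topic name are placeholders:

```java
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class TransactionalProducerExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "demo-tx-id");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.initTransactions();   // register the transactional id once
            producer.beginTransaction();   // must be called before each new transaction
            try {
                producer.send(new ProducerRecord<>("events", "key", "value"));
                producer.commitTransaction();
            } catch (RuntimeException e) {
                producer.abortTransaction();
                throw e;
            }
        }
    }
}
```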
beginTransactionException - Variable in class org.apache.kafka.clients.producer.MockProducer
+
 
+
between(Number, Number) - Static method in class org.apache.kafka.common.config.ConfigDef.Range
+
+
A numeric range that checks both the upper (inclusive) and lower bound
+
+
binding() - Method in class org.apache.kafka.clients.admin.DeleteAclsResult.FilterResult
+
+
Return the deleted ACL binding or null if there was an error.
+
+
bins() - Method in interface org.apache.kafka.common.metrics.stats.Histogram.BinScheme
+
+
Get the number of bins.
+
+
bins() - Method in class org.apache.kafka.common.metrics.stats.Histogram.ConstantBinScheme
+
 
+
bins() - Method in class org.apache.kafka.common.metrics.stats.Histogram.LinearBinScheme
+
 
+
bool() - Static method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
Boolean() - Static method in class org.apache.kafka.common.serialization.Serdes
+
+
A serde for nullable Boolean type.
+
+
BOOLEAN - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigType
+
 
+
BOOLEAN - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Type
+
+
Used for boolean values.
+
+
BOOLEAN - Enum constant in enum class org.apache.kafka.connect.data.Schema.Type
+
+
Boolean value (true or false)
+
+
BOOLEAN_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
BOOLEAN_SIZE - Static variable in class org.apache.kafka.streams.state.StateSerdes
+
 
+
BooleanDeserializer - Class in org.apache.kafka.common.serialization
+
 
+
BooleanDeserializer() - Constructor for class org.apache.kafka.common.serialization.BooleanDeserializer
+
 
+
BooleanSerde() - Constructor for class org.apache.kafka.common.serialization.Serdes.BooleanSerde
+
 
+
BooleanSerializer - Class in org.apache.kafka.common.serialization
+
 
+
BooleanSerializer() - Constructor for class org.apache.kafka.common.serialization.BooleanSerializer
+
 
+
bootstrap(List<InetSocketAddress>) - Static method in class org.apache.kafka.common.Cluster
+
+
Create a "bootstrap" cluster using the given list of host/ports
+
+
BOOTSTRAP_CONTROLLERS_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
+
bootstrap.controllers
+
+
BOOTSTRAP_CONTROLLERS_DOC - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
BOOTSTRAP_SERVERS_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
+
bootstrap.servers
+
+
BOOTSTRAP_SERVERS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
bootstrap.servers
+
+
BOOTSTRAP_SERVERS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
bootstrap.servers
+
+
BOOTSTRAP_SERVERS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
bootstrap.servers
+
+
bound() - Method in class org.apache.kafka.common.metrics.Quota
+
 
+
bound() - Method in exception org.apache.kafka.common.metrics.QuotaViolationException
+
 
+
branch(Predicate<? super K, ? super V>) - Method in interface org.apache.kafka.streams.kstream.BranchedKStream
+
+
Define a branch for records that match the predicate.
+
+
branch(Predicate<? super K, ? super V>, Branched<K, V>) - Method in interface org.apache.kafka.streams.kstream.BranchedKStream
+
+
Define a branch for records that match the predicate.
+
+
Branched<K,V> - Class in org.apache.kafka.streams.kstream
+
+
The Branched class is used to define the optional parameters when building branches with BranchedKStream.
+
+
BranchedKStream<K,V> - Interface in org.apache.kafka.streams.kstream
+
+
BranchedKStream is an abstraction of a branched record stream of key-value pairs.
+
+
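A sketch of how branch(Predicate, Branched) and Branched are typically used together via KStream.split(); the topic names, branch names, and threshold are made up for the example:

```java
import java.util.Map;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Branched;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Named;

public class BranchExample {
    static void defineBranches(StreamsBuilder builder) {
        KStream<String, Long> input = builder.stream("events");  // hypothetical topic
        Map<String, KStream<String, Long>> branches = input
            .split(Named.as("split-"))
            .branch((key, value) -> value != null && value > 100L, Branched.as("large"))
            .branch((key, value) -> value != null, Branched.as("small"))
            .defaultBranch(Branched.as("nulls"));
        // Branch names are prefixed with the split name, e.g. "split-large".
        branches.get("split-large").to("large-events");
    }
}
```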
BROKER - Enum constant in enum class org.apache.kafka.clients.admin.EndpointType
+
 
+
BROKER - Enum constant in enum class org.apache.kafka.common.config.ConfigResource.Type
+
 
+
BROKER_LOGGER - Enum constant in enum class org.apache.kafka.common.config.ConfigResource.Type
+
 
+
brokerId() - Method in class org.apache.kafka.clients.admin.DescribeProducersOptions
+
 
+
brokerId() - Method in class org.apache.kafka.common.TopicPartitionReplica
+
 
+
brokerId() - Method in interface org.apache.kafka.server.authorizer.AuthorizerServerInfo
+
+
Returns broker id.
+
+
brokerId() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogMetadata
+
 
+
brokerId(int) - Method in class org.apache.kafka.clients.admin.DescribeProducersOptions
+
 
+
BrokerIdNotRegisteredException - Exception in org.apache.kafka.common.errors
+
 
+
BrokerIdNotRegisteredException(String) - Constructor for exception org.apache.kafka.common.errors.BrokerIdNotRegisteredException
+
 
+
BrokerIdNotRegisteredException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.BrokerIdNotRegisteredException
+
 
+
BrokerJwtValidator - Class in org.apache.kafka.common.security.oauthbearer
+
+
BrokerJwtValidator is an implementation of JwtValidator used by the broker to perform more extensive validation of the JWT access token received from the client, which the client ultimately obtained by posting its credentials to the OAuth/OIDC provider's token endpoint.
+
+
BrokerJwtValidator() - Constructor for class org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator
+
+
A public, no-args constructor is necessary for instantiation via configuration.
+
+
BrokerJwtValidator.ClaimSupplier<T> - Interface in org.apache.kafka.common.security.oauthbearer
+
 
+
BrokerNotAvailableException - Exception in org.apache.kafka.common.errors
+
 
+
BrokerNotAvailableException(String) - Constructor for exception org.apache.kafka.common.errors.BrokerNotAvailableException
+
 
+
BrokerNotAvailableException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.BrokerNotAvailableException
+
 
+
BrokerNotFoundException - Exception in org.apache.kafka.streams.errors
+
+
Indicates that none of the specified brokers could be found.
+
+
BrokerNotFoundException(String) - Constructor for exception org.apache.kafka.streams.errors.BrokerNotFoundException
+
 
+
BrokerNotFoundException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.BrokerNotFoundException
+
 
+
BrokerNotFoundException(Throwable) - Constructor for exception org.apache.kafka.streams.errors.BrokerNotFoundException
+
 
+
brokerTopicStatsMetrics() - Static method in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
BUFFER_MEMORY_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
buffer.memory
+
+
BUFFERED_RECORDS_PER_PARTITION_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
buffered.records.per.partition
+
+
BUFFERED_RECORDS_PER_PARTITION_DOC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
BufferExhaustedException - Exception in org.apache.kafka.clients.producer
+
+
This exception is thrown if the producer cannot allocate memory for a record within max.block.ms due to the buffer being too full.
+
+
BufferExhaustedException(String) - Constructor for exception org.apache.kafka.clients.producer.BufferExhaustedException
+
 
+
build() - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
+
Build the Schema using the current settings
+
+
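A small sketch of building a Connect Schema with SchemaBuilder (using BOOLEAN_SCHEMA and related constants listed in this index); the Payment name and field names are illustrative only:

```java
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class SchemaBuilderExample {
    // A hypothetical value schema for a payments record.
    static final Schema PAYMENT_SCHEMA = SchemaBuilder.struct()
        .name("com.example.Payment")
        .field("id", Schema.STRING_SCHEMA)
        .field("confirmed", Schema.BOOLEAN_SCHEMA)
        .field("amount_cents", Schema.INT64_SCHEMA)
        .build();

    static Struct samplePayment() {
        return new Struct(PAYMENT_SCHEMA)
            .put("id", "p-123")
            .put("confirmed", true)
            .put("amount_cents", 4200L);
    }
}
```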
build() - Method in interface org.apache.kafka.streams.state.StoreBuilder
+
+
Build the store as defined by the builder.
+
+
build() - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Returns the Topology that represents the specified processing logic.
+
+
build(Properties) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Returns the Topology that represents the specified processing logic and accepts a Properties instance used to indicate whether to optimize topology or not.
+
+
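A sketch of the build(Properties) overload, which lets the builder apply topology optimization based on the supplied config before the Topology is handed to KafkaStreams; the topic names, application id, and bootstrap address are placeholders:

```java
import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;

public class BuildTopologyExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "demo-app");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);

        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic").to("output-topic");

        // Passing the props lets the builder optimize the topology before returning it.
        Topology topology = builder.build(props);
        KafkaStreams streams = new KafkaStreams(topology, props);
        streams.start();
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
}
```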
build(AuthenticationContext) - Method in interface org.apache.kafka.common.security.auth.KafkaPrincipalBuilder
+
+
Build a kafka principal from the authentication context.
+
+
BUILD_REMOTE_LOG_AUX_STATE_REQUESTS_PER_SEC_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
builder() - Static method in class org.apache.kafka.connect.data.Date
+
+
Returns a SchemaBuilder for a Date.
+
+
builder() - Static method in class org.apache.kafka.connect.data.Time
+
+
Returns a SchemaBuilder for a Time.
+
+
builder() - Static method in class org.apache.kafka.connect.data.Timestamp
+
+
Returns a SchemaBuilder for a Timestamp.
+
+
builder(int) - Static method in class org.apache.kafka.connect.data.Decimal
+
+
Returns a SchemaBuilder for a Decimal with the given scale factor.
+
+
buildFromEndpoint(String) - Static method in class org.apache.kafka.streams.state.HostInfo
+
 
+
BUILT_IN_METRICS_VERSION_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
built.in.metrics.version
+
+
BuiltInDslStoreSuppliers - Class in org.apache.kafka.streams.state
+
+
Collection of builtin DslStoreSuppliers for Kafka Streams.
+
+
BuiltInDslStoreSuppliers() - Constructor for class org.apache.kafka.streams.state.BuiltInDslStoreSuppliers
+
 
+
BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers - Class in org.apache.kafka.streams.state
+
+
A DslStoreSuppliers that supplies all stores backed by an in-memory map
+
+
BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers - Class in org.apache.kafka.streams.state
+
+
A DslStoreSuppliers that supplies all stores backed by RocksDB
+
+
byBrokerId() - Method in class org.apache.kafka.clients.admin.ListTransactionsResult
+
+
Get a future which returns a map containing the underlying listing future for each broker in the cluster.
+
+
byDuration(Duration) - Static method in class org.apache.kafka.streams.AutoOffsetReset
+
+
Creates an AutoOffsetReset instance for the specified reset duration.
+
+
ByteArray() - Static method in class org.apache.kafka.common.serialization.Serdes
+
+
A serde for nullable byte[] type.
+
+
ByteArrayDeserializer - Class in org.apache.kafka.common.serialization
+
 
+
ByteArrayDeserializer() - Constructor for class org.apache.kafka.common.serialization.ByteArrayDeserializer
+
 
+
ByteArraySerde() - Constructor for class org.apache.kafka.common.serialization.Serdes.ByteArraySerde
+
 
+
ByteArraySerializer - Class in org.apache.kafka.common.serialization
+
 
+
ByteArraySerializer() - Constructor for class org.apache.kafka.common.serialization.ByteArraySerializer
+
 
+
ByteBuffer() - Static method in class org.apache.kafka.common.serialization.Serdes
+
+
A serde for nullable ByteBuffer type.
+
+
ByteBufferDeserializer - Class in org.apache.kafka.common.serialization
+
 
+
ByteBufferDeserializer() - Constructor for class org.apache.kafka.common.serialization.ByteBufferDeserializer
+
 
+
ByteBufferSerde() - Constructor for class org.apache.kafka.common.serialization.Serdes.ByteBufferSerde
+
 
+
ByteBufferSerializer - Class in org.apache.kafka.common.serialization
+
+
ByteBufferSerializer always rewinds the position of the input buffer to zero for serialization.
+
+
ByteBufferSerializer() - Constructor for class org.apache.kafka.common.serialization.ByteBufferSerializer
+
 
+
bytes() - Static method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
Bytes() - Static method in class org.apache.kafka.common.serialization.Serdes
+
+
A serde for nullable Bytes type.
+
+
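The Serdes factory methods in this section (Boolean(), ByteArray(), ByteBuffer(), Bytes()) are usually passed to Consumed or Produced rather than instantiated directly. A minimal sketch, with a hypothetical topic name:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;

public class SerdesExample {
    // Explicit serdes avoid depending on the default.key.serde / default.value.serde configs.
    static KStream<String, byte[]> rawEvents(StreamsBuilder builder) {
        return builder.stream("raw-events", Consumed.with(Serdes.String(), Serdes.ByteArray()));
    }
}
```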
BYTES - Enum constant in enum class org.apache.kafka.connect.data.Schema.Type
+
+
Sequence of unsigned 8-bit bytes
+
+
BYTES_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
BytesDeserializer - Class in org.apache.kafka.common.serialization
+
 
+
BytesDeserializer() - Constructor for class org.apache.kafka.common.serialization.BytesDeserializer
+
 
+
BytesSerde() - Constructor for class org.apache.kafka.common.serialization.Serdes.BytesSerde
+
 
+
BytesSerializer - Class in org.apache.kafka.common.serialization
+
 
+
BytesSerializer() - Constructor for class org.apache.kafka.common.serialization.BytesSerializer
+
 
+
+

C

+
+
CACHE_MAX_BYTES_BUFFERING_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
Since 3.4. Use "statestore.cache.max.bytes" instead.
+
+
+
CACHE_MAX_BYTES_BUFFERING_DOC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
cacheSize - Variable in class org.apache.kafka.streams.TopologyConfig
+
 
+
Callback - Interface in org.apache.kafka.clients.producer
+
+
A callback interface that the user can implement to allow code to execute when the request is complete.
+
+
cancel() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedPunctuator
+
 
+
cancel() - Method in interface org.apache.kafka.streams.processor.Cancellable
+
+
Cancel the scheduled operation to avoid future calls.
+
+
cancel() - Method in class org.apache.kafka.streams.processor.MockProcessorContext.CapturedPunctuator
+
+
Deprecated.
+
cancel(boolean) - Method in class org.apache.kafka.common.KafkaFuture
+
+
If not already completed, completes this future with a CancellationException.
+
+
Cancellable - Interface in org.apache.kafka.streams.processor
+
+ +
+
cancelled() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedPunctuator
+
 
+
cancelled() - Method in class org.apache.kafka.streams.processor.MockProcessorContext.CapturedPunctuator
+
+
Deprecated.
+
canDefineTransactionBoundaries(Map<String, String>) - Method in class org.apache.kafka.connect.source.SourceConnector
+
+
Signals whether the connector implementation is capable of defining the transaction boundaries for a connector with the given configuration.
+
+
canMoveStandbyTask(KafkaStreamsState, KafkaStreamsState, TaskId, Map<ProcessId, KafkaStreamsAssignment>) - Method in interface org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils.MoveStandbyTaskPredicate
+
 
+
CapturedForward(Record<K, V>) - Constructor for class org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedForward
+
 
+
CapturedForward(Record<K, V>, Optional<String>) - Constructor for class org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedForward
+
 
+
centerValue() - Method in class org.apache.kafka.common.metrics.stats.Frequency
+
+
Get the value of this metrics center point.
+
+
CHECK_CRCS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
check.crcs
+
+
Checkpoint - Class in org.apache.kafka.connect.mirror
+
+
Checkpoint records emitted by MirrorCheckpointConnector.
+
+
Checkpoint(String, TopicPartition, long, long, String) - Constructor for class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
checkpointsTopic(String) - Method in class org.apache.kafka.connect.mirror.DefaultReplicationPolicy
+
 
+
checkpointsTopic(String) - Method in interface org.apache.kafka.connect.mirror.ReplicationPolicy
+
+
Returns the name of the checkpoints topic for given cluster alias.
+
+
checkpointTopics() - Method in class org.apache.kafka.connect.mirror.MirrorClient
+
+
Finds all checkpoints topics on this cluster.
+
+
checkpointTopics(Map<String, Object>) - Static method in class org.apache.kafka.connect.mirror.RemoteClusterUtils
+
+
Finds all checkpoints topics
+
+
checkQuotas() - Method in class org.apache.kafka.common.metrics.Sensor
+
+
Check if we have violated our quota for any metric that has a configured quota
+
+
checkQuotas(long) - Method in class org.apache.kafka.common.metrics.Sensor
+
 
+
child(String) - Static method in class org.apache.kafka.streams.processor.To
+
+
Forward the key/value pair to one of the downstream processors designated by the downstream processor name.
+
+
childName() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedForward
+
+
The child this data was forwarded to.
+
+
childName() - Method in class org.apache.kafka.streams.processor.MockProcessorContext.CapturedForward
+
+
Deprecated.
+
The child this data was forwarded to.
+
+
CLASS - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigType
+
 
+
CLASS - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Type
+
+
Used for values that implement a Kafka interface.
+
+
CLASSIC - Enum constant in enum class org.apache.kafka.clients.consumer.GroupProtocol
+
+
Classic group protocol.
+
+
CLASSIC - Enum constant in enum class org.apache.kafka.common.GroupType
+
 
+
CLASSIC - Enum constant in enum class org.apache.kafka.streams.GroupProtocol
+
+
Classic group protocol.
+
+
ClassicGroupDescription - Class in org.apache.kafka.clients.admin
+
+
A detailed description of a single classic group in the cluster.
+
+
ClassicGroupDescription(String, String, String, Collection<MemberDescription>, ClassicGroupState, Node) - Constructor for class org.apache.kafka.clients.admin.ClassicGroupDescription
+
 
+
ClassicGroupDescription(String, String, String, Collection<MemberDescription>, ClassicGroupState, Node, Set<AclOperation>) - Constructor for class org.apache.kafka.clients.admin.ClassicGroupDescription
+
 
+
ClassicGroupState - Enum Class in org.apache.kafka.common
+
+
The classic group state.
+
+
cleanUp() - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Do a cleanup of the local StateStore directory (StreamsConfig.STATE_DIR_CONFIG) by deleting all data with regard to the application ID.
+
+
CLEANUP_POLICY_COMPACT - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
CLEANUP_POLICY_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
CLEANUP_POLICY_DELETE - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
CLEANUP_POLICY_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
clear() - Method in class org.apache.kafka.clients.producer.MockProducer
+
+
Clear the stored history of sent records, consumer group offsets
+
+
clear() - Method in class org.apache.kafka.common.metrics.stats.Histogram
+
 
+
clear() - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
clear() - Method in interface org.apache.kafka.connect.header.Headers
+
+
Removes all headers from this object.
+
+
CLIENT_DNS_LOOKUP_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
+
client.dns.lookup
+
+
CLIENT_DNS_LOOKUP_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
client.dns.lookup
+
+
CLIENT_DNS_LOOKUP_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
client.dns.lookup
+
+
CLIENT_ID - Enum constant in enum class org.apache.kafka.server.quota.ClientQuotaEntity.ConfigEntityType
+
 
+
CLIENT_ID - Static variable in class org.apache.kafka.common.quota.ClientQuotaEntity
+
 
+
CLIENT_ID_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
CLIENT_ID_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
client.id
+
+
CLIENT_ID_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
client.id
+
+
CLIENT_ID_CONFIG - Static variable in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler
+
 
+
CLIENT_ID_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
client.id
+
+
CLIENT_ID_DOC - Static variable in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler
+
 
+
CLIENT_METRICS - Enum constant in enum class org.apache.kafka.common.config.ConfigResource.Type
+
 
+
CLIENT_RACK_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
client.rack
+
+
CLIENT_SECRET_CONFIG - Static variable in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler
+
 
+
CLIENT_SECRET_DOC - Static variable in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler
+
 
+
CLIENT_TAG_PREFIX - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix used to add arbitrary tags to a Kafka Stream's instance as key-value pairs.
+
+
clientAddress() - Method in interface org.apache.kafka.common.security.auth.AuthenticationContext
+
+
Address of the authenticated client
+
+
clientAddress() - Method in class org.apache.kafka.common.security.auth.PlaintextAuthenticationContext
+
 
+
clientAddress() - Method in class org.apache.kafka.common.security.auth.SaslAuthenticationContext
+
 
+
clientAddress() - Method in class org.apache.kafka.common.security.auth.SslAuthenticationContext
+
 
+
clientAddress() - Method in interface org.apache.kafka.server.authorizer.AuthorizableRequestContext
+
+
Returns client IP address from which request was sent.
+
+
ClientCredentialsJwtRetriever - Class in org.apache.kafka.common.security.oauthbearer
+
+
ClientCredentialsJwtRetriever is a JwtRetriever that performs the steps to request a JWT from an OAuth/OIDC identity provider using the client_credentials grant type.
+
+
ClientCredentialsJwtRetriever() - Constructor for class org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
+
 
+
clientHost() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
The host of the group member.
+
+
clientId() - Method in class org.apache.kafka.clients.admin.MemberDescription
+
+
The client id of the group member.
+
+
clientId() - Method in class org.apache.kafka.clients.admin.ShareMemberDescription
+
+
The client id of the group member.
+
+
clientId() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
The client ID of the group member.
+
+
clientId() - Method in interface org.apache.kafka.server.authorizer.AuthorizableRequestContext
+
+
Returns the client id from the request header.
+
+
clientInstanceId() - Method in interface org.apache.kafka.server.telemetry.ClientTelemetryPayload
+
+
Method returns the client's instance id.
+
+
clientInstanceId(Duration) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Determines the client's unique client instance ID used for telemetry.
+
+
clientInstanceId(Duration) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
clientInstanceId(Duration) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
clientInstanceId(Duration) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
+ +
+
clientInstanceId(Duration) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Determines the client's unique client instance ID used for telemetry.
+
+
clientInstanceId(Duration) - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Determines the client's unique client instance ID used for telemetry.
+
+
clientInstanceId(Duration) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
clientInstanceId(Duration) - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
clientInstanceId(Duration) - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
clientInstanceId(Duration) - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
Determines the client's unique client instance ID used for telemetry.
+
+
clientInstanceId(Duration) - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
clientInstanceId(Duration) - Method in interface org.apache.kafka.clients.producer.Producer
+
+ +
+
clientInstanceIds(Duration) - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Returns the internal clients' assigned client instance ids.
+
+
ClientInstanceIds - Interface in org.apache.kafka.streams
+
+
Encapsulates the client instance id used for metrics collection by producers, consumers, and the admin client used by Kafka Streams.
+
+
ClientJwtValidator - Class in org.apache.kafka.common.security.oauthbearer
+
+
ClientJwtValidator is an implementation of JwtValidator used by the client to perform some rudimentary validation of the JWT access token received in the response from posting the client credentials to the OAuth/OIDC provider's token endpoint.
+
+
ClientJwtValidator() - Constructor for class org.apache.kafka.common.security.oauthbearer.ClientJwtValidator
+
 
+
ClientMetricsResourceListing - Class in org.apache.kafka.clients.admin
+
+
Deprecated.
+
+
ClientMetricsResourceListing(String) - Constructor for class org.apache.kafka.clients.admin.ClientMetricsResourceListing
+
+
Deprecated.
+
clientProps() - Method in class org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest
+
+
Provides configs with prefix "producer.override." for source connectors and also sink connectors that are configured with a DLQ topic.
+
+
ClientQuotaAlteration - Class in org.apache.kafka.common.quota
+
+
Describes a configuration alteration to be made to a client quota entity.
+
+
ClientQuotaAlteration(ClientQuotaEntity, Collection<ClientQuotaAlteration.Op>) - Constructor for class org.apache.kafka.common.quota.ClientQuotaAlteration
+
 
+
ClientQuotaAlteration.Op - Class in org.apache.kafka.common.quota
+
 
+
ClientQuotaCallback - Interface in org.apache.kafka.server.quota
+
+
Quota callback interface for brokers and controllers that enables customization of client quota computation.
+
+
ClientQuotaEntity - Class in org.apache.kafka.common.quota
+
+
Describes a client quota entity, which is a mapping of entity types to their names.
+
+
ClientQuotaEntity - Interface in org.apache.kafka.server.quota
+
+
The metadata for an entity for which quota is configured.
+
+
ClientQuotaEntity(Map<String, String>) - Constructor for class org.apache.kafka.common.quota.ClientQuotaEntity
+
+
Constructs a quota entity for the given types and names.
+
+
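ClientQuotaEntity and ClientQuotaAlteration are usually built together and passed to Admin.alterClientQuotas. A sketch, assuming a local broker and the standard producer_byte_rate quota key; the client id and rate are placeholders:

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.quota.ClientQuotaAlteration;
import org.apache.kafka.common.quota.ClientQuotaEntity;

public class ClientQuotaExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            // Cap produce throughput for client.id "reporting-app" at ~1 MiB/s.
            ClientQuotaEntity entity =
                new ClientQuotaEntity(Map.of(ClientQuotaEntity.CLIENT_ID, "reporting-app"));
            ClientQuotaAlteration.Op op =
                new ClientQuotaAlteration.Op("producer_byte_rate", 1_048_576.0);
            admin.alterClientQuotas(List.of(new ClientQuotaAlteration(entity, List.of(op))))
                 .all().get();
        }
    }
}
```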
ClientQuotaEntity.ConfigEntity - Interface in org.apache.kafka.server.quota
+
+
Interface representing a quota configuration entity.
+
+
ClientQuotaEntity.ConfigEntityType - Enum Class in org.apache.kafka.server.quota
+
+ +
+
ClientQuotaFilter - Class in org.apache.kafka.common.quota
+
+
Describes a client quota entity filter.
+
+
ClientQuotaFilterComponent - Class in org.apache.kafka.common.quota
+
+
Describes a component for applying a client quota filter.
+
+
ClientQuotaType - Enum Class in org.apache.kafka.server.quota
+
+
Types of quotas that may be configured on brokers for client requests.
+
+
clientReceiver() - Method in interface org.apache.kafka.server.telemetry.ClientTelemetry
+
+
Called by the broker to fetch instance of ClientTelemetryReceiver.
+
+
clientTagPrefix(String) - Static method in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix a client tag key with StreamsConfig.CLIENT_TAG_PREFIX.
+
+
clientTags() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
Used for rack-aware assignment algorithm.
+
+
clientTags() - Method in interface org.apache.kafka.streams.processor.assignment.KafkaStreamsState
+
+
The client tags for this KafkaStreams client, if any have been set via configs using the StreamsConfig.clientTagPrefix(java.lang.String)
+
+
ClientTelemetry - Interface in org.apache.kafka.server.telemetry
+
+
A MetricsReporter may implement this interface to indicate support for collecting client telemetry on the server side.
+
+
ClientTelemetryPayload - Interface in org.apache.kafka.server.telemetry
+
+
A client telemetry payload as sent by the client to the telemetry receiver.
+
+
ClientTelemetryReceiver - Interface in org.apache.kafka.server.telemetry
+
+
ClientTelemetryReceiver defines the behaviour for the telemetry receiver on the broker side which receives client telemetry metrics.
+
+
clientType() - Method in class org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest
+
+ +
+
close() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Close the Admin and release all associated resources.
+
+
close() - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
close() - Method in interface org.apache.kafka.clients.consumer.ConsumerInterceptor
+
+
This is called when interceptor is closed
+
+
close() - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Close the consumer with default leave group behavior, waiting for up to the default timeout of 30 seconds for any needed cleanup.
+
+
close() - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Close the consumer, waiting for up to the default timeout of 30 seconds for any needed cleanup.
+
+
close() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
close() - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
close() - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
close() - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
Close this producer.
+
+
close() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
close() - Method in interface org.apache.kafka.clients.producer.Partitioner
+
+
This is called when partitioner is closed.
+
+
close() - Method in interface org.apache.kafka.clients.producer.Producer
+
+ +
+
close() - Method in interface org.apache.kafka.clients.producer.ProducerInterceptor
+
+
This is called when interceptor is closed
+
+
close() - Method in class org.apache.kafka.clients.producer.RoundRobinPartitioner
+
 
+
close() - Method in class org.apache.kafka.common.config.provider.DirectoryConfigProvider
+
 
+
close() - Method in class org.apache.kafka.common.config.provider.EnvVarConfigProvider
+
 
+
close() - Method in class org.apache.kafka.common.config.provider.FileConfigProvider
+
 
+
close() - Method in interface org.apache.kafka.common.MessageFormatter
+
+
Closes the formatter
+
+
close() - Method in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
close() - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Close this metrics repository.
+
+
close() - Method in interface org.apache.kafka.common.metrics.MetricsReporter
+
+
Called when the metrics repository is closed.
+
+
close() - Method in interface org.apache.kafka.common.security.auth.AuthenticateCallbackHandler
+
+
Closes this instance.
+
+
close() - Method in interface org.apache.kafka.common.security.auth.Login
+
+
Closes this instance.
+
+
close() - Method in class org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
+
 
+
close() - Method in class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
+
 
+
close() - Method in class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
+
 
+
close() - Method in class org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever
+
 
+
close() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler
+
 
+
close() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler
+
 
+
close() - Method in interface org.apache.kafka.common.serialization.Deserializer
+
+
Close this deserializer.
+
+
close() - Method in class org.apache.kafka.common.serialization.ListDeserializer
+
 
+
close() - Method in class org.apache.kafka.common.serialization.ListSerializer
+
 
+
close() - Method in interface org.apache.kafka.common.serialization.Serde
+
+
Close this serde class, which will close the underlying serializer and deserializer.
+
+
close() - Method in class org.apache.kafka.common.serialization.Serdes.WrapperSerde
+
 
+
close() - Method in interface org.apache.kafka.common.serialization.Serializer
+
+
Close this serializer.
+
+
close() - Method in class org.apache.kafka.connect.mirror.MirrorClient
+
+
Closes internal clients.
+
+
close() - Method in interface org.apache.kafka.connect.storage.Converter
+
 
+
close() - Method in class org.apache.kafka.connect.storage.SimpleHeaderConverter
+
 
+
close() - Method in class org.apache.kafka.connect.storage.StringConverter
+
 
+
close() - Method in interface org.apache.kafka.connect.transforms.predicates.Predicate
+
 
+
close() - Method in interface org.apache.kafka.connect.transforms.Transformation
+
+
Signal that this transformation instance will no longer be used.
+
+
close() - Method in interface org.apache.kafka.server.quota.ClientQuotaCallback
+
+
Closes this instance.
+
+
close() - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Shutdown this KafkaStreams instance by signaling all the threads to stop, and then wait for them to join.
+
+
close() - Method in class org.apache.kafka.streams.kstream.SessionWindowedDeserializer
+
 
+
close() - Method in class org.apache.kafka.streams.kstream.SessionWindowedSerializer
+
 
+
close() - Method in class org.apache.kafka.streams.kstream.TimeWindowedDeserializer
+
 
+
close() - Method in class org.apache.kafka.streams.kstream.TimeWindowedSerializer
+
 
+
close() - Method in interface org.apache.kafka.streams.kstream.Transformer
+
+
Deprecated.
+
Close this transformer and clean up any resources.
+
+
close() - Method in interface org.apache.kafka.streams.kstream.ValueTransformer
+
+
Deprecated.
+
Close this transformer and clean up any resources.
+
+
close() - Method in interface org.apache.kafka.streams.kstream.ValueTransformerWithKey
+
+
Close this processor and clean up any resources.
+
+
close() - Method in interface org.apache.kafka.streams.processor.api.FixedKeyProcessor
+
+
Close this processor and clean up any resources.
+
+
close() - Method in interface org.apache.kafka.streams.processor.api.Processor
+
+
Close this processor and clean up any resources.
+
+
close() - Method in interface org.apache.kafka.streams.processor.StateStore
+
+
Close the storage engine.
+
+
close() - Method in interface org.apache.kafka.streams.state.KeyValueIterator
+
 
+
close() - Method in interface org.apache.kafka.streams.state.VersionedRecordIterator
+
 
+
close() - Method in interface org.apache.kafka.streams.state.WindowStoreIterator
+
 
+
close() - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Close the driver, its topology, and all processors.
+
+
close() - Method in interface org.apache.kafka.tools.api.RecordReader
+
+
Closes this reader.
+
+
close(String, Options) - Method in interface org.apache.kafka.streams.state.RocksDBConfigSetter
+
+
Close any user-constructed objects that inherit from org.rocksdb.RocksObject.
+
+
close(Duration) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Close the Admin client and release all associated resources.
+
+
close(Duration) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
close(Duration) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
close(Duration) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
+
Deprecated.
+
+
close(Duration) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Deprecated.
+
+
close(Duration) - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Tries to close the consumer cleanly within the specified timeout.
+
+
close(Duration) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
+
Deprecated.
+
+
close(Duration) - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
close(Duration) - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
close(Duration) - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
This method waits up to timeout for the producer to complete the sending of all incomplete requests.
+
+
close(Duration) - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
close(Duration) - Method in interface org.apache.kafka.clients.producer.Producer
+
+ +
+
close(Duration) - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Shutdown this KafkaStreams by signaling all the threads to stop, and then wait up to the timeout for the threads to join.
+
+
close(Collection<TopicPartition>) - Method in class org.apache.kafka.connect.sink.SinkTask
+
+
The SinkTask uses this method to close writers for partitions that are no longer assigned to the SinkTask.
+
+
close(CloseOptions) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
close(CloseOptions) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Close the consumer cleanly.
+
+
close(CloseOptions) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
close(KafkaStreams.CloseOptions) - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Shutdown this KafkaStreams by signaling all the threads to stop, and then wait up to the timeout for the threads to join.
+
+
closed() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
closed() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
closeException - Variable in class org.apache.kafka.clients.producer.MockProducer
+
 
+
CloseOptions - Class in org.apache.kafka.clients.consumer
+
 
+
CloseOptions() - Constructor for class org.apache.kafka.streams.KafkaStreams.CloseOptions
+
 
+
CloseOptions.GroupMembershipOperation - Enum Class in org.apache.kafka.clients.consumer
+
+
Enum to specify the group membership operation upon leaving group.
+
+
Cluster - Class in org.apache.kafka.common
+
+
An immutable representation of a subset of the nodes, topics, and partitions in the Kafka cluster.
+
+
Cluster(String, Collection<Node>, Collection<PartitionInfo>, Set<String>, Set<String>) - Constructor for class org.apache.kafka.common.Cluster
+
+
Create a new cluster with the given id, nodes and partitions
+
+
Cluster(String, Collection<Node>, Collection<PartitionInfo>, Set<String>, Set<String>, Set<String>, Node) - Constructor for class org.apache.kafka.common.Cluster
+
+
Create a new cluster with the given id, nodes and partitions
+
+
Cluster(String, Collection<Node>, Collection<PartitionInfo>, Set<String>, Set<String>, Set<String>, Node, Map<String, Uuid>) - Constructor for class org.apache.kafka.common.Cluster
+
+
Create a new cluster with the given id, nodes, partitions and topicIds
+
+
Cluster(String, Collection<Node>, Collection<PartitionInfo>, Set<String>, Set<String>, Node) - Constructor for class org.apache.kafka.common.Cluster
+
+
Create a new cluster with the given id, nodes and partitions
+
+
CLUSTER - Enum constant in enum class org.apache.kafka.common.resource.ResourceType
+
+
The cluster as a whole.
+
+
CLUSTER - Static variable in class org.apache.kafka.common.resource.Resource
+
+
A resource representing the whole cluster.
+
+
CLUSTER_ACTION - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
CLUSTER_ACTION operation.
+
+
CLUSTER_NAME - Static variable in class org.apache.kafka.common.resource.Resource
+
+
The name of the CLUSTER resource.
+
+
ClusterAuthorizationException - Exception in org.apache.kafka.common.errors
+
 
+
ClusterAuthorizationException(String) - Constructor for exception org.apache.kafka.common.errors.ClusterAuthorizationException
+
 
+
ClusterAuthorizationException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.ClusterAuthorizationException
+
 
+
clusterDetails() - Method in interface org.apache.kafka.connect.health.ConnectClusterState
+
+
Get details about the setup of the Connect cluster.
+
+
clusterId() - Method in class org.apache.kafka.clients.admin.AddRaftVoterOptions
+
 
+
clusterId() - Method in class org.apache.kafka.clients.admin.DescribeClusterResult
+
+
Returns a future which yields the current cluster id.
+
+
clusterId() - Method in class org.apache.kafka.clients.admin.RemoveRaftVoterOptions
+
 
+
clusterId() - Method in class org.apache.kafka.common.ClusterResource
+
+
Return the cluster id.
+
+
clusterResource() - Method in class org.apache.kafka.common.Cluster
+
 
+
clusterResource() - Method in interface org.apache.kafka.server.authorizer.AuthorizerServerInfo
+
+
Returns cluster metadata for the broker running this authorizer including cluster id.
+
+
ClusterResource - Class in org.apache.kafka.common
+
+
The ClusterResource class encapsulates metadata for a Kafka cluster.
+
+
ClusterResource(String) - Constructor for class org.apache.kafka.common.ClusterResource
+
+
Create ClusterResource with a cluster id.
+
+
ClusterResourceListener - Interface in org.apache.kafka.common
+
+
A callback interface that users can implement when they wish to get notified about changes in the Cluster metadata.
+
+
clusterState() - Method in interface org.apache.kafka.connect.rest.ConnectRestExtensionContext
+
+
Provides the cluster state and health information about the connectors and tasks.
+
+
code() - Method in enum class org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType
+
 
+
code() - Method in enum class org.apache.kafka.common.acl.AclOperation
+
+
Return the code of this operation.
+
+
code() - Method in enum class org.apache.kafka.common.acl.AclPermissionType
+
+
Return the code of this permission type.
+
+
code() - Method in enum class org.apache.kafka.common.resource.PatternType
+
 
+
code() - Method in enum class org.apache.kafka.common.resource.ResourceType
+
+
Return the code of this resource.
+
+
cogroup(Aggregator<? super K, ? super V, VOut>) - Method in interface org.apache.kafka.streams.kstream.KGroupedStream
+
+
Create a new CogroupedKStream from this grouped KStream to allow cogrouping other KGroupedStream to it.
+
+
cogroup(KGroupedStream<K, V>, Aggregator<? super K, ? super V, VOut>) - Method in interface org.apache.kafka.streams.kstream.CogroupedKStream
+
+
Add an already grouped KStream to this CogroupedKStream.
+
+
CogroupedKStream<K,VOut> - Interface in org.apache.kafka.streams.kstream
+
+
CogroupedKStream is an abstraction of one or more grouped record streams of key-value pairs.
+
+
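A sketch of the cogroup(...) flow described above, merging two grouped streams into one aggregate; the topic names, the store name, and the Long value type are assumptions for the example:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Aggregator;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;

public class CogroupExample {
    // Sum per-user activity from two separately sourced streams into a single KTable.
    static KTable<String, Long> userActivity(StreamsBuilder builder) {
        KStream<String, Long> clicks = builder.stream("clicks", Consumed.with(Serdes.String(), Serdes.Long()));
        KStream<String, Long> views  = builder.stream("views",  Consumed.with(Serdes.String(), Serdes.Long()));
        Aggregator<String, Long, Long> add = (key, value, aggregate) -> aggregate + value;
        return clicks.groupByKey()
                     .cogroup(add)
                     .cogroup(views.groupByKey(), add)
                     .aggregate(() -> 0L, Materialized.as("user-activity-store"));
    }
}
```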
combine(List<SampledStat.Sample>, MetricConfig, long) - Method in class org.apache.kafka.common.metrics.stats.Avg
+
 
+
combine(List<SampledStat.Sample>, MetricConfig, long) - Method in class org.apache.kafka.common.metrics.stats.Frequencies
+
 
+
combine(List<SampledStat.Sample>, MetricConfig, long) - Method in class org.apache.kafka.common.metrics.stats.Max
+
 
+
combine(List<SampledStat.Sample>, MetricConfig, long) - Method in class org.apache.kafka.common.metrics.stats.Min
+
 
+
combine(List<SampledStat.Sample>, MetricConfig, long) - Method in class org.apache.kafka.common.metrics.stats.Percentiles
+
 
+
combine(List<SampledStat.Sample>, MetricConfig, long) - Method in class org.apache.kafka.common.metrics.stats.SampledStat
+
 
+
combine(List<SampledStat.Sample>, MetricConfig, long) - Method in class org.apache.kafka.common.metrics.stats.WindowedSum
+
 
+
commit() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule
+
 
+
commit() - Method in class org.apache.kafka.common.security.plain.PlainLoginModule
+
 
+
commit() - Method in class org.apache.kafka.common.security.scram.ScramLoginModule
+
 
+
commit() - Method in class org.apache.kafka.connect.source.SourceTask
+
+
This method is invoked periodically when offsets are committed for this source task.
+
+
commit() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
commit() - Method in interface org.apache.kafka.streams.processor.api.ProcessingContext
+
+
Request a commit.
+
+
commit() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
commit() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Request a commit.
+
+
COMMIT_INTERVAL_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
commit.interval.ms
+
+
commitAsync() - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
commitAsync() - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Commit offsets returned on the last KafkaConsumer.poll(Duration) for all the subscribed list of topics and partitions.
+
+
commitAsync() - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Commit the acknowledgements for the records returned.
+
+
commitAsync() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
commitAsync() - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
commitAsync() - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
commitAsync(Map<TopicPartition, OffsetAndMetadata>, OffsetCommitCallback) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
commitAsync(Map<TopicPartition, OffsetAndMetadata>, OffsetCommitCallback) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Commit the specified offsets for the specified list of topics and partitions to Kafka.
+
+
commitAsync(Map<TopicPartition, OffsetAndMetadata>, OffsetCommitCallback) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
commitAsync(OffsetCommitCallback) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
commitAsync(OffsetCommitCallback) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Commit offsets returned on the last poll() for the subscribed list of topics and partitions.
+
+
commitAsync(OffsetCommitCallback) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
CommitCallback - Interface in org.apache.kafka.streams.processor
+
+
Stores can register this callback to be notified upon successful commit.
+
+
commitCount() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
CommitFailedException - Exception in org.apache.kafka.clients.consumer
+
+
This exception is raised when an offset commit with KafkaConsumer.commitSync() fails with an unrecoverable error.
+
+
CommitFailedException() - Constructor for exception org.apache.kafka.clients.consumer.CommitFailedException
+
 
+
CommitFailedException(String) - Constructor for exception org.apache.kafka.clients.consumer.CommitFailedException
+
 
+
commitRecord(SourceRecord, RecordMetadata) - Method in class org.apache.kafka.connect.source.SourceTask
+
+
Commit an individual SourceRecord when the callback from the producer client is received.
+
+
commitRecord(SourceRecord, RecordMetadata) - Method in class org.apache.kafka.connect.tools.VerifiableSourceTask
+
 
+
commitSync() - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
commitSync() - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Commit offsets returned on the last poll() for all the subscribed list of topics and partitions.
+
+
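A minimal consume-then-commitSync() loop with auto-commit disabled, so offsets are committed only after the records from the last poll() have been processed; the bootstrap address, group id, and topic are placeholders:

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class CommitSyncExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("events"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("%s:%d -> %s%n", record.topic(), record.offset(), record.value());
                }
                consumer.commitSync();  // commit the offsets returned by the last poll()
            }
        }
    }
}
```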
commitSync() - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Commit the acknowledgements for the records returned.
+
+
commitSync() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
commitSync() - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
commitSync() - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
commitSync(Duration) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
commitSync(Duration) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Commit offsets returned on the last poll() for all the subscribed list of topics and partitions.
+
+
commitSync(Duration) - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Commit the acknowledgements for the records returned.
+
+
commitSync(Duration) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
commitSync(Duration) - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
commitSync(Duration) - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
commitSync(Map<TopicPartition, OffsetAndMetadata>) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
commitSync(Map<TopicPartition, OffsetAndMetadata>) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Commit the specified offsets for the specified list of topics and partitions.
+
+
commitSync(Map<TopicPartition, OffsetAndMetadata>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
commitSync(Map<TopicPartition, OffsetAndMetadata>, Duration) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
commitSync(Map<TopicPartition, OffsetAndMetadata>, Duration) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Commit the specified offsets for the specified list of topics and partitions.
+
+
commitSync(Map<TopicPartition, OffsetAndMetadata>, Duration) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
committed() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
+
Whether ProcessingContext.commit() has been called in this context.
+
+
committed() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
Whether ProcessorContext.commit() has been called in this context.
+
+
committed(Set<TopicPartition>) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
committed(Set<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get the last committed offsets for the given partitions (whether the commit happened by this process or another).
+
+
committed(Set<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
committed(Set<TopicPartition>, Duration) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
committed(Set<TopicPartition>, Duration) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get the last committed offsets for the given partitions (whether the commit happened by this process or another).
+
+
committed(Set<TopicPartition>, Duration) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
committedOffsets() - Method in interface org.apache.kafka.streams.TaskMetadata
+
+
Offsets of the source topic partitions committed so far by the task.
+
+
commitTransaction() - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
Commits the ongoing transaction.
+
+
commitTransaction() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
commitTransaction() - Method in interface org.apache.kafka.clients.producer.Producer
+
+ +
+
commitTransaction() - Method in interface org.apache.kafka.connect.source.TransactionContext
+
+
Request a transaction commit after the next batch of records from SourceTask.poll() is processed.
+
+
commitTransaction(SourceRecord) - Method in interface org.apache.kafka.connect.source.TransactionContext
+
+
Request a transaction commit after a source record is processed.
+
+
commitTransactionException - Variable in class org.apache.kafka.clients.producer.MockProducer
+
 
+
compareTo(Uuid) - Method in class org.apache.kafka.common.Uuid
+
 
+
compareTo(ProcessId) - Method in class org.apache.kafka.streams.processor.assignment.ProcessId
+
 
+
compareTo(TaskId) - Method in class org.apache.kafka.streams.processor.TaskId
+
 
+
compilePredicate(Map<String, ?>) - Static method in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
COMPLETE_ABORT - Enum constant in enum class org.apache.kafka.clients.admin.TransactionState
+
 
+
COMPLETE_COMMIT - Enum constant in enum class org.apache.kafka.clients.admin.TransactionState
+
 
+
COMPLETE_RECORD_DATA_CONFIG - Static variable in class org.apache.kafka.connect.tools.VerifiableSourceTask
+
 
+
completedFuture(U) - Static method in class org.apache.kafka.common.KafkaFuture
+
+
Returns a new KafkaFuture that is already completed with the given value.
+
+
completeNext() - Method in class org.apache.kafka.clients.producer.MockProducer
+
+
Complete the earliest uncompleted call successfully.
+
+
COMPLETING_REBALANCE - Enum constant in enum class org.apache.kafka.common.ClassicGroupState
+
 
+
COMPLETING_REBALANCE - Enum constant in enum class org.apache.kafka.common.ConsumerGroupState
+
+
Deprecated.
+
COMPLETING_REBALANCE - Enum constant in enum class org.apache.kafka.common.GroupState
+
 
+
components() - Method in class org.apache.kafka.common.quota.ClientQuotaFilter
+
 
+
CompoundStat - Interface in org.apache.kafka.common.metrics
+
+
A compound stat is a stat where a single measurement and associated data structure feeds many metrics.
+
+
CompoundStat.NamedMeasurable - Class in org.apache.kafka.common.metrics
+
 
+
COMPRESSION_GZIP_LEVEL_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
compression.gzip.level
+
+
COMPRESSION_GZIP_LEVEL_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
COMPRESSION_GZIP_LEVEL_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
COMPRESSION_LZ4_LEVEL_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
compression.lz4.level
+
+
COMPRESSION_LZ4_LEVEL_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
COMPRESSION_LZ4_LEVEL_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
COMPRESSION_TYPE_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
compression.type
+
+
COMPRESSION_TYPE_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
COMPRESSION_TYPE_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
COMPRESSION_ZSTD_LEVEL_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
compression.zstd.level
+
+
COMPRESSION_ZSTD_LEVEL_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
COMPRESSION_ZSTD_LEVEL_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
ConcurrentTransactionsException - Exception in org.apache.kafka.common.errors
+
 
+
ConcurrentTransactionsException(String) - Constructor for exception org.apache.kafka.common.errors.ConcurrentTransactionsException
+
 
+
config() - Method in class org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig
+
 
+
config() - Method in class org.apache.kafka.common.metrics.KafkaMetric
+
+
Get the configuration of this metric.
+
+
config() - Method in class org.apache.kafka.common.metrics.Metrics
+
 
+
config() - Method in class org.apache.kafka.connect.connector.Connector
+
+
Define the configuration for the connector.
+
+
config() - Method in interface org.apache.kafka.connect.storage.Converter
+
+
Configuration specification for this converter.
+
+
config() - Method in interface org.apache.kafka.connect.storage.HeaderConverter
+
+
Configuration specification for this set of header converters.
+
+
config() - Method in class org.apache.kafka.connect.storage.SimpleHeaderConverter
+
 
+
config() - Method in class org.apache.kafka.connect.storage.StringConverter
+
 
+
config() - Method in class org.apache.kafka.connect.tools.MockConnector
+
 
+
config() - Method in class org.apache.kafka.connect.tools.MockSinkConnector
+
 
+
config() - Method in class org.apache.kafka.connect.tools.MockSourceConnector
+
 
+
config() - Method in class org.apache.kafka.connect.tools.SchemaSourceConnector
+
 
+
config() - Method in class org.apache.kafka.connect.tools.VerifiableSinkConnector
+
 
+
config() - Method in class org.apache.kafka.connect.tools.VerifiableSourceConnector
+
 
+
config() - Method in interface org.apache.kafka.connect.transforms.predicates.Predicate
+
+
Configuration specification for this predicate.
+
+
config() - Method in interface org.apache.kafka.connect.transforms.Transformation
+
+
Configuration specification for this transformation.
+
+
config(String) - Method in class org.apache.kafka.clients.admin.CreateTopicsResult
+
+
Returns a future that provides topic configs for the topic when the request completes.
+
+
config(MetricConfig) - Method in class org.apache.kafka.common.metrics.KafkaMetric
+
+
Set the metric config.
+
+
Config - Class in org.apache.kafka.clients.admin
+
+
A configuration object containing the configuration entries for a resource.
+
+
Config - Class in org.apache.kafka.common.config
+
 
+
Config(Collection<ConfigEntry>) - Constructor for class org.apache.kafka.clients.admin.Config
+
+
Create a configuration instance with the provided entries.
+
+
Config(List<ConfigValue>) - Constructor for class org.apache.kafka.common.config.Config
+
 
+
CONFIG_PROVIDERS_CONFIG - Static variable in class org.apache.kafka.common.config.AbstractConfig
+
 
+
ConfigChangeCallback - Interface in org.apache.kafka.common.config
+
+
A callback passed to ConfigProvider for subscribing to changes.
+
+
ConfigData - Class in org.apache.kafka.common.config
+
+
Configuration data from a ConfigProvider.
+
+
ConfigData(Map<String, String>) - Constructor for class org.apache.kafka.common.config.ConfigData
+
+
Creates a new ConfigData with the given data.
+
+
ConfigData(Map<String, String>, Long) - Constructor for class org.apache.kafka.common.config.ConfigData
+
+
Creates a new ConfigData with the given data and TTL (in milliseconds).
+
+
configDef() - Static method in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
configDef() - Static method in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
configDef() - Static method in class org.apache.kafka.clients.producer.ProducerConfig
+
 
+
configDef() - Static method in class org.apache.kafka.connect.storage.StringConverterConfig
+
 
+
configDef() - Static method in class org.apache.kafka.streams.StreamsConfig
+
+
Return a copy of the config definition.
+
+
ConfigDef - Class in org.apache.kafka.common.config
+
+
This class is used for specifying the set of expected configurations.
+
+
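For illustration, a minimal sketch of declaring expected configurations with ConfigDef and parsing user-supplied values; the key names, defaults, and URL below are hypothetical:

```java
import java.util.Map;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Range;
import org.apache.kafka.common.config.ConfigDef.Type;

public class ExampleConfigDef {
    // Hypothetical keys used only for this sketch.
    static final ConfigDef CONFIG_DEF = new ConfigDef()
            .define("endpoint.url", Type.STRING, Importance.HIGH, "Target endpoint URL.")
            .define("batch.size", Type.INT, 100, Range.atLeast(1), Importance.MEDIUM, "Records per batch.");

    public static void main(String[] args) {
        // parse() validates the supplied values and fills in defaults.
        Map<String, Object> parsed = CONFIG_DEF.parse(Map.of("endpoint.url", "https://example.invalid"));
        System.out.println(parsed.get("batch.size")); // prints 100 (the default)
    }
}
```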
ConfigDef() - Constructor for class org.apache.kafka.common.config.ConfigDef
+
 
+
ConfigDef(ConfigDef) - Constructor for class org.apache.kafka.common.config.ConfigDef
+
 
+
ConfigDef.CaseInsensitiveValidString - Class in org.apache.kafka.common.config
+
 
+
ConfigDef.CompositeValidator - Class in org.apache.kafka.common.config
+
 
+
ConfigDef.ConfigKey - Class in org.apache.kafka.common.config
+
 
+
ConfigDef.Importance - Enum Class in org.apache.kafka.common.config
+
+
The importance level for a configuration
+
+
ConfigDef.LambdaValidator - Class in org.apache.kafka.common.config
+
 
+
ConfigDef.ListSize - Class in org.apache.kafka.common.config
+
 
+
ConfigDef.NonEmptyString - Class in org.apache.kafka.common.config
+
 
+
ConfigDef.NonEmptyStringWithoutControlChars - Class in org.apache.kafka.common.config
+
 
+
ConfigDef.NonNullValidator - Class in org.apache.kafka.common.config
+
 
+
ConfigDef.Range - Class in org.apache.kafka.common.config
+
+
Validation logic for numeric ranges
+
+
ConfigDef.Recommender - Interface in org.apache.kafka.common.config
+
+
This is used by the ConfigDef.validate(Map) to get valid values for a configuration given the current configuration values in order to perform full configuration validation and visibility modification.
+
+
ConfigDef.Type - Enum Class in org.apache.kafka.common.config
+
+
The type for a configuration value
+
+
ConfigDef.Validator - Interface in org.apache.kafka.common.config
+
+
Validation logic the user may provide to perform single configuration validation.
+
+
ConfigDef.ValidList - Class in org.apache.kafka.common.config
+
 
+
ConfigDef.ValidString - Class in org.apache.kafka.common.config
+
 
+
ConfigDef.Width - Enum Class in org.apache.kafka.common.config
+
+
The width of a configuration value
+
+
configEntities() - Method in interface org.apache.kafka.server.quota.ClientQuotaEntity
+
+
Returns the list of configuration entities that this quota entity is comprised of.
+
+
configEntry() - Method in class org.apache.kafka.clients.admin.AlterConfigOp
+
 
+
ConfigEntry - Class in org.apache.kafka.clients.admin
+
+
A class representing a configuration entry containing name, value and additional metadata.
+
+
ConfigEntry(String, String) - Constructor for class org.apache.kafka.clients.admin.ConfigEntry
+
+
Create a configuration entry with the provided values.
+
+
ConfigEntry(String, String, ConfigEntry.ConfigSource, boolean, boolean, List<ConfigEntry.ConfigSynonym>, ConfigEntry.ConfigType, String) - Constructor for class org.apache.kafka.clients.admin.ConfigEntry
+
+
Create a configuration with the provided values.
+
+
ConfigEntry.ConfigSource - Enum Class in org.apache.kafka.clients.admin
+
+
Source of configuration entries.
+
+
ConfigEntry.ConfigSynonym - Class in org.apache.kafka.clients.admin
+
+
Class representing a configuration synonym of a ConfigEntry.
+
+
ConfigEntry.ConfigType - Enum Class in org.apache.kafka.clients.admin
+
+
Data type of configuration entry.
+
+
ConfigException - Exception in org.apache.kafka.common.config
+
+
Thrown if the user supplies an invalid configuration
+
+
ConfigException(String) - Constructor for exception org.apache.kafka.common.config.ConfigException
+
 
+
ConfigException(String, Object) - Constructor for exception org.apache.kafka.common.config.ConfigException
+
 
+
ConfigException(String, Object, String) - Constructor for exception org.apache.kafka.common.config.ConfigException
+
 
+
ConfigKey(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List<String>, ConfigDef.Recommender, boolean) - Constructor for class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
configKeys() - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Get the configuration keys
+
+
configNames() - Static method in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
configNames() - Static method in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
configNames() - Static method in class org.apache.kafka.clients.producer.ProducerConfig
+
 
+
ConfigProvider - Interface in org.apache.kafka.common.config.provider
+
+
A provider of configuration data, which may optionally support subscriptions to configuration changes.
+
+
ConfigResource - Class in org.apache.kafka.common.config
+
+
A class representing resources that have configs.
+
+
ConfigResource(ConfigResource.Type, String) - Constructor for class org.apache.kafka.common.config.ConfigResource
+
+
Create an instance of this class with the provided parameters.
+
+
ConfigResource.Type - Enum Class in org.apache.kafka.common.config
+
+
Type of resource.
+
+
configs() - Method in class org.apache.kafka.clients.admin.NewTopic
+
+
The configuration for the new topic, or null if no configs were ever specified.
+
+
configs() - Method in interface org.apache.kafka.connect.sink.SinkTaskContext
+
+
Get the Task configuration.
+
+
configs() - Method in interface org.apache.kafka.connect.source.SourceTaskContext
+
+
Get the Task configuration.
+
+
configs() - Method in class org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata
+
+
Return the configs in the request.
+
+
configs() - Method in class org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata
+
+
Return topic configs in the request, not including broker defaults.
+
+
configs(Map<String, String>) - Method in class org.apache.kafka.clients.admin.NewTopic
+
+
Set the configuration to use on the new topic.
+
+
ConfigTransformer - Class in org.apache.kafka.common.config
+
+
This class wraps a set of ConfigProvider instances and uses them to perform transformations.
+
+
ConfigTransformer(Map<String, ConfigProvider>) - Constructor for class org.apache.kafka.common.config.ConfigTransformer
+
+
Creates a ConfigTransformer with the default pattern, of the form ${provider:[path:]key}.
+
+
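As a rough sketch of that substitution (the properties file path and key below are hypothetical), a ConfigTransformer backed by a FileConfigProvider resolves variables of the form ${file:path:key}:

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.config.ConfigTransformer;
import org.apache.kafka.common.config.ConfigTransformerResult;
import org.apache.kafka.common.config.provider.ConfigProvider;
import org.apache.kafka.common.config.provider.FileConfigProvider;

public class ExampleTransform {
    public static void main(String[] args) {
        ConfigProvider fileProvider = new FileConfigProvider();
        fileProvider.configure(Map.of());

        // Register the provider under the name used in the variable ("file").
        ConfigTransformer transformer = new ConfigTransformer(Map.of("file", fileProvider));

        // "/etc/secrets.properties" and "db.password" are placeholders for this sketch.
        Map<String, String> raw = new HashMap<>();
        raw.put("connection.password", "${file:/etc/secrets.properties:db.password}");

        ConfigTransformerResult result = transformer.transform(raw);
        System.out.println(result.data().get("connection.password")); // resolved value, if the file and key exist
    }
}
```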
ConfigTransformerResult - Class in org.apache.kafka.common.config
+
+
The result of a transformation from ConfigTransformer.
+
+
ConfigTransformerResult(Map<String, String>, Map<String, Long>) - Constructor for class org.apache.kafka.common.config.ConfigTransformerResult
+
+
Creates a new ConfigTransformerResult with the given data and TTL values for a set of paths.
+
+
configurable() - Method in interface org.apache.kafka.connect.rest.ConnectRestExtensionContext
+
+
Provides an implementation of Configurable that can be used to register JAX-RS resources.
+
+
Configurable - Interface in org.apache.kafka.common
+
+
A Mix-in style interface for classes that are instantiated by reflection and need to take configuration parameters
+
+
configure(Map<String, ?>) - Method in class org.apache.kafka.clients.producer.RoundRobinPartitioner
+
 
+
configure(Map<String, ?>) - Method in class org.apache.kafka.common.config.provider.DirectoryConfigProvider
+
 
+
configure(Map<String, ?>) - Method in class org.apache.kafka.common.config.provider.EnvVarConfigProvider
+
 
+
configure(Map<String, ?>) - Method in class org.apache.kafka.common.config.provider.FileConfigProvider
+
 
+
configure(Map<String, ?>) - Method in interface org.apache.kafka.common.Configurable
+
+
Configure this class with the given key-value pairs
+
+
configure(Map<String, ?>) - Method in interface org.apache.kafka.common.MessageFormatter
+
+
Configures the MessageFormatter
+
+
configure(Map<String, ?>) - Method in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
configure(Map<String, ?>) - Method in interface org.apache.kafka.common.security.auth.SecurityProviderCreator
+
+
Configure method is used to configure the generator to create the Security Provider
+
+
configure(Map<String, ?>) - Method in class org.apache.kafka.connect.mirror.DefaultReplicationPolicy
+
 
+
configure(Map<String, ?>) - Method in class org.apache.kafka.connect.mirror.IdentityReplicationPolicy
+
 
+
configure(Map<String, ?>) - Method in class org.apache.kafka.connect.storage.SimpleHeaderConverter
+
 
+
configure(Map<String, ?>) - Method in class org.apache.kafka.connect.storage.StringConverter
+
 
+
configure(Map<String, ?>) - Method in class org.apache.kafka.streams.errors.DefaultProductionExceptionHandler
+
 
+
configure(Map<String, ?>) - Method in class org.apache.kafka.streams.errors.LogAndContinueExceptionHandler
+
 
+
configure(Map<String, ?>) - Method in class org.apache.kafka.streams.errors.LogAndContinueProcessingExceptionHandler
+
 
+
configure(Map<String, ?>) - Method in class org.apache.kafka.streams.errors.LogAndFailExceptionHandler
+
 
+
configure(Map<String, ?>) - Method in class org.apache.kafka.streams.errors.LogAndFailProcessingExceptionHandler
+
 
+
configure(Map<String, ?>) - Method in enum class org.apache.kafka.streams.kstream.Materialized.StoreType
+
 
+
configure(Map<String, ?>) - Method in interface org.apache.kafka.streams.processor.api.ProcessorWrapper
+
 
+
configure(Map<String, ?>) - Method in interface org.apache.kafka.streams.processor.assignment.TaskAssignor
+
 
+
configure(Map<String, ?>) - Method in interface org.apache.kafka.streams.state.DslStoreSuppliers
+
 
+
configure(Map<String, ?>) - Method in interface org.apache.kafka.tools.api.RecordReader
+
 
+
configure(Map<String, ?>, boolean) - Method in interface org.apache.kafka.common.serialization.Deserializer
+
+
Configure this class.
+
+
configure(Map<String, ?>, boolean) - Method in class org.apache.kafka.common.serialization.ListDeserializer
+
 
+
configure(Map<String, ?>, boolean) - Method in class org.apache.kafka.common.serialization.ListSerializer
+
 
+
configure(Map<String, ?>, boolean) - Method in interface org.apache.kafka.common.serialization.Serde
+
+
Configure this class, which will configure the underlying serializer and deserializer.
+
+
configure(Map<String, ?>, boolean) - Method in class org.apache.kafka.common.serialization.Serdes.WrapperSerde
+
 
+
configure(Map<String, ?>, boolean) - Method in interface org.apache.kafka.common.serialization.Serializer
+
+
Configure this class.
+
+
configure(Map<String, ?>, boolean) - Method in class org.apache.kafka.common.serialization.StringDeserializer
+
 
+
configure(Map<String, ?>, boolean) - Method in class org.apache.kafka.common.serialization.StringSerializer
+
 
+
configure(Map<String, ?>, boolean) - Method in class org.apache.kafka.common.serialization.UUIDDeserializer
+
 
+
configure(Map<String, ?>, boolean) - Method in class org.apache.kafka.common.serialization.UUIDSerializer
+
 
+
configure(Map<String, ?>, boolean) - Method in interface org.apache.kafka.connect.storage.Converter
+
+
Configure this class.
+
+
configure(Map<String, ?>, boolean) - Method in class org.apache.kafka.connect.storage.StringConverter
+
 
+
configure(Map<String, ?>, boolean) - Method in class org.apache.kafka.streams.kstream.SessionWindowedDeserializer
+
 
+
configure(Map<String, ?>, boolean) - Method in class org.apache.kafka.streams.kstream.SessionWindowedSerializer
+
 
+
configure(Map<String, ?>, boolean) - Method in class org.apache.kafka.streams.kstream.TimeWindowedDeserializer
+
 
+
configure(Map<String, ?>, boolean) - Method in class org.apache.kafka.streams.kstream.TimeWindowedSerializer
+
 
+
configure(Map<String, ?>, String, List<AppConfigurationEntry>) - Method in interface org.apache.kafka.common.security.auth.AuthenticateCallbackHandler
+
+
Configures this callback handler for the specified SASL mechanism.
+
+
configure(Map<String, ?>, String, List<AppConfigurationEntry>) - Method in class org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator
+
 
+
configure(Map<String, ?>, String, List<AppConfigurationEntry>) - Method in class org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
+
 
+
configure(Map<String, ?>, String, List<AppConfigurationEntry>) - Method in class org.apache.kafka.common.security.oauthbearer.ClientJwtValidator
+
 
+
configure(Map<String, ?>, String, List<AppConfigurationEntry>) - Method in class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
+
 
+
configure(Map<String, ?>, String, List<AppConfigurationEntry>) - Method in class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
+
 
+
configure(Map<String, ?>, String, List<AppConfigurationEntry>) - Method in class org.apache.kafka.common.security.oauthbearer.FileJwtRetriever
+
 
+
configure(Map<String, ?>, String, List<AppConfigurationEntry>) - Method in class org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever
+
 
+
configure(Map<String, ?>, String, List<AppConfigurationEntry>) - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler
+
 
+
configure(Map<String, ?>, String, List<AppConfigurationEntry>) - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler
+
 
+
configure(Map<String, ?>, String, Configuration, AuthenticateCallbackHandler) - Method in interface org.apache.kafka.common.security.auth.Login
+
+
Configures this login instance.
+
+
ConfigValue - Class in org.apache.kafka.common.config
+
 
+
ConfigValue(String) - Constructor for class org.apache.kafka.common.config.ConfigValue
+
 
+
ConfigValue(String, Object, List<Object>, List<String>) - Constructor for class org.apache.kafka.common.config.ConfigValue
+
 
+
configValues() - Method in class org.apache.kafka.common.config.Config
+
 
+
ConnectClusterDetails - Interface in org.apache.kafka.connect.health
+
+
Provides immutable Connect cluster information, such as the ID of the backing Kafka cluster.
+
+
ConnectClusterState - Interface in org.apache.kafka.connect.health
+
+
Provides the ability to lookup connector metadata, including status and configurations, as well as immutable cluster information such as Kafka cluster ID.
+
+
ConnectedStoreProvider - Interface in org.apache.kafka.streams.processor
+
+
Provides a set of StoreBuilders that will be automatically added to the topology and connected to the associated processor.
+
+
ConnectException - Exception in org.apache.kafka.connect.errors
+
+
ConnectException is the top-level exception type generated by Kafka Connect and connector implementations.
+
+
ConnectException(String) - Constructor for exception org.apache.kafka.connect.errors.ConnectException
+
 
+
ConnectException(String, Throwable) - Constructor for exception org.apache.kafka.connect.errors.ConnectException
+
 
+
ConnectException(Throwable) - Constructor for exception org.apache.kafka.connect.errors.ConnectException
+
 
+
ConnectHeaders - Class in org.apache.kafka.connect.header
+
+
A basic Headers implementation.
+
+
ConnectHeaders() - Constructor for class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
ConnectHeaders(Iterable<Header>) - Constructor for class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
CONNECTIONS_MAX_IDLE_MS_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
+
connections.max.idle.ms
+
+
CONNECTIONS_MAX_IDLE_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
connections.max.idle.ms
+
+
CONNECTIONS_MAX_IDLE_MS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
connections.max.idle.ms
+
+
CONNECTIONS_MAX_IDLE_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
connections.max.idle.ms
+
+
Connector - Class in org.apache.kafka.connect.connector
+
+
Connectors manage integration of Kafka Connect with another system, either as an input that ingests data into Kafka or an output that passes data to an external system.
+
+
Connector() - Constructor for class org.apache.kafka.connect.connector.Connector
+
 
+
CONNECTOR - Enum constant in enum class org.apache.kafka.connect.source.SourceTask.TransactionBoundary
+
+
Transactions will be defined by the connector itself, via a TransactionContext.
+
+
CONNECTOR_FAILURE - Static variable in class org.apache.kafka.connect.tools.MockConnector
+
 
+
connectorClass() - Method in class org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest
+
+
The class of the Connector.
+
+
ConnectorClientConfigOverridePolicy - Interface in org.apache.kafka.connect.connector.policy
+
+
An interface for enforcing a policy on overriding of Kafka client configs via the connector configs.
+
+
ConnectorClientConfigRequest - Class in org.apache.kafka.connect.connector.policy
+
 
+
ConnectorClientConfigRequest(String, ConnectorType, Class<? extends Connector>, Map<String, Object>, ConnectorClientConfigRequest.ClientType) - Constructor for class org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest
+
 
+
ConnectorClientConfigRequest.ClientType - Enum Class in org.apache.kafka.connect.connector.policy
+
 
+
connectorConfig(String) - Method in interface org.apache.kafka.connect.health.ConnectClusterState
+
+
Lookup the current configuration of a connector.
+
+
ConnectorContext - Interface in org.apache.kafka.connect.connector
+
+
ConnectorContext allows Connectors to proactively interact with the Kafka Connect runtime.
+
+
connectorHealth(String) - Method in interface org.apache.kafka.connect.health.ConnectClusterState
+
+
Lookup the current health of a connector and its tasks.
+
+
ConnectorHealth - Class in org.apache.kafka.connect.health
+
+
Provides basic health information about the connector and its tasks.
+
+
ConnectorHealth(String, ConnectorState, Map<Integer, TaskState>, ConnectorType) - Constructor for class org.apache.kafka.connect.health.ConnectorHealth
+
 
+
connectorName() - Method in class org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest
+
+
Name of the connector specified in the connector config.
+
+
connectors() - Method in interface org.apache.kafka.connect.health.ConnectClusterState
+
+
Get the names of the connectors currently deployed in this cluster.
+
+
connectorState() - Method in class org.apache.kafka.connect.health.ConnectorHealth
+
+
Provides the current state of the connector.
+
+
ConnectorState - Class in org.apache.kafka.connect.health
+
+
Describes the status, worker ID, and any errors associated with a connector.
+
+
ConnectorState(String, String, String) - Constructor for class org.apache.kafka.connect.health.ConnectorState
+
+
Provides an instance of the ConnectorState.
+
+
ConnectorTransactionBoundaries - Enum Class in org.apache.kafka.connect.source
+
+
An enum to represent the level of support for connector-defined transaction boundaries.
+
+
connectorType() - Method in class org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest
+
+
Type of the Connector.
+
+
ConnectorType - Enum Class in org.apache.kafka.connect.health
+
+
Enum definition that identifies the type of the connector.
+
+
ConnectorUtils - Class in org.apache.kafka.connect.util
+
+
Utilities that connector implementations might find useful.
+
+
ConnectorUtils() - Constructor for class org.apache.kafka.connect.util.ConnectorUtils
+
 
+
connectProcessorAndStateStores(String, String...) - Method in class org.apache.kafka.streams.Topology
+
+
Connect a processor to one or more state stores.
+
+
ConnectRecord<R extends ConnectRecord<R>> - Class in org.apache.kafka.connect.connector
+
+
Base class for records containing data to be copied to/from Kafka.
+
+
ConnectRecord(String, Integer, Schema, Object, Schema, Object, Long) - Constructor for class org.apache.kafka.connect.connector.ConnectRecord
+
 
+
ConnectRecord(String, Integer, Schema, Object, Schema, Object, Long, Iterable<Header>) - Constructor for class org.apache.kafka.connect.connector.ConnectRecord
+
 
+
ConnectRestExtension - Interface in org.apache.kafka.connect.rest
+
+
A plugin interface to allow registration of new JAX-RS resources like Filters, REST endpoints, providers, etc.
+
+
ConnectRestExtensionContext - Interface in org.apache.kafka.connect.rest
+
+
The interface provides the ability for ConnectRestExtension implementations to access the JAX-RS Configurable and cluster state ConnectClusterState.
+
+
ConnectSchema - Class in org.apache.kafka.connect.data
+
 
+
ConnectSchema(Schema.Type) - Constructor for class org.apache.kafka.connect.data.ConnectSchema
+
+
Construct a default schema for a primitive type.
+
+
ConnectSchema(Schema.Type, boolean, Object, String, Integer, String) - Constructor for class org.apache.kafka.connect.data.ConnectSchema
+
+
Construct a Schema for a primitive type, setting schema parameters, struct fields, and key and value schemas to null.
+
+
ConnectSchema(Schema.Type, boolean, Object, String, Integer, String, Map<String, String>, List<Field>, Schema, Schema) - Constructor for class org.apache.kafka.connect.data.ConnectSchema
+
+
Construct a Schema.
+
+
CONSTANT - Enum constant in enum class org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing
+
 
+
ConstantBinScheme(int, double, double) - Constructor for class org.apache.kafka.common.metrics.stats.Histogram.ConstantBinScheme
+
+
Create a bin scheme with the specified number of bins that all have the same width.
+
+
Consumed<K,V> - Class in org.apache.kafka.streams.kstream
+
+
The Consumed class is used to define the optional parameters when using StreamsBuilder to build instances of KStream, KTable, and GlobalKTable.
+
+
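For example, a minimal sketch of supplying Serdes through Consumed when building a KStream; the topic name is hypothetical:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;

public class ExampleConsumed {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        // Consumed carries the optional per-source settings: key/value Serdes, timestamp extractor, offset reset.
        KStream<String, Long> stream = builder.stream(
                "input-topic", // hypothetical topic name
                Consumed.with(Serdes.String(), Serdes.Long()));
        stream.foreach((key, value) -> System.out.println(key + " -> " + value));
    }
}
```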
Consumer<K,V> - Interface in org.apache.kafka.clients.consumer
+
 
+
CONSUMER - Enum constant in enum class org.apache.kafka.clients.consumer.GroupProtocol
+
+
Consumer group protocol
+
+
CONSUMER - Enum constant in enum class org.apache.kafka.common.GroupType
+
 
+
CONSUMER - Enum constant in enum class org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest.ClientType
+
 
+
CONSUMER_CLIENT_PREFIX - Static variable in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
 
+
CONSUMER_GROUP_ID_KEY - Static variable in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
CONSUMER_PREFIX - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix used to isolate consumer configs from other client configs.
+
+
consumerClientId() - Method in interface org.apache.kafka.streams.ThreadMetadata
+
+
Client ID of the Kafka consumer used by the stream thread.
+
+
consumerClientIds() - Method in interface org.apache.kafka.streams.processor.assignment.KafkaStreamsState
+
 
+
consumerConfig() - Method in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
+
Sub-config for Consumer clients.
+
+
ConsumerConfig - Class in org.apache.kafka.clients.consumer
+
+
The consumer configuration keys
+
+
ConsumerConfig(Map<String, Object>) - Constructor for class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
ConsumerConfig(Properties) - Constructor for class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
ConsumerGroupDescription - Class in org.apache.kafka.clients.admin
+
+
A detailed description of a single consumer group in the cluster.
+
+
ConsumerGroupDescription(String, boolean, Collection<MemberDescription>, String, ConsumerGroupState, Node) - Constructor for class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+ +
+
ConsumerGroupDescription(String, boolean, Collection<MemberDescription>, String, ConsumerGroupState, Node, Set<AclOperation>) - Constructor for class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+ +
+
ConsumerGroupDescription(String, boolean, Collection<MemberDescription>, String, GroupType, ConsumerGroupState, Node, Set<AclOperation>) - Constructor for class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+ +
+
ConsumerGroupDescription(String, boolean, Collection<MemberDescription>, String, GroupType, GroupState, Node, Set<AclOperation>, Optional<Integer>, Optional<Integer>) - Constructor for class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
 
+
consumerGroupId() - Method in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
ConsumerGroupListing - Class in org.apache.kafka.clients.admin
+
+
Deprecated.
+
+
ConsumerGroupListing(String, boolean) - Constructor for class org.apache.kafka.clients.admin.ConsumerGroupListing
+
+
Deprecated.
+
Create an instance with the specified parameters.
+
+
ConsumerGroupListing(String, boolean, Optional<ConsumerGroupState>) - Constructor for class org.apache.kafka.clients.admin.ConsumerGroupListing
+
+
Deprecated.
+
+
ConsumerGroupListing(String, boolean, Optional<ConsumerGroupState>, Optional<GroupType>) - Constructor for class org.apache.kafka.clients.admin.ConsumerGroupListing
+
+
Deprecated.
+
+
ConsumerGroupListing(String, Optional<GroupState>, boolean) - Constructor for class org.apache.kafka.clients.admin.ConsumerGroupListing
+
+
Deprecated.
+
Create an instance with the specified parameters.
+
+
ConsumerGroupListing(String, Optional<GroupState>, Optional<GroupType>, boolean) - Constructor for class org.apache.kafka.clients.admin.ConsumerGroupListing
+
+
Deprecated.
+
Create an instance with the specified parameters.
+
+
ConsumerGroupMetadata - Class in org.apache.kafka.clients.consumer
+
+
A metadata struct containing the consumer group information.
+
+
ConsumerGroupMetadata(String) - Constructor for class org.apache.kafka.clients.consumer.ConsumerGroupMetadata
+
 
+
ConsumerGroupMetadata(String, int, String, Optional<String>) - Constructor for class org.apache.kafka.clients.consumer.ConsumerGroupMetadata
+
 
+
consumerGroupOffsetsHistory() - Method in class org.apache.kafka.clients.producer.MockProducer
+
+
Get the list of committed consumer group offsets since the last call to MockProducer.clear()
+
+
ConsumerGroupPartitionAssignor - Interface in org.apache.kafka.coordinator.group.api.assignor
+
+
Server-side partition assignor for consumer groups used by the GroupCoordinator.
+
+
ConsumerGroupState - Enum Class in org.apache.kafka.common
+
+
Deprecated.
Since 4.0. Use GroupState instead.
+
+
+
consumerId() - Method in class org.apache.kafka.clients.admin.MemberDescription
+
+
The consumer id of the group member.
+
+
consumerId() - Method in class org.apache.kafka.clients.admin.ShareMemberDescription
+
+
The consumer id of the group member.
+
+
consumerInstanceIds() - Method in interface org.apache.kafka.streams.ClientInstanceIds
+
+
Returns the client instance id of the consumers.
+
+
ConsumerInterceptor<K,V> - Interface in org.apache.kafka.clients.consumer
+
+
A plugin interface that allows you to intercept (and possibly mutate) records received by the consumer.
+
+
ConsumerPartitionAssignor - Interface in org.apache.kafka.clients.consumer
+
+
This interface is used to define custom partition assignment for use in KafkaConsumer.
+
+
ConsumerPartitionAssignor.Assignment - Class in org.apache.kafka.clients.consumer
+
 
+
ConsumerPartitionAssignor.GroupAssignment - Class in org.apache.kafka.clients.consumer
+
 
+
ConsumerPartitionAssignor.GroupSubscription - Class in org.apache.kafka.clients.consumer
+
 
+
ConsumerPartitionAssignor.RebalanceProtocol - Enum Class in org.apache.kafka.clients.consumer
+
+
The rebalance protocol defines partition assignment and revocation semantics.
+
+
ConsumerPartitionAssignor.Subscription - Class in org.apache.kafka.clients.consumer
+
 
+
consumerPrefix(String) - Static method in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix a property with StreamsConfig.CONSUMER_PREFIX.
+
+
ConsumerRebalanceListener - Interface in org.apache.kafka.clients.consumer
+
+
A callback interface that the user can implement to trigger custom actions when the set of partitions assigned to the consumer changes.
+
+
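A minimal sketch of registering such a callback when subscribing; the broker address, group ID, and topic name are hypothetical:

```java
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ExampleRebalanceListener {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("example-topic"), new ConsumerRebalanceListener() {
                @Override
                public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                    // Typically commit offsets or flush state for the partitions being taken away.
                    System.out.println("Revoked: " + partitions);
                }

                @Override
                public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                    // Typically (re)initialize state or seek to stored offsets.
                    System.out.println("Assigned: " + partitions);
                }
            });
        }
    }
}
```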
ConsumerRecord<K,V> - Class in org.apache.kafka.clients.consumer
+
+
A key/value pair to be received from Kafka.
+
+
ConsumerRecord(String, int, long, long, TimestampType, int, int, K, V, Headers, Optional<Integer>) - Constructor for class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
Creates a record to be received from a specified topic and partition.
+
+
ConsumerRecord(String, int, long, long, TimestampType, int, int, K, V, Headers, Optional<Integer>, Optional<Short>) - Constructor for class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
Creates a record to be received from a specified topic and partition.
+
+
ConsumerRecord(String, int, long, K, V) - Constructor for class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
Creates a record to be received from a specified topic and partition (provided for compatibility with Kafka 0.9 before the message format supported timestamps and before serialized metadata were exposed).
+
+
ConsumerRecords<K,V> - Class in org.apache.kafka.clients.consumer
+
+
A container that holds the list ConsumerRecord per partition for a particular topic.
+
+
ConsumerRecords(Map<TopicPartition, List<ConsumerRecord<K, V>>>) - Constructor for class org.apache.kafka.clients.consumer.ConsumerRecords
+
+
Deprecated.
Since 4.0. Use ConsumerRecords(Map, Map) instead.
+
+
+
ConsumerRecords(Map<TopicPartition, List<ConsumerRecord<K, V>>>, Map<TopicPartition, OffsetAndMetadata>) - Constructor for class org.apache.kafka.clients.consumer.ConsumerRecords
+
 
+
contains(Collection<ClientQuotaFilterComponent>) - Static method in class org.apache.kafka.common.quota.ClientQuotaFilter
+
+
Constructs and returns a quota filter that matches all provided components.
+
+
containsMbean(String) - Method in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
containsOnly(Collection<ClientQuotaFilterComponent>) - Static method in class org.apache.kafka.common.quota.ClientQuotaFilter
+
+
Constructs and returns a quota filter that matches all provided components.
+
+
contentType() - Method in interface org.apache.kafka.server.telemetry.ClientTelemetryPayload
+
+
Method returns the content-type format of the metrics data which is being sent by the client.
+
+
contextChange(MetricsContext) - Method in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
contextChange(MetricsContext) - Method in interface org.apache.kafka.common.metrics.MetricsReporter
+
+
Sets the context labels for the service or library exposing metrics.
+
+
contextLabels() - Method in class org.apache.kafka.common.metrics.KafkaMetricsContext
+
 
+
contextLabels() - Method in interface org.apache.kafka.common.metrics.MetricsContext
+
+
Returns the labels for this metrics context.
+
+
ContextualFixedKeyProcessor<KIn,VIn,VOut> - Class in org.apache.kafka.streams.processor.api
+
+
An abstract implementation of FixedKeyProcessor that manages the FixedKeyProcessorContext instance.
+
+
ContextualProcessor<KIn,VIn,KOut,VOut> - Class in org.apache.kafka.streams.processor.api
+
+
An abstract implementation of Processor that manages the ProcessorContext instance.
+
+
CONTINUE - Enum constant in enum class org.apache.kafka.streams.errors.DeserializationExceptionHandler.DeserializationHandlerResponse
+
+
Continue processing.
+
+
CONTINUE - Enum constant in enum class org.apache.kafka.streams.errors.ProcessingExceptionHandler.ProcessingHandlerResponse
+
+
Continue processing.
+
+
CONTINUE - Enum constant in enum class org.apache.kafka.streams.errors.ProductionExceptionHandler.ProductionExceptionHandlerResponse
+
+
Continue processing.
+
+
controller() - Method in class org.apache.kafka.clients.admin.DescribeClusterResult
+
+
Returns a future which yields the current controller node.
+
+
controller() - Method in class org.apache.kafka.common.Cluster
+
 
+
CONTROLLER - Enum constant in enum class org.apache.kafka.clients.admin.EndpointType
+
 
+
CONTROLLER_MUTATION - Enum constant in enum class org.apache.kafka.server.quota.ClientQuotaType
+
 
+
ControllerMovedException - Exception in org.apache.kafka.common.errors
+
 
+
ControllerMovedException(String) - Constructor for exception org.apache.kafka.common.errors.ControllerMovedException
+
 
+
ControllerMovedException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.ControllerMovedException
+
 
+
Converter - Interface in org.apache.kafka.connect.storage
+
+
The Converter interface provides support for translating between Kafka Connect's runtime data format and byte[].
+
+
ConverterConfig - Class in org.apache.kafka.connect.storage
+
+
Abstract class that defines the configuration options for Converter and HeaderConverter instances.
+
+
ConverterType - Enum Class in org.apache.kafka.connect.storage
+
+
The type of Converter and HeaderConverter.
+
+
convertToBoolean(Schema, Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to a Schema.Type.BOOLEAN value.
+
+
convertToByte(Schema, Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to an Schema.Type.INT8 byte value.
+
+
convertToDate(Schema, Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to a date value.
+
+
convertToDecimal(Schema, Object, int) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to a decimal value.
+
+
convertToDouble(Schema, Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to a Schema.Type.FLOAT64 double value.
+
+
convertToFloat(Schema, Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to a Schema.Type.FLOAT32 float value.
+
+
convertToInteger(Schema, Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to an Schema.Type.INT32 int value.
+
+
convertToList(Schema, Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to an Schema.Type.ARRAY value.
+
+
convertToLong(Schema, Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to an Schema.Type.INT64 long value.
+
+
convertToMap(Schema, Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to a Schema.Type.MAP value.
+
+
convertToShort(Schema, Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to an Schema.Type.INT16 short value.
+
+
convertToString(Object, ConfigDef.Type) - Static method in class org.apache.kafka.common.config.ConfigDef
+
 
+
convertToString(Schema, Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to a Schema.Type.STRING value.
+
+
convertToStringMapWithPasswordValues(Map<String, ?>) - Static method in class org.apache.kafka.common.config.ConfigDef
+
+
Converts a map of config (key, value) pairs to a map of strings where each value is converted to a string.
+
+
convertToStruct(Schema, Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to a Schema.Type.STRUCT value.
+
+
convertToTime(Schema, Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to a time value.
+
+
convertToTimestamp(Schema, Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
Convert the specified value to a timestamp value.
+
+
convertToTimestampedFormat(byte[]) - Static method in interface org.apache.kafka.streams.state.TimestampedBytesStore
+
 
+
COOPERATIVE - Enum constant in enum class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.RebalanceProtocol
+
 
+
COOPERATIVE_STICKY_ASSIGNOR_NAME - Static variable in class org.apache.kafka.clients.consumer.CooperativeStickyAssignor
+
 
+
CooperativeStickyAssignor - Class in org.apache.kafka.clients.consumer
+
+
A cooperative version of the AbstractStickyAssignor.
+
+
CooperativeStickyAssignor() - Constructor for class org.apache.kafka.clients.consumer.CooperativeStickyAssignor
+
 
+
coordinator() - Method in class org.apache.kafka.clients.admin.ClassicGroupDescription
+
+
The classic group coordinator, or null if the coordinator is not known.
+
+
coordinator() - Method in class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+
The consumer group coordinator, or null if the coordinator is not known.
+
+
coordinator() - Method in class org.apache.kafka.clients.admin.ShareGroupDescription
+
+
The share group coordinator, or null if the coordinator is not known.
+
+
coordinator() - Method in class org.apache.kafka.clients.admin.StreamsGroupDescription
+
+
The group coordinator, or null if the coordinator is not known.
+
+
coordinatorEpoch() - Method in class org.apache.kafka.clients.admin.AbortTransactionSpec
+
 
+
coordinatorEpoch() - Method in class org.apache.kafka.clients.admin.ProducerState
+
 
+
coordinatorId() - Method in class org.apache.kafka.clients.admin.TransactionDescription
+
 
+
CoordinatorLoadInProgressException - Exception in org.apache.kafka.common.errors
+
+
In the context of the group coordinator, the broker returns this error code for any coordinator request if it is still loading the group metadata (e.g.
+
+
CoordinatorLoadInProgressException(String) - Constructor for exception org.apache.kafka.common.errors.CoordinatorLoadInProgressException
+
 
+
CoordinatorLoadInProgressException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.CoordinatorLoadInProgressException
+
 
+
CoordinatorNotAvailableException - Exception in org.apache.kafka.common.errors
+
+
In the context of the group coordinator, the broker returns this error code for metadata or offset commit requests if the group metadata topic has not been created yet.
+
+
CoordinatorNotAvailableException(String) - Constructor for exception org.apache.kafka.common.errors.CoordinatorNotAvailableException
+
 
+
CoordinatorNotAvailableException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.CoordinatorNotAvailableException
+
 
+
copy() - Method in class org.apache.kafka.streams.query.Position
+
+
Create a deep copy of the Position.
+
+
COPY_SEGMENT_FINISHED - Enum constant in enum class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState
+
+
This state indicates that the segment copying to remote storage is finished.
+
+
COPY_SEGMENT_STARTED - Enum constant in enum class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState
+
+
This state indicates that the segment copying to remote storage is started but not yet finished.
+
+
copyLogSegmentData(RemoteLogSegmentMetadata, LogSegmentData) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteStorageManager
+
+
Copies the given LogSegmentData provided for the given remoteLogSegmentMetadata.
+
+
correlationId() - Method in interface org.apache.kafka.server.authorizer.AuthorizableRequestContext
+
+
Returns the correlation id from the request header.
+
+
corruptedTasks() - Method in exception org.apache.kafka.streams.errors.TaskCorruptedException
+
 
+
CorruptRecordException - Exception in org.apache.kafka.common.errors
+
+
This exception indicates a record has failed its internal CRC check; this generally indicates network or disk corruption.
+
+
CorruptRecordException() - Constructor for exception org.apache.kafka.common.errors.CorruptRecordException
+
 
+
CorruptRecordException(String) - Constructor for exception org.apache.kafka.common.errors.CorruptRecordException
+
 
+
CorruptRecordException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.CorruptRecordException
+
 
+
CorruptRecordException(Throwable) - Constructor for exception org.apache.kafka.common.errors.CorruptRecordException
+
 
+
count() - Method in class org.apache.kafka.clients.consumer.ConsumerRecords
+
+
The number of records for all topics
+
+
count() - Method in interface org.apache.kafka.streams.kstream.KGroupedStream
+
+
Count the number of records in this stream by the grouped key.
+
+
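For instance, a brief sketch of a grouped count in the Streams DSL; the topic names are hypothetical:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Produced;

public class ExampleCount {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        // Re-key each record by its value (the word), then count records per key.
        KTable<String, Long> counts = builder
                .stream("words", Consumed.with(Serdes.String(), Serdes.String())) // hypothetical topic
                .groupBy((key, word) -> word, Grouped.with(Serdes.String(), Serdes.String()))
                .count();
        counts.toStream().to("word-counts", Produced.with(Serdes.String(), Serdes.Long()));
    }
}
```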
count() - Method in interface org.apache.kafka.streams.kstream.KGroupedTable
+
+
Count the number of records of the original KTable that got mapped to the same key into a new instance of KTable.
+
+
count() - Method in interface org.apache.kafka.streams.kstream.SessionWindowedKStream
+
+
Count the number of records in this stream by the grouped key and defined sessions.
+
+
count() - Method in interface org.apache.kafka.streams.kstream.TimeWindowedKStream
+
+
Count the number of records in this stream by the grouped key and defined windows.
+
+
count(Materialized<K, Long, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KGroupedStream
+
+
Count the number of records in this stream by the grouped key.
+
+
count(Materialized<K, Long, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KGroupedTable
+
+
Count the number of records of the original KTable that got mapped to the same key into a new instance of KTable.
+
+
count(Materialized<K, Long, SessionStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedKStream
+
+
Count the number of records in this stream by the grouped key and defined sessions.
+
+
count(Materialized<K, Long, WindowStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedKStream
+
+
Count the number of records in this stream by the grouped key and defined windows.
+
+
count(Named) - Method in interface org.apache.kafka.streams.kstream.KGroupedStream
+
+
Count the number of records in this stream by the grouped key.
+
+
count(Named) - Method in interface org.apache.kafka.streams.kstream.KGroupedTable
+
+
Count the number of records of the original KTable that got mapped to the same key into a new instance of KTable.
+
+
count(Named) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedKStream
+
+
Count the number of records in this stream by the grouped key and defined sessions.
+
+
count(Named) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedKStream
+
+
Count the number of records in this stream by the grouped key and defined windows.
+
+
count(Named, Materialized<K, Long, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KGroupedStream
+
+
Count the number of records in this stream by the grouped key.
+
+
count(Named, Materialized<K, Long, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KGroupedTable
+
+
Count the number of records of the original KTable that got mapped to the same key into a new instance of KTable.
+
+
count(Named, Materialized<K, Long, SessionStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedKStream
+
+
Count the number of records in this stream by the grouped key and defined sessions.
+
+
count(Named, Materialized<K, Long, WindowStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedKStream
+
+
Count the number of records in this stream by the grouped key and defined windows.
+
+
counts() - Method in class org.apache.kafka.common.metrics.stats.Histogram
+
 
+
create(Map<String, Object>) - Static method in interface org.apache.kafka.clients.admin.Admin
+
+
Create a new Admin with the given configuration.
+
+
create(Map<String, Object>) - Static method in class org.apache.kafka.clients.admin.AdminClient
+
+
Create a new Admin with the given configuration.
+
+
create(Properties) - Static method in interface org.apache.kafka.clients.admin.Admin
+
+
Create a new Admin with the given configuration.
+
+
create(Properties) - Static method in class org.apache.kafka.clients.admin.AdminClient
+
+
Create a new Admin with the given configuration.
+
+
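As a minimal sketch of the usual pattern for creating and using an Admin client; the broker address and topic name are hypothetical:

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

public class ExampleAdmin {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker

        // Admin.create(...) returns a client that should be closed when done.
        try (Admin admin = Admin.create(props)) {
            admin.createTopics(List.of(new NewTopic("demo-topic", 3, (short) 1)))
                 .all()
                 .get(); // block until the request completes
        }
    }
}
```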
create(Record<KIn, VIn>) - Static method in class org.apache.kafka.streams.processor.api.InternalFixedKeyRecordFactory
+
+
Only allowed way to create FixedKeyRecords.
+
+
create(StateStoreProvider, String) - Method in interface org.apache.kafka.streams.state.QueryableStoreType
+
+
Create an instance of T (usually a facade) that developers can use to query the underlying StateStores.
+
+
create(StateStoreProvider, String) - Method in class org.apache.kafka.streams.state.QueryableStoreTypes.KeyValueStoreType
+
 
+
create(StateStoreProvider, String) - Method in class org.apache.kafka.streams.state.QueryableStoreTypes.SessionStoreType
+
 
+
create(StateStoreProvider, String) - Method in class org.apache.kafka.streams.state.QueryableStoreTypes.WindowStoreType
+
 
+
CREATE - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
CREATE operation.
+
+
CREATE_TOKENS - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
CREATE_TOKENS operation.
+
+
createAcls(Collection<AclBinding>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
This is a convenience method for Admin.createAcls(Collection, CreateAclsOptions) with default options.
+
+
createAcls(Collection<AclBinding>, CreateAclsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Creates access control lists (ACLs) which are bound to specific resources.
+
+
createAcls(Collection<AclBinding>, CreateAclsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
createAcls(Collection<AclBinding>, CreateAclsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
createAcls(AuthorizableRequestContext, List<AclBinding>) - Method in interface org.apache.kafka.server.authorizer.Authorizer
+
+
Creates new ACL bindings.
+
+
CreateAclsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
CreateAclsOptions() - Constructor for class org.apache.kafka.clients.admin.CreateAclsOptions
+
 
+
CreateAclsResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.createAcls(Collection) call.
+
+
createClientSslEngine(String, int, String) - Method in interface org.apache.kafka.common.security.auth.SslEngineFactory
+
+
Creates a new SSLEngine object to be used by the client.
+
+
CREATED - Enum constant in enum class org.apache.kafka.streams.KafkaStreams.State
+
 
+
createDelegationToken() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Create a Delegation Token.
+
+
createDelegationToken(CreateDelegationTokenOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Create a Delegation Token.
+
+
createDelegationToken(CreateDelegationTokenOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
createDelegationToken(CreateDelegationTokenOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
CreateDelegationTokenOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
CreateDelegationTokenOptions() - Constructor for class org.apache.kafka.clients.admin.CreateDelegationTokenOptions
+
 
+
CreateDelegationTokenResult - Class in org.apache.kafka.clients.admin
+
+ +
+
createInputTopic(String, Serializer<K>, Serializer<V>) - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Create a TestInputTopic to be used for piping records to the topic; uses the current system time as the start timestamp for records.
+
+
createInputTopic(String, Serializer<K>, Serializer<V>, Instant, Duration) - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Create a TestInputTopic to be used for piping records to the topic; uses the provided start timestamp and autoAdvance parameter for records.
+
+
createOutputTopic(String, Deserializer<K>, Deserializer<V>) - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Create a TestOutputTopic to be used for reading records from the topic.
+
+
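A minimal sketch of using these methods with a trivial topology; the topic names, application ID, and bootstrap address are hypothetical (the test driver does not contact any broker):

```java
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TestOutputTopic;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

public class ExampleTopologyTest {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("in", Consumed.with(Serdes.String(), Serdes.String()))
               .mapValues(v -> v.toUpperCase())
               .to("out", Produced.with(Serdes.String(), Serdes.String()));

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "test-app");          // hypothetical
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");     // never contacted by the driver

        try (TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
            TestInputTopic<String, String> in =
                    driver.createInputTopic("in", new StringSerializer(), new StringSerializer());
            TestOutputTopic<String, String> out =
                    driver.createOutputTopic("out", new StringDeserializer(), new StringDeserializer());
            in.pipeInput("k", "hello");
            System.out.println(out.readValue()); // HELLO
        }
    }
}
```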
createPartitions(Map<String, NewPartitions>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Increase the number of partitions of the topics given as the keys of newPartitions according to the corresponding values.
+
+
createPartitions(Map<String, NewPartitions>, CreatePartitionsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Increase the number of partitions of the topics given as the keys of newPartitions according to the corresponding values.
+
+
createPartitions(Map<String, NewPartitions>, CreatePartitionsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
createPartitions(Map<String, NewPartitions>, CreatePartitionsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
CreatePartitionsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
CreatePartitionsOptions() - Constructor for class org.apache.kafka.clients.admin.CreatePartitionsOptions
+
 
+
CreatePartitionsResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.createPartitions(Map) call.
+
+
createServerSslEngine(String, int) - Method in interface org.apache.kafka.common.security.auth.SslEngineFactory
+
+
Creates a new SSLEngine object to be used by the server.
+
+
CreateTopicPolicy - Interface in org.apache.kafka.server.policy
+
+
An interface for enforcing a policy on create topics requests.
+
+
CreateTopicPolicy.RequestMetadata - Class in org.apache.kafka.server.policy
+
+
Class containing the create request parameters.
+
+
createTopics(Collection<NewTopic>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Create a batch of new topics with the default options.
+
+
createTopics(Collection<NewTopic>, CreateTopicsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Create a batch of new topics.
+
+
createTopics(Collection<NewTopic>, CreateTopicsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
createTopics(Collection<NewTopic>, CreateTopicsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
CreateTopicsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
CreateTopicsOptions() - Constructor for class org.apache.kafka.clients.admin.CreateTopicsOptions
+
 
+
CreateTopicsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
CreateTopicsResult.TopicMetadataAndConfig - Class in org.apache.kafka.clients.admin
+
 
+
createWithUpdates(RemoteLogSegmentMetadataUpdate) - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
+
Creates a new RemoteLogSegmentMetadata applying the given rlsmUpdate on this instance.
+
+
credentialInfo() - Method in class org.apache.kafka.clients.admin.UserScramCredentialUpsertion
+
 
+
credentialInfos() - Method in class org.apache.kafka.clients.admin.UserScramCredentialsDescription
+
 
+
CumulativeCount - Class in org.apache.kafka.common.metrics.stats
+
+
A non-sampled version of WindowedCount maintained over all time.
+
+
CumulativeCount() - Constructor for class org.apache.kafka.common.metrics.stats.CumulativeCount
+
 
+
CumulativeSum - Class in org.apache.kafka.common.metrics.stats
+
+
A non-sampled cumulative total maintained over all time.
+
+
CumulativeSum() - Constructor for class org.apache.kafka.common.metrics.stats.CumulativeSum
+
 
+
CumulativeSum(double) - Constructor for class org.apache.kafka.common.metrics.stats.CumulativeSum
+
 
+
current(long) - Method in class org.apache.kafka.common.metrics.stats.SampledStat
+
 
+
currentLag(TopicPartition) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
currentLag(TopicPartition) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get the consumer's current lag on the partition.
+
+
currentLag(TopicPartition) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
currentOffsetPosition() - Method in class org.apache.kafka.streams.LagInfo
+
+
Get the current maximum offset on the store partition's changelog topic that has been successfully written into the store partition's state store.
+
+
currentStreamTimeMs() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
currentStreamTimeMs() - Method in interface org.apache.kafka.streams.processor.api.ProcessingContext
+
+
Return the current stream-time in milliseconds.
+
+
currentStreamTimeMs() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
currentStreamTimeMs() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return the current stream-time in milliseconds.
+
+
currentSystemTimeMs() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
currentSystemTimeMs() - Method in interface org.apache.kafka.streams.processor.api.ProcessingContext
+
+
Return the current system timestamp (also called wall-clock time) in milliseconds.
+
+
currentSystemTimeMs() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
currentSystemTimeMs() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return the current system timestamp (also called wall-clock time) in milliseconds.
+
+
currentTransactionStartOffset() - Method in class org.apache.kafka.clients.admin.ProducerState
+
 
+
customMetadata() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
 
+
customMetadata() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate
+
 
+
CustomMetadata(byte[]) - Constructor for class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata.CustomMetadata
+
 
+
+

D

+
+
data() - Method in class org.apache.kafka.common.config.ConfigData
+
+
Returns the data.
+
+
data() - Method in class org.apache.kafka.common.config.ConfigTransformerResult
+
+
Returns the transformed data, with variables replaced with corresponding values from the ConfigProvider instances if found.
+
+
data() - Method in interface org.apache.kafka.server.telemetry.ClientTelemetryPayload
+
+
Method returns the serialized metrics data as received by the client.
+
+
DataException - Exception in org.apache.kafka.connect.errors
+
+
Base class for all Kafka Connect data API exceptions.
+
+
DataException(String) - Constructor for exception org.apache.kafka.connect.errors.DataException
+
 
+
DataException(String, Throwable) - Constructor for exception org.apache.kafka.connect.errors.DataException
+
 
+
DataException(Throwable) - Constructor for exception org.apache.kafka.connect.errors.DataException
+
 
+
Date - Class in org.apache.kafka.connect.data
+
+
A date representing a calendar day with no time of day or timezone.
+
+
Date() - Constructor for class org.apache.kafka.connect.data.Date
+
 
+
dateFormatFor(Date) - Static method in class org.apache.kafka.connect.data.Values
+
 
+
DEAD - Enum constant in enum class org.apache.kafka.common.ClassicGroupState
+
 
+
DEAD - Enum constant in enum class org.apache.kafka.common.ConsumerGroupState
+
+
Deprecated.
+
DEAD - Enum constant in enum class org.apache.kafka.common.GroupState
+
 
+
DEBUG - Enum constant in enum class org.apache.kafka.common.metrics.Sensor.RecordingLevel
+
 
+
DEBUG_LOG_LEVEL - Static variable in class org.apache.kafka.common.config.LogLevelConfig
+
+
The DEBUG level designates fine-grained informational events that are most useful to debug Kafka.
+
+
Decimal - Class in org.apache.kafka.connect.data
+
+
An arbitrary-precision signed decimal number.
+
+
Decimal() - Constructor for class org.apache.kafka.connect.data.Decimal
+
 
+
Decoder<T> - Interface in org.apache.kafka.tools.api
+
+
A decoder is a method of turning byte arrays into objects.
+
+
DEFAULT - Enum constant in enum class org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation
+
 
+
DEFAULT - Static variable in enum class org.apache.kafka.connect.source.SourceTask.TransactionBoundary
+
+
The default transaction boundary style that will be used for source connectors when no style is explicitly configured.
+
+
DEFAULT_ALLOW_AUTO_CREATE_TOPICS - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
DEFAULT_API_TIMEOUT_MS_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
DEFAULT_API_TIMEOUT_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
default.api.timeout.ms
+
+
DEFAULT_CLIENT_ID - Enum constant in enum class org.apache.kafka.server.quota.ClientQuotaEntity.ConfigEntityType
+
 
+
DEFAULT_CLIENT_RACK - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
DEFAULT_CLIENT_SUPPLIER_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
default.client.supplier
+
+
DEFAULT_CLIENT_SUPPLIER_DOC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
DEFAULT_CONFIG - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigSource
+
 
+
DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+ +
+
DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_DOC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
DEFAULT_DSL_STORE - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
DEFAULT_DSL_STORE_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
DEFAULT_DSL_STORE_DOC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
DEFAULT_EXCLUDE - Static variable in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
DEFAULT_EXCLUDE_INTERNAL_TOPICS - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
DEFAULT_FAILURE_DELAY_MS - Static variable in class org.apache.kafka.connect.tools.MockConnector
+
 
+
DEFAULT_FETCH_MAX_BYTES - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
DEFAULT_FETCH_MAX_WAIT_MS - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
DEFAULT_FETCH_MIN_BYTES - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
DEFAULT_GROUP_PROTOCOL - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
DEFAULT_GROUP_PROTOCOL - Static variable in class org.apache.kafka.streams.StreamsConfig
+
 
+
DEFAULT_GROUP_REMOTE_ASSIGNOR - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
DEFAULT_INCLUDE - Static variable in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
DEFAULT_ISOLATION_LEVEL - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
DEFAULT_KERBEROS_KINIT_CMD - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_KERBEROS_MIN_TIME_BEFORE_RELOGIN - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_KERBEROS_TICKET_RENEW_JITTER - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_KERBEROS_TICKET_RENEW_WINDOW_FACTOR - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_KEY_SERDE_CLASS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
default key.serde
+
+
DEFAULT_LOGIN_REFRESH_BUFFER_SECONDS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_LOGIN_REFRESH_MIN_PERIOD_SECONDS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_LOGIN_REFRESH_WINDOW_FACTOR - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_LOGIN_REFRESH_WINDOW_JITTER - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_MAX_PARTITION_FETCH_BYTES - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
DEFAULT_MAX_POLL_RECORDS - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
DEFAULT_METADATA_RECOVERY_STRATEGY - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
DEFAULT_NUM_SAMPLES - Static variable in class org.apache.kafka.common.metrics.MetricConfig
+
 
+
DEFAULT_PATTERN - Static variable in class org.apache.kafka.common.config.ConfigTransformer
+
 
+
DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MAX_MS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_MECHANISM - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
DEFAULT_SECURITY_PROTOCOL - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
DEFAULT_SSL_ENABLED_PROTOCOLS - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
DEFAULT_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
DEFAULT_SSL_KEYMANGER_ALGORITHM - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
DEFAULT_SSL_KEYSTORE_TYPE - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
DEFAULT_SSL_PROTOCOL - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
DEFAULT_SSL_TRUSTMANAGER_ALGORITHM - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
DEFAULT_SSL_TRUSTSTORE_TYPE - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
DEFAULT_STICKY_NON_OVERLAP_COST - Static variable in class org.apache.kafka.streams.processor.assignment.assignors.StickyTaskAssignor
+
 
+
DEFAULT_STICKY_TRAFFIC_COST - Static variable in class org.apache.kafka.streams.processor.assignment.assignors.StickyTaskAssignor
+
 
+
DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
default.timestamp.extractor
+
+
DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_DOC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
DEFAULT_USER - Enum constant in enum class org.apache.kafka.server.quota.ClientQuotaEntity.ConfigEntityType
+
 
+
DEFAULT_VALUE_SERDE_CLASS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
default.value.serde
+
+
defaultBranch() - Method in interface org.apache.kafka.streams.kstream.BranchedKStream
+
+
Finalize the construction of branches and define the default branch for the messages not intercepted by other branches.
+
+
defaultBranch(Branched<K, V>) - Method in interface org.apache.kafka.streams.kstream.BranchedKStream
+
+
Finalize the construction of branches and define the default branch for the messages not intercepted by other branches.
+
+
DefaultDecoder - Class in org.apache.kafka.tools.api
+
+
The default implementation does nothing, just returns the same byte array it takes in.
+
+
DefaultDecoder() - Constructor for class org.apache.kafka.tools.api.DefaultDecoder
+
 
+
defaultDeserializationExceptionHandler() - Method in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
DefaultJwtRetriever - Class in org.apache.kafka.common.security.oauthbearer
+
+
DefaultJwtRetriever instantiates and delegates JwtRetriever API calls to an embedded implementation based on configuration: If the value of sasl.oauthbearer.token.endpoint.url is set to a value that starts with the file protocol (e.g.
+
+
DefaultJwtRetriever() - Constructor for class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
+
 
+
DefaultJwtValidator - Class in org.apache.kafka.common.security.oauthbearer
+
+
This JwtValidator uses the delegation approach, instantiating and delegating calls to a more concrete implementation.
+
+
DefaultJwtValidator() - Constructor for class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
+
 
+
DefaultJwtValidator(CloseableVerificationKeyResolver) - Constructor for class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
+
 
+
defaultKeySerde() - Method in class org.apache.kafka.streams.StreamsConfig
+
+
Return a configured instance of key Serde class.
+
+
defaultProductionExceptionHandler() - Method in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
DefaultProductionExceptionHandler - Class in org.apache.kafka.streams.errors
+
+
ProductionExceptionHandler that always instructs streams to fail when an exception happens while attempting to produce result records.
+
+
DefaultProductionExceptionHandler() - Constructor for class org.apache.kafka.streams.errors.DefaultProductionExceptionHandler
+
 
+
DefaultReplicationPolicy - Class in org.apache.kafka.connect.mirror
+
+
Default implementation of ReplicationPolicy which prepends the source cluster alias to remote topic names.
+
+
DefaultReplicationPolicy() - Constructor for class org.apache.kafka.connect.mirror.DefaultReplicationPolicy
+
 
+
defaultStandbyTaskAssignment(ApplicationState, Map<ProcessId, KafkaStreamsAssignment>) - Static method in class org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils
+
+
Assign standby tasks to KafkaStreams clients according to the default logic.
+
+
defaultTimestampExtractor() - Method in class org.apache.kafka.streams.StreamsConfig
+
 
+
defaultValue - Variable in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
defaultValue() - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
defaultValue() - Method in interface org.apache.kafka.connect.data.Schema
+
 
+
defaultValue() - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
defaultValue(Object) - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
+
Set the default value for this schema.
+
+
defaultValues() - Method in class org.apache.kafka.common.config.ConfigDef
+
 
+
defaultValueSerde() - Method in class org.apache.kafka.streams.StreamsConfig
+
+
Return a configured instance of value Serde class.
+
+
define(String, ConfigDef.Type, Object, ConfigDef.Importance, String) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no special validation logic
+
+
define(String, ConfigDef.Type, Object, ConfigDef.Importance, String, String) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no special validation logic
+
+
define(String, ConfigDef.Type, Object, ConfigDef.Importance, String, String, int, ConfigDef.Width, String) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no special validation logic, no dependents and no custom recommender
+
+
define(String, ConfigDef.Type, Object, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List<String>) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no special validation logic and no custom recommender
+
+
define(String, ConfigDef.Type, Object, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List<String>, ConfigDef.Recommender) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no special validation logic
+
+
define(String, ConfigDef.Type, Object, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, ConfigDef.Recommender) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no special validation logic and no custom recommender
+
+
define(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no group, no order in group, no width, no display name, no dependents and no custom recommender
+
+
define(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String, String, int, ConfigDef.Width, String) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no dependents and no custom recommender
+
+
define(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List<String>) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no custom recommender
+
+
define(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List<String>, ConfigDef.Recommender) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration
+
+
define(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List<String>, ConfigDef.Recommender, String) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration
+
+
define(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, ConfigDef.Recommender) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no dependents
+
+
define(String, ConfigDef.Type, ConfigDef.Importance, String) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no default value and no special validation logic
+
+
define(String, ConfigDef.Type, ConfigDef.Importance, String, String, int, ConfigDef.Width, String) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no default value, no special validation logic, no dependents and no custom recommender
+
+
define(String, ConfigDef.Type, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List<String>) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no default value, no special validation logic and no custom recommender
+
+
define(String, ConfigDef.Type, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List<String>, ConfigDef.Recommender) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no default value and no special validation logic
+
+
define(String, ConfigDef.Type, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, ConfigDef.Recommender) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new configuration with no default value, no special validation logic and no custom recommender
+
+
define(ConfigDef.ConfigKey) - Method in class org.apache.kafka.common.config.ConfigDef
+
 
+
defineInternal(String, ConfigDef.Type, Object, ConfigDef.Importance) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new internal configuration.
+
+
defineInternal(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Define a new internal configuration.
+
+
DELAY_MS_KEY - Static variable in class org.apache.kafka.connect.tools.MockConnector
+
 
+
DELEGATION_TOKEN - Enum constant in enum class org.apache.kafka.common.resource.ResourceType
+
+
A token ID.
+
+
delegationToken() - Method in class org.apache.kafka.clients.admin.CreateDelegationTokenResult
+
+
Returns a future which yields a delegation token
+
+
DelegationToken - Class in org.apache.kafka.common.security.token.delegation
+
+
A class representing a delegation token.
+
+
DelegationToken(TokenInformation, byte[]) - Constructor for class org.apache.kafka.common.security.token.delegation.DelegationToken
+
 
+
DelegationTokenAuthorizationException - Exception in org.apache.kafka.common.errors
+
 
+
DelegationTokenAuthorizationException(String) - Constructor for exception org.apache.kafka.common.errors.DelegationTokenAuthorizationException
+
 
+
DelegationTokenAuthorizationException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.DelegationTokenAuthorizationException
+
 
+
DelegationTokenDisabledException - Exception in org.apache.kafka.common.errors
+
 
+
DelegationTokenDisabledException(String) - Constructor for exception org.apache.kafka.common.errors.DelegationTokenDisabledException
+
 
+
DelegationTokenDisabledException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.DelegationTokenDisabledException
+
 
+
DelegationTokenExpiredException - Exception in org.apache.kafka.common.errors
+
 
+
DelegationTokenExpiredException(String) - Constructor for exception org.apache.kafka.common.errors.DelegationTokenExpiredException
+
 
+
DelegationTokenExpiredException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.DelegationTokenExpiredException
+
 
+
DelegationTokenNotFoundException - Exception in org.apache.kafka.common.errors
+
 
+
DelegationTokenNotFoundException(String) - Constructor for exception org.apache.kafka.common.errors.DelegationTokenNotFoundException
+
 
+
DelegationTokenNotFoundException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.DelegationTokenNotFoundException
+
 
+
DelegationTokenOwnerMismatchException - Exception in org.apache.kafka.common.errors
+
 
+
DelegationTokenOwnerMismatchException(String) - Constructor for exception org.apache.kafka.common.errors.DelegationTokenOwnerMismatchException
+
 
+
DelegationTokenOwnerMismatchException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.DelegationTokenOwnerMismatchException
+
 
+
delegationTokens() - Method in class org.apache.kafka.clients.admin.DescribeDelegationTokenResult
+
+
Returns a future which yields a list of delegation tokens
+
+
delete(K) - Method in interface org.apache.kafka.streams.state.KeyValueStore
+
+
Delete the value from the store (if there is one).
+
+
delete(K, long) - Method in interface org.apache.kafka.streams.state.VersionedKeyValueStore
+
+
Delete the value associated with this key from the store, at the specified timestamp (if there is such a value), and return the deleted value.
+
+
delete(Bytes, long) - Method in interface org.apache.kafka.streams.state.VersionedBytesStore
+
+ +
+
DELETE - Enum constant in enum class org.apache.kafka.clients.admin.AlterConfigOp.OpType
+
+
Revert the configuration entry to the default value (possibly null).
+
+
DELETE - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
DELETE operation.
+
+
DELETE_PARTITION_FINISHED - Enum constant in enum class org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteState
+
+
This state indicates that the partition is deleted successfully.
+
+
DELETE_PARTITION_MARKED - Enum constant in enum class org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteState
+
+
This is used when a topic/partition is marked for delete by the controller.
+
+
DELETE_PARTITION_STARTED - Enum constant in enum class org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteState
+
+
This state indicates that the partition deletion is started but not yet finished.
+
+
DELETE_RETENTION_MS_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
DELETE_RETENTION_MS_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
DELETE_SEGMENT_FINISHED - Enum constant in enum class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState
+
+
This state indicates that the segment is deleted successfully.
+
+
DELETE_SEGMENT_STARTED - Enum constant in enum class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState
+
+
This state indicates that the segment deletion is started but not yet finished.
+
+
deleteAcls(Collection<AclBindingFilter>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
This is a convenience method for Admin.deleteAcls(Collection, DeleteAclsOptions) with default options.
+
+
deleteAcls(Collection<AclBindingFilter>, DeleteAclsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Deletes access control lists (ACLs) according to the supplied filters.
+
+
deleteAcls(Collection<AclBindingFilter>, DeleteAclsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
deleteAcls(Collection<AclBindingFilter>, DeleteAclsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
deleteAcls(AuthorizableRequestContext, List<AclBindingFilter>) - Method in interface org.apache.kafka.server.authorizer.Authorizer
+
+
Deletes all ACL bindings that match the provided filters.
+
+
DeleteAclsOptions - Class in org.apache.kafka.clients.admin
+
+
Options for the Admin.deleteAcls(Collection) call.
+
+
DeleteAclsOptions() - Constructor for class org.apache.kafka.clients.admin.DeleteAclsOptions
+
 
+
DeleteAclsResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.deleteAcls(Collection) call.
+
+
DeleteAclsResult.FilterResult - Class in org.apache.kafka.clients.admin
+
+
A class containing either the deleted ACL binding or an exception if the delete failed.
+
+
DeleteAclsResult.FilterResults - Class in org.apache.kafka.clients.admin
+
+
A class containing the results of the delete ACLs operation.
+
+
deleteConsumerGroupOffsets(String, Set<TopicPartition>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete committed offsets for a set of partitions in a consumer group with the default options.
+
+
deleteConsumerGroupOffsets(String, Set<TopicPartition>, DeleteConsumerGroupOffsetsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete committed offsets for a set of partitions in a consumer group.
+
+
deleteConsumerGroupOffsets(String, Set<TopicPartition>, DeleteConsumerGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
deleteConsumerGroupOffsets(String, Set<TopicPartition>, DeleteConsumerGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DeleteConsumerGroupOffsetsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DeleteConsumerGroupOffsetsOptions() - Constructor for class org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsOptions
+
 
+
DeleteConsumerGroupOffsetsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
deleteConsumerGroups(Collection<String>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete consumer groups from the cluster with the default options.
+
+
deleteConsumerGroups(Collection<String>, DeleteConsumerGroupsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete consumer groups from the cluster.
+
+
deleteConsumerGroups(Collection<String>, DeleteConsumerGroupsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
deleteConsumerGroups(Collection<String>, DeleteConsumerGroupsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DeleteConsumerGroupsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DeleteConsumerGroupsOptions() - Constructor for class org.apache.kafka.clients.admin.DeleteConsumerGroupsOptions
+
 
+
DeleteConsumerGroupsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
deletedGroups() - Method in class org.apache.kafka.clients.admin.DeleteConsumerGroupsResult
+
+
Return a map from group id to futures which can be used to check the status of individual deletions.
+
+
deletedGroups() - Method in class org.apache.kafka.clients.admin.DeleteShareGroupsResult
+
+
Return a map from group id to futures which can be used to check the status of individual deletions.
+
+
deletedGroups() - Method in class org.apache.kafka.clients.admin.DeleteStreamsGroupsResult
+
+
Return a map from group id to futures which can be used to check the status of individual deletions.
+
+
DeletedRecords - Class in org.apache.kafka.clients.admin
+
+
Represents information about deleted records
+
+
DeletedRecords(long) - Constructor for class org.apache.kafka.clients.admin.DeletedRecords
+
+
Create an instance of this class with the provided parameters.
+
+
deleteLogSegmentData(RemoteLogSegmentMetadata) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteStorageManager
+
+
Deletes the resources associated with the given remoteLogSegmentMetadata.
+
+
deleteRecords(Map<TopicPartition, RecordsToDelete>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete records whose offset is smaller than the given offset of the corresponding partition.
+
+
deleteRecords(Map<TopicPartition, RecordsToDelete>, DeleteRecordsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete records whose offset is smaller than the given offset of the corresponding partition.
+
+
deleteRecords(Map<TopicPartition, RecordsToDelete>, DeleteRecordsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
deleteRecords(Map<TopicPartition, RecordsToDelete>, DeleteRecordsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DeleteRecordsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DeleteRecordsOptions() - Constructor for class org.apache.kafka.clients.admin.DeleteRecordsOptions
+
 
+
DeleteRecordsResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.deleteRecords(Map) call.
+
+
DeleteRecordsResult(Map<TopicPartition, KafkaFuture<DeletedRecords>>) - Constructor for class org.apache.kafka.clients.admin.DeleteRecordsResult
+
 
+
deleteShareGroupOffsets(String, Set<String>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete offsets for a set of topics in a share group with the default options.
+
+
deleteShareGroupOffsets(String, Set<String>, DeleteShareGroupOffsetsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete offsets for a set of topics in a share group.
+
+
deleteShareGroupOffsets(String, Set<String>, DeleteShareGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
deleteShareGroupOffsets(String, Set<String>, DeleteShareGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DeleteShareGroupOffsetsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DeleteShareGroupOffsetsOptions() - Constructor for class org.apache.kafka.clients.admin.DeleteShareGroupOffsetsOptions
+
 
+
DeleteShareGroupOffsetsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
deleteShareGroups(Collection<String>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete share groups from the cluster with the default options.
+
+
deleteShareGroups(Collection<String>, DeleteShareGroupsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete share groups from the cluster.
+
+
deleteShareGroups(Collection<String>, DeleteShareGroupsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
deleteShareGroups(Collection<String>, DeleteShareGroupsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DeleteShareGroupsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DeleteShareGroupsOptions() - Constructor for class org.apache.kafka.clients.admin.DeleteShareGroupsOptions
+
 
+
DeleteShareGroupsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
deleteStreamsGroupOffsets(String, Set<TopicPartition>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete committed offsets for a set of partitions in a streams group with the default options.
+
+
deleteStreamsGroupOffsets(String, Set<TopicPartition>, DeleteStreamsGroupOffsetsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete committed offsets for a set of partitions in a streams group.
+
+
deleteStreamsGroupOffsets(String, Set<TopicPartition>, DeleteStreamsGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
deleteStreamsGroupOffsets(String, Set<TopicPartition>, DeleteStreamsGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DeleteStreamsGroupOffsetsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DeleteStreamsGroupOffsetsOptions() - Constructor for class org.apache.kafka.clients.admin.DeleteStreamsGroupOffsetsOptions
+
 
+
DeleteStreamsGroupOffsetsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
deleteStreamsGroups(Collection<String>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete streams groups from the cluster with the default options.
+
+
deleteStreamsGroups(Collection<String>, DeleteStreamsGroupsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete streams groups from the cluster.
+
+
deleteStreamsGroups(Collection<String>, DeleteStreamsGroupsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
deleteStreamsGroups(Collection<String>, DeleteStreamsGroupsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DeleteStreamsGroupsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DeleteStreamsGroupsOptions() - Constructor for class org.apache.kafka.clients.admin.DeleteStreamsGroupsOptions
+
 
+
DeleteStreamsGroupsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
deleteTopics(Collection<String>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
This is a convenience method for Admin.deleteTopics(TopicCollection, DeleteTopicsOptions) with default options.
+
+
deleteTopics(Collection<String>, DeleteTopicsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
This is a convenience method for Admin.deleteTopics(TopicCollection, DeleteTopicsOptions) with default options.
+
+
deleteTopics(TopicCollection) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
This is a convenience method for Admin.deleteTopics(TopicCollection, DeleteTopicsOptions) with default options.
+
+
deleteTopics(TopicCollection, DeleteTopicsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Delete a batch of topics.
+
+
deleteTopics(TopicCollection, DeleteTopicsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
deleteTopics(TopicCollection, DeleteTopicsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DeleteTopicsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DeleteTopicsOptions() - Constructor for class org.apache.kafka.clients.admin.DeleteTopicsOptions
+
 
+
DeleteTopicsResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.deleteTopics(Collection) call.
+
+
DELIVERY_TIMEOUT_MS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
delivery.timeout.ms
+
+
deliveryCount() - Method in class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
Get the delivery count for the record if available.
+
+
DENIED - Enum constant in enum class org.apache.kafka.server.authorizer.AuthorizationResult
+
 
+
DENY - Enum constant in enum class org.apache.kafka.common.acl.AclPermissionType
+
+
Disallows access.
+
+
dependents - Variable in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
DESCENDING - Enum constant in enum class org.apache.kafka.streams.query.ResultOrder
+
 
+
describe() - Method in class org.apache.kafka.streams.Topology
+
+
Returns a description of the specified Topology.
+
+
DESCRIBE - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
DESCRIBE operation.
+
+
DESCRIBE_CONFIGS - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
DESCRIBE_CONFIGS operation.
+
+
DESCRIBE_TOKENS - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
DESCRIBE_TOKENS operation.
+
+
describeAcls(AclBindingFilter) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
This is a convenience method for Admin.describeAcls(AclBindingFilter, DescribeAclsOptions) with default options.
+
+
describeAcls(AclBindingFilter, DescribeAclsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Lists access control lists (ACLs) according to the supplied filter.
+
+
describeAcls(AclBindingFilter, DescribeAclsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeAcls(AclBindingFilter, DescribeAclsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeAclsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeAclsOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeAclsOptions
+
 
+
DescribeAclsResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.describeAcls(AclBindingFilter) call.
+
+
describeClassicGroups(Collection<String>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe some classic groups in the cluster, with the default options.
+
+
describeClassicGroups(Collection<String>, DescribeClassicGroupsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe some classic groups in the cluster.
+
+
describeClassicGroups(Collection<String>, DescribeClassicGroupsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeClassicGroups(Collection<String>, DescribeClassicGroupsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeClassicGroupsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeClassicGroupsOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeClassicGroupsOptions
+
 
+
DescribeClassicGroupsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeClassicGroupsResult(Map<String, KafkaFuture<ClassicGroupDescription>>) - Constructor for class org.apache.kafka.clients.admin.DescribeClassicGroupsResult
+
 
+
describeClientQuotas(ClientQuotaFilter) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describes all entities matching the provided filter that have at least one client quota configuration value defined.
+
+
describeClientQuotas(ClientQuotaFilter, DescribeClientQuotasOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describes all entities matching the provided filter that have at least one client quota configuration value defined.
+
+
describeClientQuotas(ClientQuotaFilter, DescribeClientQuotasOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeClientQuotas(ClientQuotaFilter, DescribeClientQuotasOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeClientQuotasOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeClientQuotasOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeClientQuotasOptions
+
 
+
DescribeClientQuotasResult - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeClientQuotasResult(KafkaFuture<Map<ClientQuotaEntity, Map<String, Double>>>) - Constructor for class org.apache.kafka.clients.admin.DescribeClientQuotasResult
+
+
Maps an entity to its configured quota value(s).
+
+
describeCluster() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Get information about the nodes in the cluster, using the default options.
+
+
describeCluster(DescribeClusterOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Get information about the nodes in the cluster.
+
+
describeCluster(DescribeClusterOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeCluster(DescribeClusterOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeClusterOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeClusterOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeClusterOptions
+
 
+
DescribeClusterResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.describeCluster() call.
+
+
describeConfigs(Collection<ConfigResource>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Get the configuration for the specified resources with the default options.
+
+
describeConfigs(Collection<ConfigResource>, DescribeConfigsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Get the configuration for the specified resources.
+
+
describeConfigs(Collection<ConfigResource>, DescribeConfigsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeConfigs(Collection<ConfigResource>, DescribeConfigsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeConfigsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeConfigsOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeConfigsOptions
+
 
+
DescribeConfigsResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.describeConfigs(Collection) call.
+
+
describeConsumerGroups(Collection<String>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe some consumer groups in the cluster, with the default options.
+
+
describeConsumerGroups(Collection<String>, DescribeConsumerGroupsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe some consumer groups in the cluster.
+
+
describeConsumerGroups(Collection<String>, DescribeConsumerGroupsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeConsumerGroups(Collection<String>, DescribeConsumerGroupsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeConsumerGroupsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeConsumerGroupsOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions
+
 
+
DescribeConsumerGroupsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeConsumerGroupsResult(Map<String, KafkaFuture<ConsumerGroupDescription>>) - Constructor for class org.apache.kafka.clients.admin.DescribeConsumerGroupsResult
+
 
+
describeDelegationToken() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe the Delegation Tokens.
+
+
describeDelegationToken(DescribeDelegationTokenOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe the Delegation Tokens.
+
+
describeDelegationToken(DescribeDelegationTokenOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeDelegationToken(DescribeDelegationTokenOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeDelegationTokenOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeDelegationTokenOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeDelegationTokenOptions
+
 
+
DescribeDelegationTokenResult - Class in org.apache.kafka.clients.admin
+
+ +
+
describedGroups() - Method in class org.apache.kafka.clients.admin.DescribeClassicGroupsResult
+
+
Return a map from group id to futures which yield group descriptions.
+
+
describedGroups() - Method in class org.apache.kafka.clients.admin.DescribeConsumerGroupsResult
+
+
Return a map from group id to futures which yield group descriptions.
+
+
describedGroups() - Method in class org.apache.kafka.clients.admin.DescribeShareGroupsResult
+
+
Return a map from group id to futures which yield share group descriptions.
+
+
describedGroups() - Method in class org.apache.kafka.clients.admin.DescribeStreamsGroupsResult
+
+
Return a map from group id to futures which yield streams group descriptions.
+
+
describeFeatures() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describes finalized as well as supported features.
+
+
describeFeatures(DescribeFeaturesOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describes finalized as well as supported features.
+
+
describeFeatures(DescribeFeaturesOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeFeatures(DescribeFeaturesOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeFeaturesOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeFeaturesOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeFeaturesOptions
+
 
+
DescribeFeaturesResult - Class in org.apache.kafka.clients.admin
+
+ +
+
describeLogDirs(Collection<Integer>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Query the information of all log directories on the given set of brokers
+
+
describeLogDirs(Collection<Integer>, DescribeLogDirsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Query the information of all log directories on the given set of brokers
+
+
describeLogDirs(Collection<Integer>, DescribeLogDirsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeLogDirs(Collection<Integer>, DescribeLogDirsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeLogDirsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeLogDirsOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeLogDirsOptions
+
 
+
DescribeLogDirsResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.describeLogDirs(Collection) call.
+
+
describeMetadataQuorum() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describes the state of the metadata quorum.
+
+
describeMetadataQuorum(DescribeMetadataQuorumOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describes the state of the metadata quorum.
+
+
describeMetadataQuorum(DescribeMetadataQuorumOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeMetadataQuorum(DescribeMetadataQuorumOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeMetadataQuorumOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeMetadataQuorumOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeMetadataQuorumOptions
+
 
+
DescribeMetadataQuorumResult - Class in org.apache.kafka.clients.admin
+
+ +
+
describeProducers(Collection<TopicPartition>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe producer state on a set of topic partitions.
+
+
describeProducers(Collection<TopicPartition>, DescribeProducersOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe active producer state on a set of topic partitions.
+
+
describeProducers(Collection<TopicPartition>, DescribeProducersOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeProducers(Collection<TopicPartition>, DescribeProducersOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeProducersOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeProducersOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeProducersOptions
+
 
+
DescribeProducersResult - Class in org.apache.kafka.clients.admin
+
 
+
DescribeProducersResult.PartitionProducerState - Class in org.apache.kafka.clients.admin
+
 
+
describeReplicaLogDirs(Collection<TopicPartitionReplica>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Query the replica log directory information for the specified replicas.
+
+
describeReplicaLogDirs(Collection<TopicPartitionReplica>, DescribeReplicaLogDirsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Query the replica log directory information for the specified replicas.
+
+
describeReplicaLogDirs(Collection<TopicPartitionReplica>, DescribeReplicaLogDirsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeReplicaLogDirs(Collection<TopicPartitionReplica>, DescribeReplicaLogDirsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeReplicaLogDirsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeReplicaLogDirsOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeReplicaLogDirsOptions
+
 
+
DescribeReplicaLogDirsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeReplicaLogDirsResult.ReplicaLogDirInfo - Class in org.apache.kafka.clients.admin
+
 
+
describeShareGroups(Collection<String>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe some share groups in the cluster, with the default options.
+
+
describeShareGroups(Collection<String>, DescribeShareGroupsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe some share groups in the cluster.
+
+
describeShareGroups(Collection<String>, DescribeShareGroupsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeShareGroups(Collection<String>, DescribeShareGroupsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeShareGroupsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeShareGroupsOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeShareGroupsOptions
+
 
+
DescribeShareGroupsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeShareGroupsResult(Map<String, KafkaFuture<ShareGroupDescription>>) - Constructor for class org.apache.kafka.clients.admin.DescribeShareGroupsResult
+
 
+
describeStreamsGroups(Collection<String>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe streams groups in the cluster, with the default options.
+
+
describeStreamsGroups(Collection<String>, DescribeStreamsGroupsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe streams groups in the cluster.
+
+
describeStreamsGroups(Collection<String>, DescribeStreamsGroupsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeStreamsGroups(Collection<String>, DescribeStreamsGroupsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeStreamsGroupsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeStreamsGroupsOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeStreamsGroupsOptions
+
 
+
DescribeStreamsGroupsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeStreamsGroupsResult(Map<String, KafkaFuture<StreamsGroupDescription>>) - Constructor for class org.apache.kafka.clients.admin.DescribeStreamsGroupsResult
+
 
+
describeTopics(Collection<String>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe some topics in the cluster, with the default options.
+
+
describeTopics(Collection<String>, DescribeTopicsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe some topics in the cluster.
+
+
describeTopics(TopicCollection) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
This is a convenience method for Admin.describeTopics(TopicCollection, DescribeTopicsOptions) with default options.
+
+
describeTopics(TopicCollection, DescribeTopicsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe some topics in the cluster.
+
+
describeTopics(TopicCollection, DescribeTopicsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeTopics(TopicCollection, DescribeTopicsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeTopicsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeTopicsOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeTopicsOptions
+
 
+
DescribeTopicsResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.describeTopics(Collection) call.
+
+
describeTransactions(Collection<String>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe the state of a set of transactional IDs.
+
+
describeTransactions(Collection<String>, DescribeTransactionsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe the state of a set of transactional IDs from the respective transaction coordinators, which are dynamically discovered.
+
+
describeTransactions(Collection<String>, DescribeTransactionsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeTransactions(Collection<String>, DescribeTransactionsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeTransactionsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeTransactionsOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeTransactionsOptions
+
 
+
DescribeTransactionsResult - Class in org.apache.kafka.clients.admin
+
 
+
describeUserScramCredentials() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe all SASL/SCRAM credentials.
+
+
describeUserScramCredentials(List<String>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe SASL/SCRAM credentials for the given users.
+
+
describeUserScramCredentials(List<String>, DescribeUserScramCredentialsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Describe SASL/SCRAM credentials.
+
+
describeUserScramCredentials(List<String>, DescribeUserScramCredentialsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
describeUserScramCredentials(List<String>, DescribeUserScramCredentialsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
DescribeUserScramCredentialsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
DescribeUserScramCredentialsOptions() - Constructor for class org.apache.kafka.clients.admin.DescribeUserScramCredentialsOptions
+
 
+
DescribeUserScramCredentialsResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.describeUserScramCredentials() call.
+
+
description() - Method in class org.apache.kafka.common.MetricName
+
 
+
description() - Method in class org.apache.kafka.common.MetricNameTemplate
+
+
Get the description of the metric.
+
+
description(String) - Method in class org.apache.kafka.clients.admin.DescribeTransactionsResult
+
+
Get the description of a specific transactional ID.
+
+
description(String) - Method in class org.apache.kafka.clients.admin.DescribeUserScramCredentialsResult
+
 
+
descriptions() - Method in class org.apache.kafka.clients.admin.DescribeLogDirsResult
+
+
Return a map from brokerId to future which can be used to check the information of partitions on each individual broker.
+
+
DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
deserialization.exception.handler
+
+
deserializationExceptionHandler - Variable in class org.apache.kafka.streams.TopologyConfig.TaskConfig
+
 
+
deserializationExceptionHandler() - Method in class org.apache.kafka.streams.StreamsConfig
+
 
+
DeserializationExceptionHandler - Interface in org.apache.kafka.streams.errors
+
+
Interface that specifies how an exception from source node deserialization (e.g., reading from Kafka) should be handled.
+
+
DeserializationExceptionHandler.DeserializationHandlerResponse - Enum Class in org.apache.kafka.streams.errors
+
+
Enumeration that describes the response from the exception handler.
+
+
deserializationExceptionHandlerSupplier - Variable in class org.apache.kafka.streams.TopologyConfig
+
 
+
deserialize(byte[]) - Method in interface org.apache.kafka.common.security.auth.KafkaPrincipalSerde
+
+
Deserialize a KafkaPrincipal from byte array.
+
+
deserialize(String, byte[]) - Method in class org.apache.kafka.common.serialization.BooleanDeserializer
+
 
+
deserialize(String, byte[]) - Method in class org.apache.kafka.common.serialization.ByteArrayDeserializer
+
 
+
deserialize(String, byte[]) - Method in class org.apache.kafka.common.serialization.ByteBufferDeserializer
+
 
+
deserialize(String, byte[]) - Method in class org.apache.kafka.common.serialization.BytesDeserializer
+
 
+
deserialize(String, byte[]) - Method in interface org.apache.kafka.common.serialization.Deserializer
+
+
Deserialize a record value from a byte array into a value or object.
+
+
deserialize(String, byte[]) - Method in class org.apache.kafka.common.serialization.DoubleDeserializer
+
 
+
deserialize(String, byte[]) - Method in class org.apache.kafka.common.serialization.FloatDeserializer
+
 
+
deserialize(String, byte[]) - Method in class org.apache.kafka.common.serialization.IntegerDeserializer
+
 
+
deserialize(String, byte[]) - Method in class org.apache.kafka.common.serialization.ListDeserializer
+
 
+
deserialize(String, byte[]) - Method in class org.apache.kafka.common.serialization.LongDeserializer
+
 
+
deserialize(String, byte[]) - Method in class org.apache.kafka.common.serialization.ShortDeserializer
+
 
+
deserialize(String, byte[]) - Method in class org.apache.kafka.common.serialization.StringDeserializer
+
 
+
deserialize(String, byte[]) - Method in class org.apache.kafka.common.serialization.UUIDDeserializer
+
 
+
deserialize(String, byte[]) - Method in class org.apache.kafka.common.serialization.VoidDeserializer
+
 
+
deserialize(String, byte[]) - Method in class org.apache.kafka.streams.kstream.SessionWindowedDeserializer
+
 
+
deserialize(String, byte[]) - Method in class org.apache.kafka.streams.kstream.TimeWindowedDeserializer
+
 
+
deserialize(String, Headers, byte[]) - Method in interface org.apache.kafka.common.serialization.Deserializer
+
+
Deserialize a record value from a byte array into a value or object.
+
+
deserialize(String, Headers, ByteBuffer) - Method in class org.apache.kafka.common.serialization.BooleanDeserializer
+
 
+
deserialize(String, Headers, ByteBuffer) - Method in class org.apache.kafka.common.serialization.ByteBufferDeserializer
+
 
+
deserialize(String, Headers, ByteBuffer) - Method in interface org.apache.kafka.common.serialization.Deserializer
+
+
Deserialize a record value from a ByteBuffer into a value or object.
+
+
deserialize(String, Headers, ByteBuffer) - Method in class org.apache.kafka.common.serialization.DoubleDeserializer
+
 
+
deserialize(String, Headers, ByteBuffer) - Method in class org.apache.kafka.common.serialization.FloatDeserializer
+
 
+
deserialize(String, Headers, ByteBuffer) - Method in class org.apache.kafka.common.serialization.IntegerDeserializer
+
 
+
deserialize(String, Headers, ByteBuffer) - Method in class org.apache.kafka.common.serialization.LongDeserializer
+
 
+
deserialize(String, Headers, ByteBuffer) - Method in class org.apache.kafka.common.serialization.ShortDeserializer
+
 
+
deserialize(String, Headers, ByteBuffer) - Method in class org.apache.kafka.common.serialization.StringDeserializer
+
 
+
deserialize(String, Headers, ByteBuffer) - Method in class org.apache.kafka.common.serialization.UUIDDeserializer
+
 
+
deserialize(String, Headers, ByteBuffer) - Method in class org.apache.kafka.common.serialization.VoidDeserializer
+
 
+
deserializer() - Method in interface org.apache.kafka.common.serialization.Serde
+
 
+
deserializer() - Method in class org.apache.kafka.common.serialization.Serdes.WrapperSerde
+
 
+
Deserializer<T> - Interface in org.apache.kafka.common.serialization
+
+
An interface for converting bytes to objects.
+
+
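The deserialize entries above all implement the same byte-to-object contract defined by the Deserializer interface. As a rough, hedged sketch (the class name and charset choice are illustrative, not from the docs), a custom deserializer only has to override deserialize(String, byte[]):

```java
import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.serialization.Deserializer;

// Minimal custom Deserializer: turns UTF-8 bytes back into a String.
// Null payloads (tombstones) are passed through as null, like the built-in deserializers.
public class Utf8StringDeserializer implements Deserializer<String> {
    @Override
    public String deserialize(String topic, byte[] data) {
        return data == null ? null : new String(data, StandardCharsets.UTF_8);
    }
}
```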
deserializeRecord(ConsumerRecord<byte[], byte[]>) - Static method in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
deserializeRecord(ConsumerRecord<byte[], byte[]>) - Static method in class org.apache.kafka.connect.mirror.Heartbeat
+
 
+
DirectoryConfigProvider - Class in org.apache.kafka.common.config.provider
+
+
An implementation of ConfigProvider based on a directory of files.
+
+
DirectoryConfigProvider() - Constructor for class org.apache.kafka.common.config.provider.DirectoryConfigProvider
+
 
+
disableTelemetry() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
disableTelemetry() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
DisconnectException - Exception in org.apache.kafka.common.errors
+
+
Server disconnected before a request could be completed.
+
+
DisconnectException() - Constructor for exception org.apache.kafka.common.errors.DisconnectException
+
 
+
DisconnectException(String) - Constructor for exception org.apache.kafka.common.errors.DisconnectException
+
 
+
DisconnectException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.DisconnectException
+
 
+
DisconnectException(Throwable) - Constructor for exception org.apache.kafka.common.errors.DisconnectException
+
 
+
displayName - Variable in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
divergentOffsets() - Method in exception org.apache.kafka.clients.consumer.LogTruncationException
+
+
Get the divergent offsets for the partitions which were truncated.
+
+
doc() - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
doc() - Method in interface org.apache.kafka.connect.data.Schema
+
 
+
doc() - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
doc(String) - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
+
Set the documentation for this schema.
+
+
documentation - Variable in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
documentation() - Method in class org.apache.kafka.clients.admin.ConfigEntry
+
+
Return the config documentation.
+
+
documentationOf(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
DOES_NOT_EXIST - Enum constant in enum class org.apache.kafka.streams.query.FailureReason
+
+
The requested store partition does not exist at all.
+
+
Double() - Static method in class org.apache.kafka.common.serialization.Serdes
+
+
A serde for nullable Double type.
+
+
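A hedged usage sketch for the nullable Double serde above; the topic name "temperatures" is illustrative:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;

public class DoubleSerdeExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        // Read String keys and nullable Double values using the built-in serdes.
        KStream<String, Double> temperatures =
                builder.stream("temperatures", Consumed.with(Serdes.String(), Serdes.Double()));
        temperatures.foreach((sensor, value) -> System.out.println(sensor + " -> " + value));
    }
}
```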
DOUBLE - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigType
+
 
+
DOUBLE - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Type
+
+
Used for numerical values within the Java Double range.
+
+
DoubleDeserializer - Class in org.apache.kafka.common.serialization
+
 
+
DoubleDeserializer() - Constructor for class org.apache.kafka.common.serialization.DoubleDeserializer
+
 
+
DoubleSerde() - Constructor for class org.apache.kafka.common.serialization.Serdes.DoubleSerde
+
 
+
DoubleSerializer - Class in org.apache.kafka.common.serialization
+
 
+
DoubleSerializer() - Constructor for class org.apache.kafka.common.serialization.DoubleSerializer
+
 
+
DOWNSTREAM_OFFSET_KEY - Static variable in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
downstreamOffset() - Method in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
DSL_STORE_SUPPLIERS_CLASS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
dsl.store.suppliers.class
+
+
DslKeyValueParams - Class in org.apache.kafka.streams.state
+
+
DslKeyValueParams is a wrapper class for all parameters that function as inputs to DslStoreSuppliers.keyValueStore(DslKeyValueParams).
+
+
DslKeyValueParams(String, boolean) - Constructor for class org.apache.kafka.streams.state.DslKeyValueParams
+
 
+
DslSessionParams - Class in org.apache.kafka.streams.state
+
+
DslSessionParams is a wrapper class for all parameters that function as inputs to DslStoreSuppliers.sessionStore(DslSessionParams).
+
+
DslSessionParams(String, Duration, EmitStrategy) - Constructor for class org.apache.kafka.streams.state.DslSessionParams
+
 
+
dslStoreSuppliers - Variable in class org.apache.kafka.streams.TopologyConfig
+
 
+
DslStoreSuppliers - Interface in org.apache.kafka.streams.state
+
+
DslStoreSuppliers defines a grouping of factories to construct stores for each of the types of state store implementations in Kafka Streams.
+
+
DslWindowParams - Class in org.apache.kafka.streams.state
+
+
DslWindowParams is a wrapper class for all parameters that function as inputs to DslStoreSuppliers.windowStore(DslWindowParams).
+
+
DslWindowParams(String, Duration, Duration, boolean, EmitStrategy, boolean, boolean) - Constructor for class org.apache.kafka.streams.state.DslWindowParams
+
 
+
DUMMY_THREAD_INDEX - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
duplicate() - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
duplicate() - Method in interface org.apache.kafka.connect.header.Headers
+
+
Create a copy of this Headers object.
+
+
DuplicateBrokerRegistrationException - Exception in org.apache.kafka.common.errors
+
 
+
DuplicateBrokerRegistrationException(String) - Constructor for exception org.apache.kafka.common.errors.DuplicateBrokerRegistrationException
+
 
+
DuplicateBrokerRegistrationException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.DuplicateBrokerRegistrationException
+
 
+
DuplicateResourceException - Exception in org.apache.kafka.common.errors
+
+
Exception thrown due to a request that illegally refers to the same resource twice (for example, trying to both create and delete the same SCRAM credential for a particular user in a single request).
+
+
DuplicateResourceException(String) - Constructor for exception org.apache.kafka.common.errors.DuplicateResourceException
+
+
Constructor
+
+
DuplicateResourceException(String, String) - Constructor for exception org.apache.kafka.common.errors.DuplicateResourceException
+
+
Constructor
+
+
DuplicateResourceException(String, String, Throwable) - Constructor for exception org.apache.kafka.common.errors.DuplicateResourceException
+
+
Constructor
+
+
DuplicateResourceException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.DuplicateResourceException
+
 
+
DuplicateSequenceException - Exception in org.apache.kafka.common.errors
+
 
+
DuplicateSequenceException(String) - Constructor for exception org.apache.kafka.common.errors.DuplicateSequenceException
+
 
+
DuplicateVoterException - Exception in org.apache.kafka.common.errors
+
 
+
DuplicateVoterException(String) - Constructor for exception org.apache.kafka.common.errors.DuplicateVoterException
+
 
+
DuplicateVoterException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.DuplicateVoterException
+
 
+
DYNAMIC_BROKER_CONFIG - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigSource
+
 
+
DYNAMIC_BROKER_LOGGER_CONFIG - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigSource
+
 
+
DYNAMIC_CLIENT_METRICS_CONFIG - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigSource
+
 
+
DYNAMIC_DEFAULT_BROKER_CONFIG - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigSource
+
 
+
DYNAMIC_GROUP_CONFIG - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigSource
+
 
+
DYNAMIC_TOPIC_CONFIG - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigSource
+
 
+
+

E

+
+
EAGER - Enum constant in enum class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.RebalanceProtocol
+
 
+
earliest() - Static method in class org.apache.kafka.clients.admin.OffsetSpec
+
+
Used to retrieve the earliest offset of a partition
+
+
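A hedged sketch of using OffsetSpec.earliest() with Admin.listOffsets; the broker address and topic name are placeholders:

```java
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class EarliestOffsetExample {
    public static void main(String[] args) throws Exception {
        Map<String, Object> conf = Map.of("bootstrap.servers", "localhost:9092");
        try (Admin admin = Admin.create(conf)) {
            TopicPartition tp = new TopicPartition("orders", 0);
            ListOffsetsResult result = admin.listOffsets(Map.of(tp, OffsetSpec.earliest()));
            // Block for the earliest available offset of orders-0.
            System.out.println(result.partitionResult(tp).get().offset());
        }
    }
}
```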
earliest() - Static method in class org.apache.kafka.streams.AutoOffsetReset
+
+
Creates an AutoOffsetReset instance representing "earliest".
+
+
EARLIEST - Enum constant in enum class org.apache.kafka.clients.consumer.OffsetResetStrategy
+
+
Deprecated.
+
EARLIEST - Enum constant in enum class org.apache.kafka.streams.Topology.AutoOffsetReset
+
+
Deprecated.
+
earliestLocal() - Static method in class org.apache.kafka.clients.admin.OffsetSpec
+
+
Used to retrieve the local log start offset.
+
+
EarliestLocalSpec() - Constructor for class org.apache.kafka.clients.admin.OffsetSpec.EarliestLocalSpec
+
 
+
EarliestSpec() - Constructor for class org.apache.kafka.clients.admin.OffsetSpec.EarliestSpec
+
 
+
earlyStartListeners() - Method in interface org.apache.kafka.server.authorizer.AuthorizerServerInfo
+
+
Returns the configured early start listeners.
+
+
ElectionNotNeededException - Exception in org.apache.kafka.common.errors
+
 
+
ElectionNotNeededException(String) - Constructor for exception org.apache.kafka.common.errors.ElectionNotNeededException
+
 
+
ElectionNotNeededException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.ElectionNotNeededException
+
 
+
ElectionType - Enum Class in org.apache.kafka.common
+
+ +
+
electLeaders(ElectionType, Set<TopicPartition>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Elect a replica as leader for topic partitions.
+
+
electLeaders(ElectionType, Set<TopicPartition>, ElectLeadersOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Elect a replica as leader for the given partitions, or for all partitions if the argument to partitions is null.
+
+
electLeaders(ElectionType, Set<TopicPartition>, ElectLeadersOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
electLeaders(ElectionType, Set<TopicPartition>, ElectLeadersOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
ElectLeadersOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
ElectLeadersOptions() - Constructor for class org.apache.kafka.clients.admin.ElectLeadersOptions
+
 
+
ElectLeadersResult - Class in org.apache.kafka.clients.admin
+
+
The result of Admin.electLeaders(ElectionType, Set, ElectLeadersOptions). The API of this class is evolving, see Admin for details.
+
+
EligibleLeadersNotAvailableException - Exception in org.apache.kafka.common.errors
+
 
+
EligibleLeadersNotAvailableException(String) - Constructor for exception org.apache.kafka.common.errors.EligibleLeadersNotAvailableException
+
 
+
EligibleLeadersNotAvailableException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.EligibleLeadersNotAvailableException
+
 
+
elr() - Method in class org.apache.kafka.common.TopicPartitionInfo
+
+
Return the eligible leader replicas of the partition.
+
+
embed(String, String, int, ConfigDef) - Method in class org.apache.kafka.common.config.ConfigDef
+
 
+
EMIT_INTERVAL_MS_KSTREAMS_OUTER_JOIN_SPURIOUS_RESULTS_FIX - Static variable in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION - Static variable in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
emitEarlyWhenFull() - Method in interface org.apache.kafka.streams.kstream.Suppressed.BufferConfig
+
+
Set the buffer to just emit the oldest records when any of its constraints are violated.
+
+
emitStrategy() - Method in class org.apache.kafka.streams.state.DslSessionParams
+
 
+
emitStrategy() - Method in class org.apache.kafka.streams.state.DslWindowParams
+
 
+
emitStrategy(EmitStrategy) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedKStream
+
+
Configure when the aggregated result will be emitted for SessionWindowedKStream.
+
+
emitStrategy(EmitStrategy) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedKStream
+
+
Configure when the aggregated result will be emitted for TimeWindowedKStream.
+
+
EmitStrategy - Interface in org.apache.kafka.streams.kstream
+
+
This interface controls the strategy that can be used to control how we emit results in a processor.
+
+
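A hedged sketch of wiring an EmitStrategy into a windowed aggregation; the topic name and window size are illustrative, and EmitStrategy.onWindowClose() is assumed to be available in the Streams version being documented:

```java
import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.EmitStrategy;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.TimeWindows;

public class EmitOnCloseExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("clicks", Consumed.with(Serdes.String(), Serdes.String()))
               .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
               .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(5)))
               // Emit each windowed count once, when the window closes, instead of on every update.
               .emitStrategy(EmitStrategy.onWindowClose())
               .count();
    }
}
```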
EmitStrategy.StrategyType - Enum Class in org.apache.kafka.streams.kstream
+
 
+
empty() - Static method in class org.apache.kafka.clients.consumer.ConsumerRecords
+
 
+
empty() - Static method in class org.apache.kafka.common.Cluster
+
+
Create an empty cluster instance with no nodes and no topic-partitions.
+
+
empty() - Static method in class org.apache.kafka.common.security.auth.SaslExtensions
+
+
Creates an "empty" instance indicating no SASL extensions.
+
+
EMPTY - Enum constant in enum class org.apache.kafka.clients.admin.TransactionState
+
 
+
EMPTY - Enum constant in enum class org.apache.kafka.common.ClassicGroupState
+
 
+
EMPTY - Enum constant in enum class org.apache.kafka.common.ConsumerGroupState
+
+
Deprecated.
+
EMPTY - Enum constant in enum class org.apache.kafka.common.GroupState
+
 
+
EMPTY - Static variable in class org.apache.kafka.clients.consumer.ConsumerRecords
+
 
+
emptyPosition() - Static method in class org.apache.kafka.streams.query.Position
+
+
Create a new, empty Position.
+
+
ENABLE_AUTO_COMMIT_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
enable.auto.commit
+
+
ENABLE_IDEMPOTENCE_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
enable.idempotence
+
+
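A hedged sketch of enabling idempotence on a producer via ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG; the broker address and topic are placeholders:

```java
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class IdempotentProducerExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true"); // enable.idempotence
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // Retries are de-duplicated per producer/partition, so this send is written at most once.
            producer.send(new ProducerRecord<>("events", "key", "value"));
        }
    }
}
```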
ENABLE_IDEMPOTENCE_DOC - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
 
+
ENABLE_METRICS_PUSH_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
+
enable.metrics.push
+
+
ENABLE_METRICS_PUSH_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
enable.metrics.push
+
+
ENABLE_METRICS_PUSH_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
enable.metrics.push
+
+
ENABLE_METRICS_PUSH_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
enable.metrics.push
+
+
ENABLE_METRICS_PUSH_DOC - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
ENABLE_METRICS_PUSH_DOC - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
ENABLE_METRICS_PUSH_DOC - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
 
+
ENABLE_METRICS_PUSH_DOC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
enableExecutionInfo() - Method in class org.apache.kafka.streams.query.StateQueryRequest
+
+
Requests for stores and the Streams runtime to record any useful details about how the query was executed.
+
+
enableStaleStores() - Method in class org.apache.kafka.streams.StoreQueryParameters
+
+
Enable querying of stale state stores, i.e., allow to query active tasks during restore as well as standby tasks.
+
+
encoding() - Method in class org.apache.kafka.connect.storage.StringConverterConfig
+
+
Get the string encoding.
+
+
ENCODING_CONFIG - Static variable in class org.apache.kafka.connect.storage.StringConverterConfig
+
 
+
ENCODING_DEFAULT - Static variable in class org.apache.kafka.connect.storage.StringConverterConfig
+
 
+
end() - Method in class org.apache.kafka.streams.kstream.Window
+
+
Return the end timestamp of this window.
+
+
endOffset() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
 
+
endOffsetPosition() - Method in class org.apache.kafka.streams.LagInfo
+
+
Get the end offset position for this store partition's changelog topic on the Kafka brokers.
+
+
endOffsets() - Method in interface org.apache.kafka.streams.TaskMetadata
+
+
End offsets of the source topic partitions of the task.
+
+
endOffsets(Collection<TopicPartition>) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
endOffsets(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get the end offsets for the given partitions.
+
+
endOffsets(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
endOffsets(Collection<TopicPartition>, Duration) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
endOffsets(Collection<TopicPartition>, Duration) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get the end offsets for the given partitions.
+
+
endOffsets(Collection<TopicPartition>, Duration) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
Endpoint - Class in org.apache.kafka.common
+
+
Represents a broker endpoint.
+
+
Endpoint(String, int) - Constructor for class org.apache.kafka.clients.admin.StreamsGroupMemberDescription.Endpoint
+
 
+
Endpoint(String, SecurityProtocol, String, int) - Constructor for class org.apache.kafka.common.Endpoint
+
 
+
endpoints() - Method in class org.apache.kafka.clients.admin.QuorumInfo.Node
+
 
+
endpoints() - Method in interface org.apache.kafka.server.authorizer.AuthorizerServerInfo
+
+
Returns endpoints for all listeners including the advertised host and port to which the listener is bound.
+
+
EndpointType - Enum Class in org.apache.kafka.clients.admin
+
+
Identifies the endpoint type, as specified by KIP-919.
+
+
endTime() - Method in class org.apache.kafka.streams.kstream.Window
+
+
Return the end time of this window.
+
+
enforceRebalance() - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
enforceRebalance() - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
 
+
enforceRebalance() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
enforceRebalance(String) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
enforceRebalance(String) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Alert the consumer to trigger a new rebalance by rejoining the group.
+
+
enforceRebalance(String) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
ENSURE_EXPLICIT_INTERNAL_RESOURCE_NAMING_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
ensure.explicit.internal.resource.naming
+
+
ensureExplicitInternalResourceNaming - Variable in class org.apache.kafka.streams.TopologyConfig
+
 
+
ensureValid(String, Object) - Method in class org.apache.kafka.common.config.ConfigDef.CaseInsensitiveValidString
+
 
+
ensureValid(String, Object) - Method in class org.apache.kafka.common.config.ConfigDef.CompositeValidator
+
 
+
ensureValid(String, Object) - Method in class org.apache.kafka.common.config.ConfigDef.LambdaValidator
+
 
+
ensureValid(String, Object) - Method in class org.apache.kafka.common.config.ConfigDef.ListSize
+
 
+
ensureValid(String, Object) - Method in class org.apache.kafka.common.config.ConfigDef.NonEmptyString
+
 
+
ensureValid(String, Object) - Method in class org.apache.kafka.common.config.ConfigDef.NonEmptyStringWithoutControlChars
+
 
+
ensureValid(String, Object) - Method in class org.apache.kafka.common.config.ConfigDef.NonNullValidator
+
 
+
ensureValid(String, Object) - Method in class org.apache.kafka.common.config.ConfigDef.Range
+
 
+
ensureValid(String, Object) - Method in interface org.apache.kafka.common.config.ConfigDef.Validator
+
+
Perform single configuration validation.
+
+
ensureValid(String, Object) - Method in class org.apache.kafka.common.config.ConfigDef.ValidList
+
 
+
ensureValid(String, Object) - Method in class org.apache.kafka.common.config.ConfigDef.ValidString
+
 
+
entities() - Method in class org.apache.kafka.clients.admin.DescribeClientQuotasResult
+
+
Returns a map from quota entity to a future which can be used to check the status of the operation.
+
+
entity() - Method in class org.apache.kafka.common.quota.ClientQuotaAlteration
+
 
+
entityType() - Method in class org.apache.kafka.common.quota.ClientQuotaFilterComponent
+
 
+
entityType() - Method in interface org.apache.kafka.server.quota.ClientQuotaEntity.ConfigEntity
+
+
Returns the type of this entity.
+
+
entries() - Method in class org.apache.kafka.clients.admin.Config
+
+
Configuration entries for a resource.
+
+
entries() - Method in class org.apache.kafka.common.quota.ClientQuotaEntity
+
 
+
entry() - Method in class org.apache.kafka.common.acl.AclBinding
+
 
+
entryFilter() - Method in class org.apache.kafka.common.acl.AclBindingFilter
+
 
+
EnvVarConfigProvider - Class in org.apache.kafka.common.config.provider
+
+
An implementation of ConfigProvider based on environment variables.
+
+
EnvVarConfigProvider() - Constructor for class org.apache.kafka.common.config.provider.EnvVarConfigProvider
+
 
+
EnvVarConfigProvider(Map<String, String>) - Constructor for class org.apache.kafka.common.config.provider.EnvVarConfigProvider
+
 
+
eosEnabled - Variable in class org.apache.kafka.streams.TopologyConfig
+
 
+
eosEnabled - Variable in class org.apache.kafka.streams.TopologyConfig.TaskConfig
+
 
+
epoch() - Method in class org.apache.kafka.clients.producer.PreparedTxnState
+
 
+
epochId(String) - Method in class org.apache.kafka.clients.admin.FenceProducersResult
+
+
Returns a future that provides the epoch ID generated while initializing the given transaction when the request completes.
+
+
equals(Object) - Method in class org.apache.kafka.clients.admin.AbortTransactionSpec
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.AlterConfigOp
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ClassicGroupDescription
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ClientMetricsResourceListing
+
+
Deprecated.
+
equals(Object) - Method in class org.apache.kafka.clients.admin.Config
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ConfigEntry.ConfigSynonym
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ConfigEntry
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ConsumerGroupListing
+
+
Deprecated.
+
equals(Object) - Method in class org.apache.kafka.clients.admin.DescribeProducersOptions
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.FeatureMetadata
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.FeatureUpdate
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.FinalizedVersionRange
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.GroupListing
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ListShareGroupOffsetsSpec
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ListTopicsOptions
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ListTransactionsOptions
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.MemberAssignment
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.MemberDescription
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.MemberToRemove
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.NewTopic
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ProducerState
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.QuorumInfo
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.QuorumInfo.Node
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.QuorumInfo.ReplicaState
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.RaftVoterEndpoint
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.RecordsToDelete
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ScramCredentialInfo
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ShareGroupDescription
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ShareMemberAssignment
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.ShareMemberDescription
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.StreamsGroupDescription
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberAssignment
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberAssignment.TaskIds
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription.Endpoint
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription.TaskOffset
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription.TopicInfo
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.SupportedVersionRange
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.TopicDescription
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.TransactionDescription
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.TransactionListing
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.admin.UserScramCredentialsDescription
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.consumer.ConsumerGroupMetadata
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.consumer.OffsetAndMetadata
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.consumer.OffsetAndTimestamp
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.consumer.SubscriptionPattern
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.producer.PreparedTxnState
+
 
+
equals(Object) - Method in class org.apache.kafka.clients.producer.ProducerRecord
+
 
+
equals(Object) - Method in class org.apache.kafka.common.acl.AccessControlEntry
+
 
+
equals(Object) - Method in class org.apache.kafka.common.acl.AccessControlEntryFilter
+
 
+
equals(Object) - Method in class org.apache.kafka.common.acl.AclBinding
+
 
+
equals(Object) - Method in class org.apache.kafka.common.acl.AclBindingFilter
+
 
+
equals(Object) - Method in class org.apache.kafka.common.Cluster
+
 
+
equals(Object) - Method in class org.apache.kafka.common.ClusterResource
+
 
+
equals(Object) - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
equals(Object) - Method in class org.apache.kafka.common.config.ConfigResource
+
 
+
equals(Object) - Method in class org.apache.kafka.common.config.ConfigValue
+
 
+
equals(Object) - Method in class org.apache.kafka.common.Endpoint
+
 
+
equals(Object) - Method in class org.apache.kafka.common.MetricName
+
 
+
equals(Object) - Method in class org.apache.kafka.common.MetricNameTemplate
+
 
+
equals(Object) - Method in class org.apache.kafka.common.metrics.Quota
+
 
+
equals(Object) - Method in class org.apache.kafka.common.Node
+
 
+
equals(Object) - Method in class org.apache.kafka.common.PartitionInfo
+
 
+
equals(Object) - Method in class org.apache.kafka.common.quota.ClientQuotaAlteration.Op
+
 
+
equals(Object) - Method in class org.apache.kafka.common.quota.ClientQuotaEntity
+
 
+
equals(Object) - Method in class org.apache.kafka.common.quota.ClientQuotaFilter
+
 
+
equals(Object) - Method in class org.apache.kafka.common.quota.ClientQuotaFilterComponent
+
 
+
equals(Object) - Method in class org.apache.kafka.common.resource.Resource
+
 
+
equals(Object) - Method in class org.apache.kafka.common.resource.ResourcePattern
+
 
+
equals(Object) - Method in class org.apache.kafka.common.resource.ResourcePatternFilter
+
 
+
equals(Object) - Method in class org.apache.kafka.common.security.auth.KafkaPrincipal
+
 
+
equals(Object) - Method in class org.apache.kafka.common.security.auth.SaslExtensions
+
+
Implements equals using the reference comparison implementation from Object.equals(Object).
+
+
equals(Object) - Method in class org.apache.kafka.common.security.token.delegation.DelegationToken
+
 
+
equals(Object) - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
equals(Object) - Method in class org.apache.kafka.common.TopicIdPartition
+
 
+
equals(Object) - Method in class org.apache.kafka.common.TopicPartition
+
 
+
equals(Object) - Method in class org.apache.kafka.common.TopicPartitionInfo
+
 
+
equals(Object) - Method in class org.apache.kafka.common.TopicPartitionReplica
+
 
+
equals(Object) - Method in class org.apache.kafka.common.Uuid
+
+
Returns true iff obj is another Uuid represented by the same two long values.
+
+
equals(Object) - Method in class org.apache.kafka.connect.connector.ConnectRecord
+
 
+
equals(Object) - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
equals(Object) - Method in class org.apache.kafka.connect.data.Field
+
 
+
equals(Object) - Method in class org.apache.kafka.connect.data.SchemaAndValue
+
 
+
equals(Object) - Method in class org.apache.kafka.connect.data.Struct
+
 
+
equals(Object) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
equals(Object) - Method in class org.apache.kafka.connect.health.AbstractState
+
 
+
equals(Object) - Method in class org.apache.kafka.connect.health.ConnectorHealth
+
 
+
equals(Object) - Method in class org.apache.kafka.connect.health.TaskState
+
 
+
equals(Object) - Method in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
equals(Object) - Method in class org.apache.kafka.connect.mirror.SourceAndTarget
+
 
+
equals(Object) - Method in class org.apache.kafka.connect.sink.SinkRecord
+
 
+
equals(Object) - Method in class org.apache.kafka.connect.source.SourceRecord
+
 
+
equals(Object) - Method in class org.apache.kafka.coordinator.group.api.assignor.GroupAssignment
+
 
+
equals(Object) - Method in class org.apache.kafka.server.authorizer.Action
+
 
+
equals(Object) - Method in class org.apache.kafka.server.log.remote.storage.LogSegmentData
+
 
+
equals(Object) - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId
+
 
+
equals(Object) - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata.CustomMetadata
+
 
+
equals(Object) - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
 
+
equals(Object) - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate
+
 
+
equals(Object) - Method in class org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteMetadata
+
 
+
equals(Object) - Method in class org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata
+
 
+
equals(Object) - Method in class org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.AutoOffsetReset
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.KeyQueryMetadata
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.KeyValue
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.kstream.Consumed
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.kstream.JoinWindows
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.kstream.Produced
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.kstream.SessionWindows
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.kstream.SlidingWindows
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.kstream.TimeWindows
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.kstream.UnlimitedWindows
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.kstream.Window
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.kstream.Windowed
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.LagInfo
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.processor.api.FixedKeyRecord
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedForward
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.processor.api.Record
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.processor.assignment.ProcessId
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.processor.TaskId
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.processor.To
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.query.Position
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.query.PositionBound
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.state.DslKeyValueParams
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.state.DslSessionParams
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.state.DslWindowParams
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.state.HostInfo
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.state.ValueAndTimestamp
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.state.VersionedRecord
+
 
+
equals(Object) - Method in class org.apache.kafka.streams.StoreQueryParameters
+
 
+
equals(Object) - Method in interface org.apache.kafka.streams.StreamsMetadata
+
+
Compares the specified object with this StreamsMetadata.
+
+
equals(Object) - Method in interface org.apache.kafka.streams.TaskMetadata
+
+
Compares the specified object with this TaskMetadata.
+
+
equals(Object) - Method in class org.apache.kafka.streams.test.TestRecord
+
 
+
equals(Object) - Method in interface org.apache.kafka.streams.ThreadMetadata
+
+
Compares the specified object with this ThreadMetadata.
+
+
errantRecordReporter() - Method in interface org.apache.kafka.connect.sink.SinkTaskContext
+
+
Get the reporter to which the sink task can report problematic or failed records passed to the SinkTask.put(java.util.Collection) method.
+
+
ErrantRecordReporter - Interface in org.apache.kafka.connect.sink
+
+
Component that a SinkTask can use to report problematic records (and their corresponding problems) as it writes them through SinkTask.put(java.util.Collection).
+
+
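A hedged sketch of how a SinkTask might use the reporter described above; the write helper and class name are hypothetical, and the reporter may be null if no error reporting is configured:

```java
import java.util.Collection;
import org.apache.kafka.connect.sink.ErrantRecordReporter;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

public abstract class ReportingSinkTask extends SinkTask {
    @Override
    public void put(Collection<SinkRecord> records) {
        ErrantRecordReporter reporter = context.errantRecordReporter(); // may be null
        for (SinkRecord record : records) {
            try {
                write(record); // hypothetical destination-specific helper
            } catch (RuntimeException e) {
                if (reporter != null) {
                    reporter.report(record, e); // hand the bad record to the connector's error handling
                } else {
                    throw e;
                }
            }
        }
    }

    protected abstract void write(SinkRecord record);
}
```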
error() - Method in class org.apache.kafka.clients.admin.LogDirDescription
+
+
Returns `ApiException` if the log directory is offline or an error occurred, otherwise returns null.
+
+
error(String, String) - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerExtensionsValidatorCallback
+
+
Set the error value for a specific extension key-value pair if validation has failed
+
+
error(String, String, String) - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerTokenCallback
+
+ +
+
error(String, String, String) - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback
+
+ +
+
ERROR - Enum constant in enum class org.apache.kafka.streams.KafkaStreams.State
+
 
+
ERROR_LOG_LEVEL - Static variable in class org.apache.kafka.common.config.LogLevelConfig
+
+
The ERROR level designates error events that might still allow the broker to continue running.
+
+
errorCode() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerTokenCallback
+
+
Return the optional (but always non-empty if not null) error code as per RFC 6749: The OAuth 2.0 Authorization Framework.
+
+
errorDescription() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerTokenCallback
+
+
Return the (potentially null) error description as per RFC 6749: The OAuth 2.0 Authorization Framework.
+
+
ErrorHandlerContext - Interface in org.apache.kafka.streams.errors
+
+
This interface allows user code to inspect the context of a record that has failed during processing.
+
+
errorMessages() - Method in class org.apache.kafka.common.config.ConfigValue
+
 
+
errorNext(RuntimeException) - Method in class org.apache.kafka.clients.producer.MockProducer
+
+
Complete the earliest uncompleted call with the given error.
+
+
errorOpenIDConfiguration() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback
+
+
Return the (potentially null) error openid-configuration value as per RFC 7628: A Set of Simple Authentication and Security Layer (SASL) Mechanisms for OAuth.
+
+
errors() - Method in class org.apache.kafka.clients.admin.ListConsumerGroupsResult
+
+
Deprecated.
+
Returns a future which yields just the errors which occurred.
+
+
errors() - Method in class org.apache.kafka.clients.admin.ListGroupsResult
+
+
Returns a future which yields just the errors which occurred.
+
+
errorScope() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback
+
+ +
+
errorStatus() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback
+
+ +
+
errorUri() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerTokenCallback
+
+
Return the (potentially null) error URI as per RFC 6749: The OAuth 2.0 Authorization Framework.
+
+
eventTimestampMs() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogMetadata
+
 
+
eventWindow() - Method in class org.apache.kafka.common.metrics.MetricConfig
+
 
+
eventWindow(long) - Method in class org.apache.kafka.common.metrics.MetricConfig
+
 
+
EXACTLY_ONCE_V2 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "processing.guarantee" for exactly-once processing guarantees.
+
+
exactlyOnceSupport(Map<String, String>) - Method in class org.apache.kafka.connect.source.SourceConnector
+
+
Signals whether the connector supports exactly-once semantics with a proposed configuration.
+
+
ExactlyOnceSupport - Enum Class in org.apache.kafka.connect.source
+
+
An enum to represent the level of support for exactly-once semantics from a source connector.
+
+
exception() - Method in class org.apache.kafka.clients.admin.DeleteAclsResult.FilterResult
+
+
Return an exception if the ACL delete was not successful or null if it was.
+
+
exception() - Method in class org.apache.kafka.server.authorizer.AclCreateResult
+
+
Returns any exception during create.
+
+
exception() - Method in class org.apache.kafka.server.authorizer.AclDeleteResult.AclBindingDeleteResult
+
+
Returns any exception that resulted in failure to delete ACL binding.
+
+
exception() - Method in class org.apache.kafka.server.authorizer.AclDeleteResult
+
+
Returns any exception while attempting to match ACL filter to delete ACLs.
+
+
EXCLUDE_CONFIG - Static variable in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
EXCLUDE_INTERNAL_TOPICS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
exclude.internal.topics
+
+
executionInfoEnabled() - Method in class org.apache.kafka.streams.query.StateQueryRequest
+
+
Whether the request includes detailed execution information.
+
+
EXPIRATION_CLAIM_NAME - Static variable in class org.apache.kafka.common.security.oauthbearer.ClientJwtValidator
+
 
+
expireDelegationToken(byte[]) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Expire a Delegation Token.
+
+
expireDelegationToken(byte[], ExpireDelegationTokenOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Expire a Delegation Token.
+
+
expireDelegationToken(byte[], ExpireDelegationTokenOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
expireDelegationToken(byte[], ExpireDelegationTokenOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
ExpireDelegationTokenOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
ExpireDelegationTokenOptions() - Constructor for class org.apache.kafka.clients.admin.ExpireDelegationTokenOptions
+
 
+
ExpireDelegationTokenResult - Class in org.apache.kafka.clients.admin
+
+ +
+
expiryTimePeriodMs() - Method in class org.apache.kafka.clients.admin.ExpireDelegationTokenOptions
+
 
+
expiryTimePeriodMs(long) - Method in class org.apache.kafka.clients.admin.ExpireDelegationTokenOptions
+
 
+
expiryTimestamp() - Method in class org.apache.kafka.clients.admin.ExpireDelegationTokenResult
+
+
Returns a future which yields expiry timestamp
+
+
expiryTimestamp() - Method in class org.apache.kafka.clients.admin.RenewDelegationTokenResult
+
+
Returns a future which yields expiry timestamp
+
+
expiryTimestamp() - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
exportMetrics(AuthorizableRequestContext, ClientTelemetryPayload) - Method in interface org.apache.kafka.server.telemetry.ClientTelemetryReceiver
+
+
Called by the broker when a client reports telemetry metrics.
+
+
extensions() - Method in class org.apache.kafka.common.security.auth.SaslExtensionsCallback
+
+
Returns always non-null SaslExtensions consisting of the extension names and values that are sent by the client to the server in the initial client SASL authentication message.
+
+
extensions() - Method in class org.apache.kafka.common.security.scram.ScramExtensionsCallback
+
+
Returns map of the extension names and values that are sent by the client to the server in the initial client SCRAM authentication message.
+
+
extensions(Map<String, String>) - Method in class org.apache.kafka.common.security.scram.ScramExtensionsCallback
+
+
Sets the SCRAM extensions on this callback.
+
+
extensions(SaslExtensions) - Method in class org.apache.kafka.common.security.auth.SaslExtensionsCallback
+
+
Sets the SASL extensions on this callback.
+
+
extract(K, V, RecordContext) - Method in interface org.apache.kafka.streams.processor.TopicNameExtractor
+
+
Extracts the topic name to send to.
+
+
extract(ConsumerRecord<Object, Object>, long) - Method in class org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp
+
+
Extracts the embedded metadata timestamp from the given ConsumerRecord.
+
+
extract(ConsumerRecord<Object, Object>, long) - Method in interface org.apache.kafka.streams.processor.TimestampExtractor
+
+
Extracts a timestamp from a record.
+
+
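A hedged sketch of a custom TimestampExtractor matching the extract contract above; the class name is illustrative:

```java
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.processor.TimestampExtractor;

// Fall back to the current partition time when a record carries no usable timestamp.
public class LenientTimestampExtractor implements TimestampExtractor {
    @Override
    public long extract(ConsumerRecord<Object, Object> record, long partitionTime) {
        long timestamp = record.timestamp();
        return timestamp >= 0 ? timestamp : partitionTime;
    }
}
```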
extract(ConsumerRecord<Object, Object>, long) - Method in class org.apache.kafka.streams.processor.WallclockTimestampExtractor
+
+
Return the current wall clock time as timestamp.
+
+
+

F

+
+
FAIL - Enum constant in enum class org.apache.kafka.streams.errors.DeserializationExceptionHandler.DeserializationHandlerResponse
+
+
Fail processing.
+
+
FAIL - Enum constant in enum class org.apache.kafka.streams.errors.ProcessingExceptionHandler.ProcessingHandlerResponse
+
+
Fail processing.
+
+
FAIL - Enum constant in enum class org.apache.kafka.streams.errors.ProductionExceptionHandler.ProductionExceptionHandlerResponse
+
+
Fail processing.
+
+
FAILED_BUILD_REMOTE_LOG_AUX_STATE_PER_SEC_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
FAILED_REMOTE_COPY_PER_SEC_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
FAILED_REMOTE_DELETE_PER_SEC_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
FAILED_REMOTE_FETCH_PER_SEC_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
FailOnInvalidTimestamp - Class in org.apache.kafka.streams.processor
+
+
Retrieves embedded metadata timestamps from Kafka messages.
+
+
FailOnInvalidTimestamp() - Constructor for class org.apache.kafka.streams.processor.FailOnInvalidTimestamp
+
 
+
FailureReason - Enum Class in org.apache.kafka.streams.query
+
+
This enumeration type captures the various top-level reasons that a particular partition of a store would fail to execute a query.
+
+
FATAL_LOG_LEVEL - Static variable in class org.apache.kafka.common.config.LogLevelConfig
+
+
The FATAL level designates a very severe error that will lead the Kafka broker to abort.
+
+
featureMetadata() - Method in class org.apache.kafka.clients.admin.DescribeFeaturesResult
+
 
+
FeatureMetadata - Class in org.apache.kafka.clients.admin
+
+
Encapsulates details about finalized as well as supported features.
+
+
FeatureUpdate - Class in org.apache.kafka.clients.admin
+
+
Encapsulates details about an update to a finalized feature.
+
+
FeatureUpdate(short, FeatureUpdate.UpgradeType) - Constructor for class org.apache.kafka.clients.admin.FeatureUpdate
+
 
+
FeatureUpdate.UpgradeType - Enum Class in org.apache.kafka.clients.admin
+
 
+
FeatureUpdateFailedException - Exception in org.apache.kafka.common.errors
+
 
+
FeatureUpdateFailedException(String) - Constructor for exception org.apache.kafka.common.errors.FeatureUpdateFailedException
+
 
+
FeatureUpdateFailedException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.FeatureUpdateFailedException
+
 
+
FencedInstanceIdException - Exception in org.apache.kafka.common.errors
+
 
+
FencedInstanceIdException(String) - Constructor for exception org.apache.kafka.common.errors.FencedInstanceIdException
+
 
+
FencedInstanceIdException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.FencedInstanceIdException
+
 
+
FencedLeaderEpochException - Exception in org.apache.kafka.common.errors
+
+
The request contained a leader epoch which is smaller than that on the broker that received the request.
+
+
FencedLeaderEpochException(String) - Constructor for exception org.apache.kafka.common.errors.FencedLeaderEpochException
+
 
+
FencedLeaderEpochException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.FencedLeaderEpochException
+
 
+
FencedMemberEpochException - Exception in org.apache.kafka.common.errors
+
 
+
FencedMemberEpochException(String) - Constructor for exception org.apache.kafka.common.errors.FencedMemberEpochException
+
 
+
fencedProducers() - Method in class org.apache.kafka.clients.admin.FenceProducersResult
+
+
Return a map from transactional ID to futures which can be used to check the status of individual fencings.
+
+
FencedStateEpochException - Exception in org.apache.kafka.common.errors
+
+
Thrown when the share coordinator rejected the request because the share-group state epoch did not match.
+
+
FencedStateEpochException(String) - Constructor for exception org.apache.kafka.common.errors.FencedStateEpochException
+
 
+
fenceProducer() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
fenceProducers(Collection<String>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Fence out all active producers that use any of the provided transactional IDs, with the default options.
+
+
fenceProducers(Collection<String>, FenceProducersOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Fence out all active producers that use any of the provided transactional IDs.
+
+
fenceProducers(Collection<String>, FenceProducersOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
fenceProducers(Collection<String>, FenceProducersOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
FenceProducersOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
FenceProducersOptions() - Constructor for class org.apache.kafka.clients.admin.FenceProducersOptions
+
 
+
FenceProducersResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.fenceProducers(Collection) call.
+
+
fetch(K) - Method in interface org.apache.kafka.streams.state.ReadOnlySessionStore
+
+
Retrieve all aggregated sessions for the provided key.
+
+
fetch(K, long) - Method in interface org.apache.kafka.streams.state.ReadOnlyWindowStore
+
+
Get the value of key from a window.
+
+
fetch(K, long, long) - Method in interface org.apache.kafka.streams.state.WindowStore
+
+
Get all the key-value pairs with the given key and the time range from all the existing windows.
+
+
fetch(K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.ReadOnlyWindowStore
+
+
Get all the key-value pairs with the given key and the time range from all the existing windows.
+
+
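A hedged interactive-query sketch for the windowed fetch above; the store name, key, and the running KafkaStreams instance are assumed to exist in the application:

```java
import java.time.Duration;
import java.time.Instant;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyWindowStore;
import org.apache.kafka.streams.state.WindowStoreIterator;

public class WindowFetchExample {
    static void printLastHour(KafkaStreams streams, String key) {
        ReadOnlyWindowStore<String, Long> store = streams.store(
                StoreQueryParameters.fromNameAndType("counts-store", QueryableStoreTypes.windowStore()));
        Instant now = Instant.now();
        // Iterate the windows for this key over the last hour; kv.key is the window start timestamp.
        try (WindowStoreIterator<Long> iter = store.fetch(key, now.minus(Duration.ofHours(1)), now)) {
            iter.forEachRemaining(kv -> System.out.println(kv.key + " -> " + kv.value));
        }
    }
}
```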
fetch(K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.WindowStore
+
 
+
fetch(K, K) - Method in interface org.apache.kafka.streams.state.ReadOnlySessionStore
+
+
Retrieve all aggregated sessions for the given range of keys.
+
+
fetch(K, K, long, long) - Method in interface org.apache.kafka.streams.state.WindowStore
+
+
Get all the key-value pairs in the given key range and time range from all the existing windows.
+
+
fetch(K, K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.ReadOnlyWindowStore
+
+
Get all the key-value pairs in the given key range and time range from all the existing windows.
+
+
fetch(K, K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.WindowStore
+
 
+
FETCH - Enum constant in enum class org.apache.kafka.server.quota.ClientQuotaType
+
 
+
FETCH_MAX_BYTES_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
fetch.max.bytes
+
+
FETCH_MAX_WAIT_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
fetch.max.wait.ms
+
+
FETCH_MIN_BYTES_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
fetch.min.bytes
+
+
fetchAll(long, long) - Method in interface org.apache.kafka.streams.state.WindowStore
+
+
Gets all the key-value pairs that belong to the windows within the given time range.
+
+
fetchAll(Instant, Instant) - Method in interface org.apache.kafka.streams.state.ReadOnlyWindowStore
+
+
Gets all the key-value pairs that belong to the windows within the given time range.
+
+
fetchAll(Instant, Instant) - Method in interface org.apache.kafka.streams.state.WindowStore
+
 
+
fetchIndex(RemoteLogSegmentMetadata, RemoteStorageManager.IndexType) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteStorageManager
+
+
Returns the index for the respective log segment of RemoteLogSegmentMetadata.
+
+
fetchLogSegment(RemoteLogSegmentMetadata, int) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteStorageManager
+
+
Returns the remote log segment data file/object as InputStream for the given RemoteLogSegmentMetadata starting from the given startPosition.
+
+
fetchLogSegment(RemoteLogSegmentMetadata, int, int) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteStorageManager
+
+
Returns the remote log segment data file/object as InputStream for the given RemoteLogSegmentMetadata starting from the given startPosition.
+
+
fetchSession(K, long, long) - Method in interface org.apache.kafka.streams.state.ReadOnlySessionStore
+
+
Get the value of key from a single session.
+
+
fetchSession(K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.ReadOnlySessionStore
+
+
Get the value of key from a single session.
+
+
fetchSession(K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.SessionStore
+
 
+
FetchSessionIdNotFoundException - Exception in org.apache.kafka.common.errors
+
 
+
FetchSessionIdNotFoundException() - Constructor for exception org.apache.kafka.common.errors.FetchSessionIdNotFoundException
+
 
+
FetchSessionIdNotFoundException(String) - Constructor for exception org.apache.kafka.common.errors.FetchSessionIdNotFoundException
+
 
+
FetchSessionTopicIdException - Exception in org.apache.kafka.common.errors
+
 
+
FetchSessionTopicIdException(String) - Constructor for exception org.apache.kafka.common.errors.FetchSessionTopicIdException
+
 
+
field(String) - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
field(String) - Method in interface org.apache.kafka.connect.data.Schema
+
+
Get a Field for this Schema by name.
+
+
field(String) - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
field(String, Schema) - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
+
Add a field to this Schema.Type.STRUCT schema.
+
+
Field - Class in org.apache.kafka.connect.data
+
+
A field in a Struct, consisting of a field name, index, and Schema for the field value.
+
+
Field(String, int, Schema) - Constructor for class org.apache.kafka.connect.data.Field
+
 
+
fields() - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
fields() - Method in interface org.apache.kafka.connect.data.Schema
+
+
Get the list of Fields for this Schema.
+
+
fields() - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
+
Get the list of fields for this Schema.
+
+
FILE_DELETE_DELAY_MS_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
FILE_DELETE_DELAY_MS_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
FileConfigProvider - Class in org.apache.kafka.common.config.provider
+
+
An implementation of ConfigProvider that represents a Properties file.
+
+
FileConfigProvider() - Constructor for class org.apache.kafka.common.config.provider.FileConfigProvider
+
 
+
FileJwtRetriever - Class in org.apache.kafka.common.security.oauthbearer
+
+
FileJwtRetriever is a JwtRetriever that will load the contents of a file, interpreting them as a JWT access key in the serialized form.
+
+
FileJwtRetriever() - Constructor for class org.apache.kafka.common.security.oauthbearer.FileJwtRetriever
+
 
+
fillInStackTrace() - Method in exception org.apache.kafka.common.errors.ApiException
+
 
+
fillInStackTrace() - Method in exception org.apache.kafka.common.metrics.QuotaViolationException
+
 
+
filter(Predicate<? super K, ? super V>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Create a new KStream that consists of all records of this stream which satisfy the given predicate.
+
+
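A hedged sketch of the KStream filter above; the topic names and threshold are illustrative:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

public class FilterExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("purchases", Consumed.with(Serdes.String(), Serdes.Long()))
               // Keep only records whose value satisfies the predicate.
               .filter((customer, amount) -> amount != null && amount > 100L)
               .to("large-purchases", Produced.with(Serdes.String(), Serdes.Long()));
    }
}
```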
filter(Predicate<? super K, ? super V>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable that consists of all records of this KTable which satisfy the given predicate, with default serializers, deserializers, and state store.
+
+
filter(Predicate<? super K, ? super V>, Materialized<K, V, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable that consists of all records of this KTable which satisfy the given predicate, with the key serde, value serde, and the underlying materialized state storage configured in the Materialized instance.
+
+
filter(Predicate<? super K, ? super V>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
filter(Predicate<? super K, ? super V>, Named) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable that consists of all records of this KTable which satisfy the given predicate, with default serializers, deserializers, and state store.
+
+
filter(Predicate<? super K, ? super V>, Named, Materialized<K, V, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable that consists of all records of this KTable which satisfy the given predicate, with the key serde, value serde, and the underlying materialized state storage configured in the Materialized instance.
+
+
filteredDuration() - Method in class org.apache.kafka.clients.admin.ListTransactionsOptions
+
+
Returns the duration ms value being filtered.
+
+
filteredProducerIds() - Method in class org.apache.kafka.clients.admin.ListTransactionsOptions
+
+
Returns the set of producerIds that are being filtered or empty if none have been specified.
+
+
filteredStates() - Method in class org.apache.kafka.clients.admin.ListTransactionsOptions
+
+
Returns the set of states to be filtered or empty if no states have been specified.
+
+
filteredTransactionalIdPattern() - Method in class org.apache.kafka.clients.admin.ListTransactionsOptions
+
+
Returns transactional ID being filtered.
+
+
filterNot(Predicate<? super K, ? super V>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Create a new KStream that consists of all records of this stream which do not satisfy the given predicate.
+
+
filterNot(Predicate<? super K, ? super V>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable that consists of all records of this KTable which do not satisfy the given predicate, with default serializers, deserializers, and state store.
+
+
filterNot(Predicate<? super K, ? super V>, Materialized<K, V, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable that consists of all records of this KTable which do not satisfy the given predicate, with the key serde, value serde, and the underlying materialized state storage configured in the Materialized instance.
+
+
filterNot(Predicate<? super K, ? super V>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
filterNot(Predicate<? super K, ? super V>, Named) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable that consists of all records of this KTable which do not satisfy the given predicate, with default serializers, deserializers, and state store.
+
+
filterNot(Predicate<? super K, ? super V>, Named, Materialized<K, V, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable that consists of all records of this KTable which do not satisfy the given predicate, with the key serde, value serde, and the underlying materialized state storage configured in the Materialized instance.
+
+
filterOnDuration(long) - Method in class org.apache.kafka.clients.admin.ListTransactionsOptions
+
+
Filter only the transactions that are running longer than the specified duration.
+
+
filterOnTransactionalIdPattern(String) - Method in class org.apache.kafka.clients.admin.ListTransactionsOptions
+
+
Filter only the transactions that match with the given transactional ID pattern.
+
+
filterProducerIds(Collection<Long>) - Method in class org.apache.kafka.clients.admin.ListTransactionsOptions
+
+
Filter only the transactions from producers in a specific set of producerIds.
+
+
filterStates(Collection<TransactionState>) - Method in class org.apache.kafka.clients.admin.ListTransactionsOptions
+
+
Filter only the transactions that are in a specific set of states.
+
+
finalizedFeatures() - Method in class org.apache.kafka.clients.admin.FeatureMetadata
+
+
Returns a map of finalized feature versions.
+
+
finalizedFeaturesEpoch() - Method in class org.apache.kafka.clients.admin.FeatureMetadata
+
+
The epoch for the finalized features.
+
+
FinalizedVersionRange - Class in org.apache.kafka.clients.admin
+
+
Represents a range of version levels supported by every broker in a cluster for some feature.
+
+
FinalizedVersionRange(short, short) - Constructor for class org.apache.kafka.clients.admin.FinalizedVersionRange
+
+
Raises an exception unless the following condition is met: minVersionLevel >= 1 and maxVersionLevel >= 1 and maxVersionLevel >= minVersionLevel.
+
+
findIndefiniteField() - Method in class org.apache.kafka.common.acl.AccessControlEntryFilter
+
+
Returns a string describing an ANY or UNKNOWN field, or null if there is no such field.
+
+
findIndefiniteField() - Method in class org.apache.kafka.common.acl.AclBindingFilter
+
+
Return a string describing an ANY or UNKNOWN field, or null if there is no such field.
+
+
findIndefiniteField() - Method in class org.apache.kafka.common.resource.ResourcePatternFilter
+
 
+
findSessions(long, long) - Method in interface org.apache.kafka.streams.state.SessionStore
+
+
Return all the session window entries that end within the specified range (both ends are inclusive).
+
+
findSessions(K, long, long) - Method in interface org.apache.kafka.streams.state.ReadOnlySessionStore
+
+
Fetch any sessions with the matching key whose end is ≥ earliestSessionEndTime and whose start is ≤ latestSessionStartTime, iterating from earliest to latest.
+
+
findSessions(K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.ReadOnlySessionStore
+
+
Fetch any sessions with the matching key whose end is ≥ earliestSessionEndTime and whose start is ≤ latestSessionStartTime, iterating from earliest to latest.
+
+
findSessions(K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.SessionStore
+
 
+
findSessions(K, K, long, long) - Method in interface org.apache.kafka.streams.state.ReadOnlySessionStore
+
+
Fetch any sessions in the given range of keys whose end is ≥ earliestSessionEndTime and whose start is ≤ latestSessionStartTime, iterating from earliest to latest.
+
+
findSessions(K, K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.ReadOnlySessionStore
+
+
Fetch any sessions in the given range of keys whose end is ≥ earliestSessionEndTime and whose start is ≤ latestSessionStartTime, iterating from earliest to latest.
+
+
findSessions(K, K, Instant, Instant) - Method in interface org.apache.kafka.streams.state.SessionStore
+
 
+
FixedKeyProcessor<KIn,VIn,VOut> - Interface in org.apache.kafka.streams.processor.api
+
+
A processor of key-value pair records where keys are immutable.
+
+
FixedKeyProcessorContext<KForward,VForward> - Interface in org.apache.kafka.streams.processor.api
+
+
Processor context interface for FixedKeyRecord.
+
+
FixedKeyProcessorSupplier<KIn,VIn,VOut> - Interface in org.apache.kafka.streams.processor.api
+
+
A processor supplier that can create one or more FixedKeyProcessor instances.
+
+
FixedKeyRecord<K,V> - Class in org.apache.kafka.streams.processor.api
+
+
A data class representing an incoming record with fixed key for processing in a FixedKeyProcessor or a record to forward to downstream processors via FixedKeyProcessorContext.
+
+
flatMap(KeyValueMapper<? super K, ? super V, ? extends Iterable<? extends KeyValue<? extends KOut, ? extends VOut>>>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Create a new KStream that consists of zero or more records for each record in this stream.
+
+
flatMap(KeyValueMapper<? super K, ? super V, ? extends Iterable<? extends KeyValue<? extends KR, ? extends VOut>>>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
flatMapValues(ValueMapper<? super V, ? extends Iterable<? extends VOut>>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Create a new KStream that consists of zero or more records with modified value for each record in this stream.
+
+
flatMapValues(ValueMapper<? super V, ? extends Iterable<? extends VOut>>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
flatMapValues(ValueMapperWithKey<? super K, ? super V, ? extends Iterable<? extends VOut>>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
flatMapValues(ValueMapperWithKey<? super K, ? super V, ? extends Iterable<? extends VOut>>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
Float() - Static method in class org.apache.kafka.common.serialization.Serdes
+
+
A serde for nullable Float type.
+
+
float32() - Static method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
FLOAT32 - Enum constant in enum class org.apache.kafka.connect.data.Schema.Type
+
+
32-bit IEEE 754 floating point number
+
+
FLOAT32_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
float64() - Static method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
FLOAT64 - Enum constant in enum class org.apache.kafka.connect.data.Schema.Type
+
+
64-bit IEEE 754 floating point number
+
+
FLOAT64_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
FloatDeserializer - Class in org.apache.kafka.common.serialization
+
 
+
FloatDeserializer() - Constructor for class org.apache.kafka.common.serialization.FloatDeserializer
+
 
+
FloatSerde() - Constructor for class org.apache.kafka.common.serialization.Serdes.FloatSerde
+
 
+
FloatSerializer - Class in org.apache.kafka.common.serialization
+
 
+
FloatSerializer() - Constructor for class org.apache.kafka.common.serialization.FloatSerializer
+
 
+
flush() - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
Invoking this method makes all buffered records immediately available to send (even if linger.ms is greater than 0) and blocks on the completion of the requests associated with these records.
+
+
flush() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
flush() - Method in interface org.apache.kafka.clients.producer.Producer
+
+ +
+
flush() - Method in interface org.apache.kafka.streams.processor.StateStore
+
+
Flush any cached data
+
+
flush(Map<TopicPartition, OffsetAndMetadata>) - Method in class org.apache.kafka.connect.sink.SinkTask
+
+
Flush all records that have been SinkTask.put(Collection) for the specified topic-partitions.
+
+
flush(Map<TopicPartition, OffsetAndMetadata>) - Method in class org.apache.kafka.connect.tools.VerifiableSinkTask
+
 
+
FLUSH_MESSAGES_INTERVAL_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
FLUSH_MESSAGES_INTERVAL_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
FLUSH_MS_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
FLUSH_MS_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
flushed() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
flushException - Variable in class org.apache.kafka.clients.producer.MockProducer
+
 
+
followupRebalanceDeadline() - Method in class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment
+
 
+
forBooleanValues(MetricName, MetricName) - Static method in class org.apache.kafka.common.metrics.stats.Frequencies
+
+
Create a Frequencies instance with metrics for the frequency of a boolean sensor that records 0.0 for false and 1.0 for true.
+
+
forceTerminateTransaction(String) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Force terminate a transaction for the given transactional ID with the default options.
+
+
forceTerminateTransaction(String, TerminateTransactionOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Force terminate a transaction for the given transactional ID.
+
+
forceTerminateTransaction(String, TerminateTransactionOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
forceTerminateTransaction(String, TerminateTransactionOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
+
Forcefully terminates an ongoing transaction for a given transactional ID.
+
+
forChangelog(boolean) - Method in class org.apache.kafka.streams.kstream.WindowedSerdes.TimeWindowedSerde
+
 
+
forConfig(String) - Static method in enum class org.apache.kafka.common.config.SslClientAuth
+
 
+
forConsumerGroups() - Static method in class org.apache.kafka.clients.admin.ListGroupsOptions
+
+
Only consumer groups will be returned by listGroups().
+
+
foreach(ForeachAction<? super K, ? super V>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Perform an action on each record of this KStream.
+
+
foreach(ForeachAction<? super K, ? super V>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
ForeachAction<K,V> - Interface in org.apache.kafka.streams.kstream
+
+
The ForeachAction interface for performing an action on a key-value pair.
+
+
ForeachProcessor<K,V> - Class in org.apache.kafka.streams.kstream
+
+
Deprecated.
Since 4.0 and should not be used any longer.
+
+
+
ForeachProcessor(ForeachAction<K, V>) - Constructor for class org.apache.kafka.streams.kstream.ForeachProcessor
+
+
Deprecated.
+
forFailure(FailureReason, String) - Static method in interface org.apache.kafka.streams.query.QueryResult
+
+
Static factory method to create a result object for a failed query.
+
+
forGroupId(String) - Static method in exception org.apache.kafka.common.errors.GroupAuthorizationException
+
 
+
forId(byte) - Static method in enum class org.apache.kafka.clients.admin.AlterConfigOp.OpType
+
 
+
forId(byte) - Static method in enum class org.apache.kafka.clients.consumer.AcknowledgeType
+
 
+
forId(byte) - Static method in enum class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.RebalanceProtocol
+
 
+
forId(byte) - Static method in enum class org.apache.kafka.common.config.ConfigResource.Type
+
 
+
forId(byte) - Static method in enum class org.apache.kafka.common.IsolationLevel
+
 
+
forId(byte) - Static method in enum class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState
+
 
+
forId(byte) - Static method in enum class org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteState
+
 
+
forId(int) - Static method in enum class org.apache.kafka.common.metrics.Sensor.RecordingLevel
+
 
+
forId(short) - Static method in enum class org.apache.kafka.common.security.auth.SecurityProtocol
+
 
+
formatRemoteTopic(String, String) - Method in class org.apache.kafka.connect.mirror.DefaultReplicationPolicy
+
 
+
formatRemoteTopic(String, String) - Method in class org.apache.kafka.connect.mirror.IdentityReplicationPolicy
+
+
Unlike DefaultReplicationPolicy, IdentityReplicationPolicy does not include the source cluster alias in the remote topic name.
+
+
formatRemoteTopic(String, String) - Method in interface org.apache.kafka.connect.mirror.ReplicationPolicy
+
+
Returns the remote topic name for the given topic and source cluster alias.
+
+
forName(String) - Static method in enum class org.apache.kafka.common.metrics.Sensor.RecordingLevel
+
+
Case insensitive lookup by recording level name
+
+
forName(String) - Static method in enum class org.apache.kafka.common.security.auth.SecurityProtocol
+
+
Case insensitive lookup by protocol name
+
+
forResult(R) - Static method in interface org.apache.kafka.streams.query.QueryResult
+
+
Static factory method to create a result object for a successful query.
+
+
forShareGroups() - Static method in class org.apache.kafka.clients.admin.ListGroupsOptions
+
+
Only share groups will be returned by listGroups().
+
+
forStatefulTasks() - Method in class org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils.RackAwareOptimizationParams
+
+
Return a new config object with the tasksToOptimize set to all stateful tasks in the given ApplicationState
+
+
forStatelessTasks() - Method in class org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils.RackAwareOptimizationParams
+
+
Return a new config object with the tasksToOptimize set to all stateless tasks in the given ApplicationState
+
+
forStreamsGroups() - Static method in class org.apache.kafka.clients.admin.ListGroupsOptions
+
+
Only streams groups will be returned by listGroups().
+
+
forTasks(SortedSet<TaskId>) - Method in class org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils.RackAwareOptimizationParams
+
+
Return a new config object with the provided tasksToOptimize
+
+
forTimestamp(long) - Static method in class org.apache.kafka.clients.admin.OffsetSpec
+
+
Used to retrieve the earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition
+
+
forType(EmitStrategy.StrategyType) - Static method in enum class org.apache.kafka.streams.kstream.EmitStrategy.StrategyType
+
 
+
forUnknownQueryType(Query<R>, StateStore) - Static method in interface org.apache.kafka.streams.query.QueryResult
+
+
Static factory method to create a failed query result object to indicate that the store does not know how to handle the query.
+
+
forward(K, V) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
forward(K, V) - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Forward a key/value pair to all downstream processors.
+
+
forward(K, V, To) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
forward(K, V, To) - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Forward a key/value pair to the specified downstream processors.
+
+
forward(FixedKeyRecord<K, V>) - Method in interface org.apache.kafka.streams.processor.api.FixedKeyProcessorContext
+
+
Forward a record to all child processors.
+
+
forward(FixedKeyRecord<K, V>, String) - Method in interface org.apache.kafka.streams.processor.api.FixedKeyProcessorContext
+
+
Forward a record to the specified child processor.
+
+
forward(Record<K, V>) - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
forward(Record<K, V>) - Method in interface org.apache.kafka.streams.processor.api.ProcessorContext
+
+
Forward a record to all child processors.
+
+
forward(Record<K, V>, String) - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
forward(Record<K, V>, String) - Method in interface org.apache.kafka.streams.processor.api.ProcessorContext
+
+
Forward a record to the specified child processor.
+
+
forwarded() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
+
Get all the forwarded data this context has observed.
+
+
forwarded() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
Get all the forwarded data this context has observed.
+
+
forwarded(String) - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
+
Get all the forwarded data this context has observed for a specific child by name.
+
+
forwarded(String) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
Get all the forwarded data this context has observed for a specific child by name.
+
+
FORWARDING_ADMIN_CLASS - Static variable in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
 
+
FORWARDING_ADMIN_CLASS_DEFAULT - Static variable in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
 
+
FORWARDING_ADMIN_CLASS_DOC - Static variable in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
 
+
ForwardingAdmin - Class in org.apache.kafka.clients.admin
+
+
ForwardingAdmin is the default value of forwarding.admin.class in MirrorMaker.
+
+
ForwardingAdmin(Map<String, Object>) - Constructor for class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
Frequencies - Class in org.apache.kafka.common.metrics.stats
+
+
A CompoundStat that represents a normalized distribution with a Frequency metric for each bucketed value.
+
+
Frequencies(int, double, double, Frequency...) - Constructor for class org.apache.kafka.common.metrics.stats.Frequencies
+
+
Create a Frequencies that captures the values in the specified range into the given number of buckets, where the buckets are centered around the minimum, maximum, and intermediate values.
+
+
frequency(MetricConfig, long, double) - Method in class org.apache.kafka.common.metrics.stats.Frequencies
+
+
Return the computed frequency describing the number of occurrences of the values in the bucket for the given center point, relative to the total number of occurrences in the samples.
+
+
Frequency - Class in org.apache.kafka.common.metrics.stats
+
+
Definition of a frequency metric used in a Frequencies compound statistic.
+
+
Frequency(MetricName, double) - Constructor for class org.apache.kafka.common.metrics.stats.Frequency
+
+
Create an instance with the given name and center point value.
+
+
fromBin(int) - Method in interface org.apache.kafka.common.metrics.stats.Histogram.BinScheme
+
+
Determine the value at the upper range of the specified bin.
+
+
fromBin(int) - Method in class org.apache.kafka.common.metrics.stats.Histogram.ConstantBinScheme
+
 
+
fromBin(int) - Method in class org.apache.kafka.common.metrics.stats.Histogram.LinearBinScheme
+
 
+
fromBytes(byte[]) - Method in interface org.apache.kafka.tools.api.Decoder
+
 
+
fromBytes(byte[]) - Method in class org.apache.kafka.tools.api.DefaultDecoder
+
 
+
fromBytes(byte[]) - Method in class org.apache.kafka.tools.api.IntegerDecoder
+
 
+
fromBytes(byte[]) - Method in class org.apache.kafka.tools.api.LongDecoder
+
 
+
fromBytes(byte[]) - Method in class org.apache.kafka.tools.api.StringDecoder
+
 
+
fromCode(byte) - Static method in enum class org.apache.kafka.common.acl.AclOperation
+
+
Return the AclOperation with the provided code or `AclOperation.UNKNOWN` if one cannot be found.
+
+
fromCode(byte) - Static method in enum class org.apache.kafka.common.acl.AclPermissionType
+
+
Return the AclPermissionType with the provided code or `AclPermissionType.UNKNOWN` if one cannot be found.
+
+
fromCode(byte) - Static method in enum class org.apache.kafka.common.resource.PatternType
+
+
Return the PatternType with the provided code or PatternType.UNKNOWN if one cannot be found.
+
+
fromCode(byte) - Static method in enum class org.apache.kafka.common.resource.ResourceType
+
+
Return the ResourceType with the provided code or `ResourceType.UNKNOWN` if one cannot be found.
+
+
fromCode(int) - Static method in enum class org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType
+
 
+
fromConnectData(String, Headers, Schema, Object) - Method in interface org.apache.kafka.connect.storage.Converter
+
+
Convert a Kafka Connect data object to a native object for serialization, potentially using the supplied topic and headers in the record as necessary.
+
+
fromConnectData(String, Schema, Object) - Method in interface org.apache.kafka.connect.storage.Converter
+
+
Convert a Kafka Connect data object to a native object for serialization.
+
+
fromConnectData(String, Schema, Object) - Method in class org.apache.kafka.connect.storage.StringConverter
+
 
+
fromConnectHeader(String, String, Schema, Object) - Method in interface org.apache.kafka.connect.storage.HeaderConverter
+
+
Convert the Header's value into its byte array representation.
+
+
fromConnectHeader(String, String, Schema, Object) - Method in class org.apache.kafka.connect.storage.SimpleHeaderConverter
+
 
+
fromConnectHeader(String, String, Schema, Object) - Method in class org.apache.kafka.connect.storage.StringConverter
+
 
+
fromId(byte) - Static method in enum class org.apache.kafka.clients.admin.EndpointType
+
 
+
fromLogical(Schema, BigDecimal) - Static method in class org.apache.kafka.connect.data.Decimal
+
+
Convert a value from its logical format (BigDecimal) to its encoded format (byte[]).
+
+
fromLogical(Schema, Date) - Static method in class org.apache.kafka.connect.data.Date
+
+
Convert a value from its logical format (Date) to its encoded format (int).
+
+
fromLogical(Schema, Date) - Static method in class org.apache.kafka.connect.data.Time
+
+
Convert a value from its logical format (Date) to its encoded format (int).
+
+
fromLogical(Schema, Date) - Static method in class org.apache.kafka.connect.data.Timestamp
+
+
Convert a value from its logical format (Date) to its encoded format (long).
+
+
fromMap(Map<String, ? extends Map<Integer, Long>>) - Static method in class org.apache.kafka.streams.query.Position
+
+
Create a new Position and populate it with a mapping of topic -> partition -> offset.
+
+
fromMechanismName(String) - Static method in enum class org.apache.kafka.clients.admin.ScramMechanism
+
 
+
fromNameAndType(String, QueryableStoreType<T>) - Static method in class org.apache.kafka.streams.StoreQueryParameters
+
 
+
fromProperty(String) - Static method in enum class org.apache.kafka.connect.source.SourceTask.TransactionBoundary
+
+
Parse a SourceTask.TransactionBoundary from the given string.
+
+
fromRecord(String, KafkaPrincipal, KafkaPrincipal, Collection<KafkaPrincipal>, long, long, long) - Static method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
fromString(String) - Static method in enum class org.apache.kafka.common.acl.AclOperation
+
+
Parse the given string as an ACL operation.
+
+
fromString(String) - Static method in enum class org.apache.kafka.common.acl.AclPermissionType
+
+
Parse the given string as an ACL permission.
+
+
fromString(String) - Static method in enum class org.apache.kafka.common.resource.PatternType
+
+
Return the PatternType with the provided name or PatternType.UNKNOWN if one cannot be found.
+
+
fromString(String) - Static method in enum class org.apache.kafka.common.resource.ResourceType
+
+
Parse the given string as an ACL resource type.
+
+
fromString(String) - Static method in class org.apache.kafka.common.Uuid
+
+
Creates a UUID based on a base64 string encoding used in the toString() method.
+
+
fromTime() - Method in class org.apache.kafka.streams.query.MultiVersionedKeyQuery
+
+
The starting time point of the query, if specified
+
+
fromTime(Instant) - Method in class org.apache.kafka.streams.query.MultiVersionedKeyQuery
+
+
Specifies the starting time point for the key query.
+
+
fromType(byte) - Static method in enum class org.apache.kafka.clients.admin.ScramMechanism
+
 
+
+
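The filter, filterNot, and flatMapValues entries indexed above are the stateless record transforms of the Kafka Streams DSL. As a rough illustration of how they compose, here is a minimal sketch; the topic names ("words", "tokens") and processor names are hypothetical and not taken from the index:

```java
import java.util.Arrays;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Named;
import org.apache.kafka.streams.kstream.Produced;

public class FilterExample {

    public static Topology build() {
        StreamsBuilder builder = new StreamsBuilder();

        // Hypothetical input topic of <String, String> records.
        KStream<String, String> lines =
            builder.stream("words", Consumed.with(Serdes.String(), Serdes.String()));

        lines
            // KStream.filter(Predicate, Named): keep only non-empty values.
            .filter((key, value) -> value != null && !value.isEmpty(), Named.as("drop-empty"))
            // KStream.filterNot(Predicate, Named): drop records whose value starts with '#'.
            .filterNot((key, value) -> value.startsWith("#"), Named.as("drop-comments"))
            // KStream.flatMapValues(ValueMapper): emit zero or more values per input record.
            .flatMapValues(value -> Arrays.asList(value.split("\\s+")))
            .to("tokens", Produced.with(Serdes.String(), Serdes.String()));

        return builder.build();
    }
}
```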

G

+
+
Gauge<T> - Interface in org.apache.kafka.common.metrics
+
+
A gauge metric is an instantaneous reading of a particular value.
+
+
generateNew(TopicIdPartition) - Static method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId
+
+
Creates a new RemoteLogSegmentId for the provided TopicIdPartition with a random Uuid.
+
+
generationId() - Method in class org.apache.kafka.clients.consumer.ConsumerGroupMetadata
+
 
+
generationId() - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
+
 
+
get() - Method in class org.apache.kafka.common.KafkaFuture
+
+
Waits if necessary for this future to complete, and then returns its result.
+
+
get() - Method in interface org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator.ClaimSupplier
+
 
+
get() - Method in interface org.apache.kafka.streams.kstream.TransformerSupplier
+
+
Deprecated.
+
Return a newly constructed Transformer instance.
+
+
get() - Method in interface org.apache.kafka.streams.kstream.ValueTransformerSupplier
+
+
Deprecated.
+
Return a newly constructed ValueTransformer instance.
+
+
get() - Method in interface org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier
+
+
Return a newly constructed ValueTransformerWithKey instance.
+
+
get() - Method in interface org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier
+
+
Return a newly constructed FixedKeyProcessor instance.
+
+
get() - Method in interface org.apache.kafka.streams.processor.api.ProcessorSupplier
+
+
Return a newly constructed Processor instance.
+
+
get() - Method in interface org.apache.kafka.streams.state.StoreSupplier
+
+
Return a new StateStore instance.
+
+
get(long, TimeUnit) - Method in class org.apache.kafka.common.KafkaFuture
+
+
Waits if necessary for at most the given time for this future to complete, and then returns its result, if available.
+
+
get(String) - Method in class org.apache.kafka.clients.admin.Config
+
+
Get the configuration entry with the provided name or null if there isn't one.
+
+
get(String) - Method in interface org.apache.kafka.common.config.provider.ConfigProvider
+
+
Retrieves the data at the given path.
+
+
get(String) - Method in class org.apache.kafka.common.config.provider.DirectoryConfigProvider
+
+
Retrieves the data contained in regular files in the directory given by path.
+
+
get(String) - Method in class org.apache.kafka.common.config.provider.EnvVarConfigProvider
+
 
+
get(String) - Method in class org.apache.kafka.common.config.provider.FileConfigProvider
+
+
Retrieves the data at the given Properties file.
+
+
get(String) - Method in class org.apache.kafka.connect.data.Struct
+
+
Get the value of a field, returning the default value if no value has been set yet and a default value is specified in the field's schema.
+
+
get(String, Set<String>) - Method in interface org.apache.kafka.common.config.provider.ConfigProvider
+
+
Retrieves the data with the given keys at the given path.
+
+
get(String, Set<String>) - Method in class org.apache.kafka.common.config.provider.DirectoryConfigProvider
+
+
Retrieves the data contained in the regular files named by keys in the directory given by path.
+
+
get(String, Set<String>) - Method in class org.apache.kafka.common.config.provider.EnvVarConfigProvider
+
 
+
get(String, Set<String>) - Method in class org.apache.kafka.common.config.provider.FileConfigProvider
+
+
Retrieves the data with the given keys at the given Properties file.
+
+
get(K) - Method in interface org.apache.kafka.streams.state.ReadOnlyKeyValueStore
+
+
Get the value corresponding to this key.
+
+
get(K) - Method in interface org.apache.kafka.streams.state.VersionedKeyValueStore
+
+
Get the current (i.e., latest by timestamp) record associated with this key.
+
+
get(K, long) - Method in interface org.apache.kafka.streams.state.VersionedKeyValueStore
+
+
Get the record associated with this key as of the specified timestamp (i.e., the existing record with the largest timestamp not exceeding the provided timestamp bound).
+
+
get(Bytes, long) - Method in interface org.apache.kafka.streams.state.VersionedBytesStore
+
+ +
+
get(Field) - Method in class org.apache.kafka.connect.data.Struct
+
+
Get the value of a field, returning the default value if no value has been set yet and a default value is specified in the field's schema.
+
+
getAdmin(Map<String, Object>) - Method in interface org.apache.kafka.streams.KafkaClientSupplier
+
+
Create an Admin which is used for internal topic management.
+
+
getAdminConfigs(String) - Method in class org.apache.kafka.streams.StreamsConfig
+
+
Get the configs for the admin client.
+
+
getAllStateStores() - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Get all StateStores from the topology.
+
+
getArray(String) - Method in class org.apache.kafka.connect.data.Struct
+
+
Equivalent to calling Struct.get(String) and casting the result to a List.
+
+
getAssignorInstances(List<String>, Map<String, Object>) - Static method in interface org.apache.kafka.clients.consumer.ConsumerPartitionAssignor
+
+
Get a list of configured instances of ConsumerPartitionAssignor based on the class names/types specified by ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG
+
+
getBoolean(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
getBoolean(String) - Method in class org.apache.kafka.connect.data.Struct
+
+
Equivalent to calling Struct.get(String) and casting the result to a Boolean.
+
+
getBoolean(Map<String, Object>, String, boolean) - Static method in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
getBytes(String) - Method in class org.apache.kafka.connect.data.Struct
+
+
Equivalent to calling Struct.get(String) and casting the result to a byte[].
+
+
getClass(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
getClientTags() - Method in class org.apache.kafka.streams.StreamsConfig
+
+
Get the configured client tags set with StreamsConfig.CLIENT_TAG_PREFIX prefix.
+
+
getConfiguredInstance(String, Class<T>) - Method in class org.apache.kafka.common.config.AbstractConfig
+
+
Get a configured instance of the given class specified by the given configuration key.
+
+
getConfiguredInstance(String, Class<T>, Map<String, Object>) - Method in class org.apache.kafka.common.config.AbstractConfig
+
+
Get a configured instance of the given class specified by the given configuration key.
+
+
getConfiguredInstances(String, Class<T>) - Method in class org.apache.kafka.common.config.AbstractConfig
+
+
Get a list of configured instances of the given class specified by the given configuration key.
+
+
getConfiguredInstances(String, Class<T>, Map<String, Object>) - Method in class org.apache.kafka.common.config.AbstractConfig
+
+
Get a list of configured instances of the given class specified by the given configuration key.
+
+
getConfiguredInstances(List<String>, Class<T>, Map<String, Object>) - Method in class org.apache.kafka.common.config.AbstractConfig
+
+
Get a list of configured instances of the given class specified by the given configuration key.
+
+
getConsumer(Map<String, Object>) - Method in interface org.apache.kafka.streams.KafkaClientSupplier
+
+
Create a Consumer which is used to read records of source topics.
+
+
getCurrentReplicaLogDir() - Method in class org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo
+
+
The current log directory of the replica of this partition on the given broker.
+
+
getCurrentReplicaOffsetLag() - Method in class org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo
+
+
Defined as max(HW of partition - LEO of the replica, 0).
+
+
getDouble(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
getExecutionInfo() - Method in interface org.apache.kafka.streams.query.QueryResult
+
+
If detailed execution information was requested in StateQueryRequest.enableExecutionInfo(), this method returns the execution details for this partition's result.
+
+
getFailureMessage() - Method in interface org.apache.kafka.streams.query.QueryResult
+
+
If this partition failed to execute the query, returns the failure message.
+
+
getFailureReason() - Method in interface org.apache.kafka.streams.query.QueryResult
+
+
If this partition failed to execute the query, returns the reason.
+
+
getFloat32(String) - Method in class org.apache.kafka.connect.data.Struct
+
+
Equivalent to calling Struct.get(String) and casting the result to a Float.
+
+
getFloat64(String) - Method in class org.apache.kafka.connect.data.Struct
+
+
Equivalent to calling Struct.get(String) and casting the result to a Double.
+
+
getFutureReplicaLogDir() - Method in class org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo
+
+
The future log directory of the replica of this partition on the given broker.
+
+
getFutureReplicaOffsetLag() - Method in class org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo
+
+
The LEO of the replica - LEO of the future log of this replica in the destination log directory.
+
+
getGlobalConsumer(Map<String, Object>) - Method in interface org.apache.kafka.streams.KafkaClientSupplier
+
+
Create a Consumer which is used to consume records for GlobalKTable.
+
+
getGlobalConsumerConfigs(String) - Method in class org.apache.kafka.streams.StreamsConfig
+
+
Get the configs for the global consumer.
+
+
getGlobalResult() - Method in class org.apache.kafka.streams.query.StateQueryResult
+
+
The query's result for global store queries.
+
+
getHeaders() - Method in class org.apache.kafka.streams.test.TestRecord
+
 
+
getInnerSerializer() - Method in class org.apache.kafka.common.serialization.ListSerializer
+
 
+
getInt(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
getInt16(String) - Method in class org.apache.kafka.connect.data.Struct
+
+
Equivalent to calling Struct.get(String) and casting the result to a Short.
+
+
getInt32(String) - Method in class org.apache.kafka.connect.data.Struct
+
+
Equivalent to calling Struct.get(String) and casting the result to an Integer.
+
+
getInt64(String) - Method in class org.apache.kafka.connect.data.Struct
+
+
Equivalent to calling Struct.get(String) and casting the result to a Long.
+
+
getInt8(String) - Method in class org.apache.kafka.connect.data.Struct
+
+
Equivalent to calling Struct.get(String) and casting the result to a Byte.
+
+
getInterval() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedPunctuator
+
 
+
getIntervalMs() - Method in class org.apache.kafka.streams.processor.MockProcessorContext.CapturedPunctuator
+
+
Deprecated.
+
getKafkaClientSupplier() - Method in class org.apache.kafka.streams.StreamsConfig
+
+
Return configured KafkaClientSupplier
+
+
getKey() - Method in class org.apache.kafka.streams.query.KeyQuery
+
+
Return the key that was specified for this query.
+
+
getKey() - Method in class org.apache.kafka.streams.query.WindowKeyQuery
+
 
+
getKey() - Method in class org.apache.kafka.streams.query.WindowRangeQuery
+
 
+
getKey() - Method in class org.apache.kafka.streams.test.TestRecord
+
 
+
getKeyValueStore(String) - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Get the KeyValueStore or TimestampedKeyValueStore with the given name.
+
+
getLeastSignificantBits() - Method in class org.apache.kafka.common.Uuid
+
+
Returns the least significant bits of the UUID's 128-bit value.
+
+
getList(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
getLong(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
getLong(Map<String, Object>, String, long) - Static method in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
getLowerBound() - Method in class org.apache.kafka.streams.query.RangeQuery
+
+
The lower bound of the query, if specified.
+
+
getMainConsumerConfigs(String, String, int) - Method in class org.apache.kafka.streams.StreamsConfig
+
+
Get the configs for the main consumer.
+
+
getMap(String) - Method in class org.apache.kafka.connect.data.Struct
+
+
Equivalent to calling Struct.get(String) and casting the result to a Map.
+
+
getMostSignificantBits() - Method in class org.apache.kafka.common.Uuid
+
+
Returns the most significant bits of the UUID's 128-bit value.
+
+
getName() - Method in class org.apache.kafka.common.security.auth.KafkaPrincipal
+
 
+
getName() - Method in enum class org.apache.kafka.connect.data.Schema.Type
+
 
+
getName() - Method in enum class org.apache.kafka.connect.storage.ConverterType
+
 
+
getNow(T) - Method in class org.apache.kafka.common.KafkaFuture
+
+
Returns the result value (or throws any encountered exception) if completed, else returns + the given valueIfAbsent.
+
+
getOnlyPartitionResult() - Method in class org.apache.kafka.streams.query.StateQueryResult
+
+
For queries that are expected to match records in only one partition, returns the result.
+
+
getPartitionPositions(String) - Method in class org.apache.kafka.streams.query.Position
+
+
Return the partition -> offset mapping for a specific topic.
+
+
getPartitionResults() - Method in class org.apache.kafka.streams.query.StateQueryResult
+
+
The query's result for each partition that executed the query.
+
+
getPartitions() - Method in class org.apache.kafka.streams.query.StateQueryRequest
+
+
If the request is for specific partitions, return the set of partitions to query.
+
+
getPassword(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
getPosition() - Method in interface org.apache.kafka.streams.processor.StateStore
+
+
Returns the position the state store is at with respect to the input topic/partitions
+
+
getPosition() - Method in interface org.apache.kafka.streams.query.QueryResult
+
+
This state partition's exact position in its history when this query was executed.
+
+
getPosition() - Method in class org.apache.kafka.streams.query.StateQueryResult
+
+
The position of the state store at the moment it executed the query.
+
+
getPositionBound() - Method in class org.apache.kafka.streams.query.StateQueryRequest
+
+
The bound that this request places on its query, in terms of the partitions' positions against its inputs.
+
+
getPrincipalType() - Method in class org.apache.kafka.common.security.auth.KafkaPrincipal
+
 
+
getProducer(Map<String, Object>) - Method in interface org.apache.kafka.streams.KafkaClientSupplier
+
+
Create a Producer which is used to write records to sink topics.
+
+
getProducerConfigs(String) - Method in class org.apache.kafka.streams.StreamsConfig
+
+
Get the configs for the producer.
+
+
getProvider() - Method in interface org.apache.kafka.common.security.auth.SecurityProviderCreator
+
+
Generate the configured security provider.
+
+
getPunctuator() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedPunctuator
+
 
+
getPunctuator() - Method in class org.apache.kafka.streams.processor.MockProcessorContext.CapturedPunctuator
+
+
Deprecated.
+
getQuery() - Method in class org.apache.kafka.streams.query.StateQueryRequest
+
+
The query this request is meant to run.
+
+
getQueueSize() - Method in class org.apache.kafka.streams.TestOutputTopic
+
+
Get the number of unread records in the topic queue.
+
+
getRecordTime() - Method in class org.apache.kafka.streams.test.TestRecord
+
 
+
getRestoreConsumer(Map<String, Object>) - Method in interface org.apache.kafka.streams.KafkaClientSupplier
+
+
Create a Consumer which is used to read records to restore StateStores.
+
+
getRestoreConsumerConfigs(String) - Method in class org.apache.kafka.streams.StreamsConfig
+
+
Get the configs for the restore-consumer.
+
+
getResult() - Method in interface org.apache.kafka.streams.query.QueryResult
+
+
Returns the result of executing the query on one partition.
+
+
getSensor(String) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Get the sensor with the given name if it exists
+
+
getSessionStore(String) - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Get the SessionStore with the given name.
+
+
getShort(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
getStateStore(String) - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
getStateStore(String) - Method in interface org.apache.kafka.streams.processor.api.ProcessingContext
+
+
Get the state store given the store name.
+
+
getStateStore(String) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
getStateStore(String) - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Get the state store given the store name.
+
+
getStateStore(String) - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Get the StateStore with the given name.
+
+
getStateStoreContext() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
+
Used to get a StateStoreContext for use with StateStore.init(StateStoreContext, StateStore) if you need to initialize a store for your tests.
+
+
getStoreName() - Method in class org.apache.kafka.streams.query.StateQueryRequest
+
+
The name of the store this request is for.
+
+
getString(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
getString(String) - Method in class org.apache.kafka.connect.data.Struct
+
+
Equivalent to calling Struct.get(String) and casting the result to a String.
+
+
getString(Map<String, Object>, String, String) - Static method in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
getStruct(String) - Method in class org.apache.kafka.connect.data.Struct
+
+
Equivalent to calling Struct.get(String) and casting the result to a Struct.
+
+
getTaskConfig() - Method in class org.apache.kafka.streams.TopologyConfig
+
 
+
getTimeFrom() - Method in class org.apache.kafka.streams.query.WindowKeyQuery
+
 
+
getTimeFrom() - Method in class org.apache.kafka.streams.query.WindowRangeQuery
+
 
+
getTimestampedKeyValueStore(String) - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Get the TimestampedKeyValueStore with the given name.
+
+
getTimestampedWindowStore(String) - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Get the TimestampedWindowStore with the given name.
+
+
getTimeTo() - Method in class org.apache.kafka.streams.query.WindowKeyQuery
+
 
+
getTimeTo() - Method in class org.apache.kafka.streams.query.WindowRangeQuery
+
 
+
getTopics() - Method in class org.apache.kafka.streams.query.Position
+
+
Return the topics that are represented in this Position.
+
+
getType() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedPunctuator
+
 
+
getType() - Method in class org.apache.kafka.streams.processor.MockProcessorContext.CapturedPunctuator
+
+
Deprecated.
+
getUpperBound() - Method in class org.apache.kafka.streams.query.RangeQuery
+
+
The upper bound of the query, if specified
+
+
getValue() - Method in class org.apache.kafka.streams.test.TestRecord
+
 
+
getValueOrNull(ValueAndTimestamp<V>) - Static method in class org.apache.kafka.streams.state.ValueAndTimestamp
+
+
Return the wrapped value of the given valueAndTimestamp parameter if the parameter is not null.
+
+
getVersionedKeyValueStore(String) - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Get the VersionedKeyValueStore with the given name.
+
+
getWindowSize() - Method in class org.apache.kafka.streams.kstream.TimeWindowedDeserializer
+
 
+
getWindowStore(String) - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Get the WindowStore or TimestampedWindowStore with the given name.
+
+
getWithoutDefault(String) - Method in class org.apache.kafka.connect.data.Struct
+
+
Get the underlying raw value for the field without accounting for default values.
+
+
GLOBAL_CONSUMER_PREFIX - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix used to override consumer configs for the global consumer client from the general consumer client configs.
+
+
globalConsumerPrefix(String) - Static method in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix a property with StreamsConfig.GLOBAL_CONSUMER_PREFIX.
+
+
GlobalKTable<K,V> - Interface in org.apache.kafka.streams.kstream
+
+
GlobalKTable is an abstraction of a changelog stream from a primary-keyed table.
+
+
globalStores() - Method in interface org.apache.kafka.streams.TopologyDescription
+
+
All global stores of the represented topology.
+
+
globalTable(String) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Create a GlobalKTable for the specified topic.
+
+
globalTable(String, Consumed<K, V>) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Create a GlobalKTable for the specified topic.
+
+
globalTable(String, Consumed<K, V>, Materialized<K, V, KeyValueStore<Bytes, byte[]>>) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Create a GlobalKTable for the specified topic.
+
+
globalTable(String, Materialized<K, V, KeyValueStore<Bytes, byte[]>>) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Create a GlobalKTable for the specified topic.
+
+
grace(Duration) - Method in class org.apache.kafka.streams.kstream.JoinWindows
+
+
Deprecated.
+
+
gracePeriod() - Method in class org.apache.kafka.streams.kstream.Joined
+
+
Deprecated.
Since 4.0 and should not be used any longer.
+
+
+
gracePeriodMs() - Method in class org.apache.kafka.streams.kstream.JoinWindows
+
 
+
gracePeriodMs() - Method in class org.apache.kafka.streams.kstream.SessionWindows
+
 
+
gracePeriodMs() - Method in class org.apache.kafka.streams.kstream.SlidingWindows
+
 
+
gracePeriodMs() - Method in class org.apache.kafka.streams.kstream.TimeWindows
+
 
+
gracePeriodMs() - Method in class org.apache.kafka.streams.kstream.UnlimitedWindows
+
 
+
gracePeriodMs() - Method in class org.apache.kafka.streams.kstream.Windows
+
+
Return the window grace period (the time to admit out-of-order events after the end of the window). Delay is defined as (stream_time - record_timestamp).
+
+
group - Variable in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
group() - Method in class org.apache.kafka.common.MetricName
+
 
+
group() - Method in class org.apache.kafka.common.MetricNameTemplate
+
+
Get the name of the group.
+
+
GROUP - Enum constant in enum class org.apache.kafka.common.config.ConfigResource.Type
+
 
+
GROUP - Enum constant in enum class org.apache.kafka.common.resource.ResourceType
+
+
A consumer group.
+
+
GROUP_ID_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
group.id
+
+
GROUP_INSTANCE_ID_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
group.instance.id
+
+
GROUP_PROTOCOL_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
group.protocol
+
+
GROUP_PROTOCOL_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
group.protocol
+
+
GROUP_PROTOCOL_DOC - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
GROUP_REMOTE_ASSIGNOR_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
group.remote.assignor
+
+
GROUP_REMOTE_ASSIGNOR_DOC - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
groupAssignment() - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupAssignment
+
 
+
GroupAssignment - Class in org.apache.kafka.coordinator.group.api.assignor
+
+
The partition assignment for a consumer group.
+
+
GroupAssignment(Map<String, ConsumerPartitionAssignor.Assignment>) - Constructor for class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupAssignment
+
 
+
GroupAssignment(Map<String, MemberAssignment>) - Constructor for class org.apache.kafka.coordinator.group.api.assignor.GroupAssignment
+
 
+
GroupAuthorizationException - Exception in org.apache.kafka.common.errors
+
 
+
GroupAuthorizationException(String) - Constructor for exception org.apache.kafka.common.errors.GroupAuthorizationException
+
 
+
GroupAuthorizationException(String, String) - Constructor for exception org.apache.kafka.common.errors.GroupAuthorizationException
+
 
+
groupBy(KeyValueMapper<? super K, ? super V, ? extends KeyValue<? extends KR, ? extends VR>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Re-groups the records of this KTable using the provided KeyValueMapper and default serializers and deserializers.
+
+
groupBy(KeyValueMapper<? super K, ? super V, ? extends KeyValue<? extends KR, ? extends VR>>, Grouped<KR, VR>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Re-groups the records of this KTable using the provided KeyValueMapper and Serdes as specified by Grouped.
+
+
groupBy(KeyValueMapper<? super K, ? super V, KOut>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Group the records of this KStream on a new key (in contrast to KStream.groupByKey()).
+
+
groupBy(KeyValueMapper<? super K, ? super V, KOut>, Grouped<KOut, V>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
groupByKey() - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Group the records by their current key into a KGroupedStream while preserving the original values.
+
+
groupByKey(Grouped<K, V>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
Grouped<K,V> - Class in org.apache.kafka.streams.kstream
+
+
The class that is used to capture the key and value Serdes and set the part of the name used for repartition topics when performing KStream.groupBy(KeyValueMapper, Grouped), KStream.groupByKey(Grouped), or KTable.groupBy(KeyValueMapper, Grouped) operations.
+
+
groupEpoch() - Method in class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+
The epoch of the consumer group.
+
+
groupEpoch() - Method in class org.apache.kafka.clients.admin.ShareGroupDescription
+
+
The epoch of the share group.
+
+
groupEpoch() - Method in class org.apache.kafka.clients.admin.StreamsGroupDescription
+
+
The epoch of the streams group.
+
+
groupId() - Method in class org.apache.kafka.clients.admin.ClassicGroupDescription
+
+
The id of the classic group.
+
+
groupId() - Method in class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+
The id of the consumer group.
+
+
groupId() - Method in class org.apache.kafka.clients.admin.ConsumerGroupListing
+
+
Deprecated.
+
Consumer Group Id
+
+
groupId() - Method in class org.apache.kafka.clients.admin.GroupListing
+
+
The group Id.
+
+
groupId() - Method in class org.apache.kafka.clients.admin.ShareGroupDescription
+
+
The id of the share group.
+
+
groupId() - Method in class org.apache.kafka.clients.admin.StreamsGroupDescription
+
+
The id of the streams group.
+
+
groupId() - Method in class org.apache.kafka.clients.consumer.ConsumerGroupMetadata
+
 
+
groupId() - Method in exception org.apache.kafka.common.errors.GroupAuthorizationException
+
+
Return the group ID that failed authorization.
+
+
GroupIdNotFoundException - Exception in org.apache.kafka.common.errors
+
 
+
GroupIdNotFoundException(String) - Constructor for exception org.apache.kafka.common.errors.GroupIdNotFoundException
+
 
+
groupInstanceId() - Method in class org.apache.kafka.clients.admin.MemberDescription
+
+
The instance id of the group member.
+
+
groupInstanceId() - Method in class org.apache.kafka.clients.admin.MemberToRemove
+
 
+
groupInstanceId() - Method in class org.apache.kafka.clients.consumer.ConsumerGroupMetadata
+
 
+
groupInstanceId() - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
+
 
+
GroupListing - Class in org.apache.kafka.clients.admin
+
+
A listing of a group in the cluster.
+
+
GroupListing(String, Optional<GroupType>, String, Optional<GroupState>) - Constructor for class org.apache.kafka.clients.admin.GroupListing
+
+
Create an instance with the specified parameters.
+
+
GroupMaxSizeReachedException - Exception in org.apache.kafka.common.errors
+
+
Indicates that a group is already at its configured maximum capacity and cannot accommodate more members
+
+
GroupMaxSizeReachedException(String) - Constructor for exception org.apache.kafka.common.errors.GroupMaxSizeReachedException
+
 
+
groupMembershipOperation() - Method in class org.apache.kafka.clients.consumer.CloseOptions
+
 
+
groupMembershipOperation(CloseOptions.GroupMembershipOperation) - Static method in class org.apache.kafka.clients.consumer.CloseOptions
+
+
Static method to create a CloseOptions with a specified group membership operation.
+
+
groupMetadata() - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
groupMetadata() - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Return the current group metadata associated with this consumer.
+
+
groupMetadata() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
GroupNotEmptyException - Exception in org.apache.kafka.common.errors
+
 
+
GroupNotEmptyException(String) - Constructor for exception org.apache.kafka.common.errors.GroupNotEmptyException
+
 
+
groupPartitions(List<T>, int) - Static method in class org.apache.kafka.connect.util.ConnectorUtils
+
+
Given a list of elements and a target number of groups, generates a list of groups of elements to match the target number of groups, spreading them evenly among the groups.
+
+
GroupProtocol - Enum Class in org.apache.kafka.clients.consumer
+
 
+
GroupProtocol - Enum Class in org.apache.kafka.streams
+
 
+
groups() - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Get the groups for the configuration
+
+
GroupSpec - Interface in org.apache.kafka.coordinator.group.api.assignor
+
+
The group metadata specifications required to compute the target assignment.
+
+
groupState() - Method in class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+
The group state, or UNKNOWN if the state is too new for us to parse.
+
+
groupState() - Method in class org.apache.kafka.clients.admin.ConsumerGroupListing
+
+
Deprecated.
+
Group state
+
+
groupState() - Method in class org.apache.kafka.clients.admin.GroupListing
+
+
The group state.
+
+
groupState() - Method in class org.apache.kafka.clients.admin.ShareGroupDescription
+
+
The group state, or UNKNOWN if the state is too new for us to parse.
+
+
groupState() - Method in class org.apache.kafka.clients.admin.StreamsGroupDescription
+
+
The state of the streams group, or UNKNOWN if the state is too new for us to parse.
+
+
GroupState - Enum Class in org.apache.kafka.common
+
+
The group state.
+
+
groupStates() - Method in class org.apache.kafka.clients.admin.ListConsumerGroupsOptions
+
+
Deprecated.
+
Returns the list of group states that are requested or empty if no states have been specified.
+
+
groupStates() - Method in class org.apache.kafka.clients.admin.ListGroupsOptions
+
+
Returns the list of group states that are requested or empty if no states have been specified.
+
+
groupStatesForType(GroupType) - Static method in enum class org.apache.kafka.common.GroupState
+
 
+
GroupSubscribedToTopicException - Exception in org.apache.kafka.common.errors
+
 
+
GroupSubscribedToTopicException(String) - Constructor for exception org.apache.kafka.common.errors.GroupSubscribedToTopicException
+
 
+
groupSubscription() - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription
+
 
+
GroupSubscription(Map<String, ConsumerPartitionAssignor.Subscription>) - Constructor for class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription
+
 
+
GroupType - Enum Class in org.apache.kafka.common
+
 
+
GSSAPI_MECHANISM - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
+
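The groupBy, groupByKey, and Grouped entries indexed above form the grouping step that precedes aggregations in the Kafka Streams DSL. A minimal sketch of a re-key-and-count pipeline, with hypothetical topic and store names:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.state.KeyValueStore;

public class GroupByExample {

    public static Topology build() {
        StreamsBuilder builder = new StreamsBuilder();

        // Hypothetical input topic keyed by user id, valued by page id.
        KStream<String, String> clicks =
            builder.stream("clicks", Consumed.with(Serdes.String(), Serdes.String()));

        // KStream.groupBy(KeyValueMapper, Grouped): re-key by page id; Grouped supplies the
        // serdes used for the repartition topic created by the re-keying.
        KTable<String, Long> countsByPage = clicks
            .groupBy((userId, page) -> page, Grouped.with(Serdes.String(), Serdes.String()))
            // Count occurrences per page, materialized in a named key-value store.
            .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-by-page"));

        countsByPage.toStream()
            .to("page-counts", Produced.with(Serdes.String(), Serdes.Long()));

        return builder.build();
    }
}
```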

H

+
+
handle(Throwable) - Method in interface org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler
+
+
Inspect the exception received in a stream thread and respond with an action.
+
+
handle(Callback[]) - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler
+
 
+
handle(Callback[]) - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler
+
 
+
handle(ProducerRecord<byte[], byte[]>, Exception) - Method in class org.apache.kafka.streams.errors.DefaultProductionExceptionHandler
+
+ +
+
handle(ProducerRecord<byte[], byte[]>, Exception) - Method in interface org.apache.kafka.streams.errors.ProductionExceptionHandler
+
+ +
+
handle(ErrorHandlerContext, ConsumerRecord<byte[], byte[]>, Exception) - Method in interface org.apache.kafka.streams.errors.DeserializationExceptionHandler
+
+
Inspect a record and the exception received.
+
+
handle(ErrorHandlerContext, ConsumerRecord<byte[], byte[]>, Exception) - Method in class org.apache.kafka.streams.errors.LogAndContinueExceptionHandler
+
 
+
handle(ErrorHandlerContext, ConsumerRecord<byte[], byte[]>, Exception) - Method in class org.apache.kafka.streams.errors.LogAndFailExceptionHandler
+
 
+
handle(ErrorHandlerContext, ProducerRecord<byte[], byte[]>, Exception) - Method in class org.apache.kafka.streams.errors.DefaultProductionExceptionHandler
+
 
+
handle(ErrorHandlerContext, ProducerRecord<byte[], byte[]>, Exception) - Method in interface org.apache.kafka.streams.errors.ProductionExceptionHandler
+
+
Inspect a record that we attempted to produce, and the exception that resulted from attempting to produce it, and determine whether to continue or stop processing.
+
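A hedged sketch of what an implementation might look like, assuming the context-aware overload can be overridden on its own; the class name and the choice to skip oversized records are illustrative only:

```java
import java.util.Map;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.streams.errors.ErrorHandlerContext;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

public class DropOversizedRecordsHandler implements ProductionExceptionHandler {
    @Override
    public ProductionExceptionHandlerResponse handle(ErrorHandlerContext context,
                                                     ProducerRecord<byte[], byte[]> record,
                                                     Exception exception) {
        // Skip records that are too large; fail the task for anything else.
        return exception instanceof RecordTooLargeException
                ? ProductionExceptionHandlerResponse.CONTINUE
                : ProductionExceptionHandlerResponse.FAIL;
    }

    @Override
    public void configure(Map<String, ?> configs) { }
}
```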
+
handle(ErrorHandlerContext, Record<?, ?>, Exception) - Method in class org.apache.kafka.streams.errors.LogAndContinueProcessingExceptionHandler
+
 
+
handle(ErrorHandlerContext, Record<?, ?>, Exception) - Method in class org.apache.kafka.streams.errors.LogAndFailProcessingExceptionHandler
+
 
+
handle(ErrorHandlerContext, Record<?, ?>, Exception) - Method in interface org.apache.kafka.streams.errors.ProcessingExceptionHandler
+
+
Inspect a record and the exception received.
+
+
handle(ProcessorContext, ConsumerRecord<byte[], byte[]>, Exception) - Method in interface org.apache.kafka.streams.errors.DeserializationExceptionHandler
+
+ +
+
handle(ProcessorContext, ConsumerRecord<byte[], byte[]>, Exception) - Method in class org.apache.kafka.streams.errors.LogAndContinueExceptionHandler
+
+ +
+
handle(ProcessorContext, ConsumerRecord<byte[], byte[]>, Exception) - Method in class org.apache.kafka.streams.errors.LogAndFailExceptionHandler
+
+ +
+
handleSerializationException(ProducerRecord, Exception) - Method in interface org.apache.kafka.streams.errors.ProductionExceptionHandler
+
+ +
+
handleSerializationException(ErrorHandlerContext, ProducerRecord, Exception, ProductionExceptionHandler.SerializationExceptionOrigin) - Method in interface org.apache.kafka.streams.errors.ProductionExceptionHandler
+
+
Handles a serialization exception and determines whether processing should continue.
+
+
hasCompletedShutdown() - Method in enum class org.apache.kafka.streams.KafkaStreams.State
+
 
+
hasDefault() - Method in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
hasExpired() - Method in class org.apache.kafka.common.metrics.Sensor
+
+
Return true if the Sensor is eligible for removal due to inactivity.
+
+
hashCode() - Method in class org.apache.kafka.clients.admin.AbortTransactionSpec
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.AlterConfigOp
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ClassicGroupDescription
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ClientMetricsResourceListing
+
+
Deprecated.
+
hashCode() - Method in class org.apache.kafka.clients.admin.Config
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ConfigEntry.ConfigSynonym
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ConfigEntry
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ConsumerGroupListing
+
+
Deprecated.
+
hashCode() - Method in class org.apache.kafka.clients.admin.DescribeProducersOptions
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.FeatureMetadata
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.FeatureUpdate
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.FinalizedVersionRange
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.GroupListing
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ListShareGroupOffsetsSpec
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ListTopicsOptions
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ListTransactionsOptions
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.MemberAssignment
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.MemberDescription
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.MemberToRemove
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.NewTopic
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ProducerState
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.QuorumInfo
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.QuorumInfo.Node
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.QuorumInfo.ReplicaState
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.RaftVoterEndpoint
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.RecordsToDelete
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ScramCredentialInfo
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ShareGroupDescription
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ShareMemberAssignment
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.ShareMemberDescription
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.StreamsGroupDescription
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberAssignment
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberAssignment.TaskIds
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription.Endpoint
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription.TaskOffset
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription.TopicInfo
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.SupportedVersionRange
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.TopicDescription
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.TransactionDescription
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.TransactionListing
+
 
+
hashCode() - Method in class org.apache.kafka.clients.admin.UserScramCredentialsDescription
+
 
+
hashCode() - Method in class org.apache.kafka.clients.consumer.ConsumerGroupMetadata
+
 
+
hashCode() - Method in class org.apache.kafka.clients.consumer.OffsetAndMetadata
+
 
+
hashCode() - Method in class org.apache.kafka.clients.consumer.OffsetAndTimestamp
+
 
+
hashCode() - Method in class org.apache.kafka.clients.consumer.SubscriptionPattern
+
 
+
hashCode() - Method in class org.apache.kafka.clients.producer.PreparedTxnState
+
 
+
hashCode() - Method in class org.apache.kafka.clients.producer.ProducerRecord
+
 
+
hashCode() - Method in class org.apache.kafka.common.acl.AccessControlEntry
+
 
+
hashCode() - Method in class org.apache.kafka.common.acl.AccessControlEntryFilter
+
 
+
hashCode() - Method in class org.apache.kafka.common.acl.AclBinding
+
 
+
hashCode() - Method in class org.apache.kafka.common.acl.AclBindingFilter
+
 
+
hashCode() - Method in class org.apache.kafka.common.Cluster
+
 
+
hashCode() - Method in class org.apache.kafka.common.ClusterResource
+
 
+
hashCode() - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
hashCode() - Method in class org.apache.kafka.common.config.ConfigResource
+
 
+
hashCode() - Method in class org.apache.kafka.common.config.ConfigValue
+
 
+
hashCode() - Method in class org.apache.kafka.common.Endpoint
+
 
+
hashCode() - Method in class org.apache.kafka.common.MetricName
+
 
+
hashCode() - Method in class org.apache.kafka.common.MetricNameTemplate
+
 
+
hashCode() - Method in class org.apache.kafka.common.metrics.Quota
+
 
+
hashCode() - Method in class org.apache.kafka.common.Node
+
 
+
hashCode() - Method in class org.apache.kafka.common.PartitionInfo
+
 
+
hashCode() - Method in class org.apache.kafka.common.quota.ClientQuotaAlteration.Op
+
 
+
hashCode() - Method in class org.apache.kafka.common.quota.ClientQuotaEntity
+
 
+
hashCode() - Method in class org.apache.kafka.common.quota.ClientQuotaFilter
+
 
+
hashCode() - Method in class org.apache.kafka.common.quota.ClientQuotaFilterComponent
+
 
+
hashCode() - Method in class org.apache.kafka.common.resource.Resource
+
 
+
hashCode() - Method in class org.apache.kafka.common.resource.ResourcePattern
+
 
+
hashCode() - Method in class org.apache.kafka.common.resource.ResourcePatternFilter
+
 
+
hashCode() - Method in class org.apache.kafka.common.security.auth.KafkaPrincipal
+
 
+
hashCode() - Method in class org.apache.kafka.common.security.auth.SaslExtensions
+
+
Implements hashCode using the native implementation from Object.hashCode().
+
+
hashCode() - Method in class org.apache.kafka.common.security.token.delegation.DelegationToken
+
 
+
hashCode() - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
hashCode() - Method in class org.apache.kafka.common.TopicIdPartition
+
 
+
hashCode() - Method in class org.apache.kafka.common.TopicPartition
+
 
+
hashCode() - Method in class org.apache.kafka.common.TopicPartitionInfo
+
 
+
hashCode() - Method in class org.apache.kafka.common.TopicPartitionReplica
+
 
+
hashCode() - Method in class org.apache.kafka.common.Uuid
+
+
Returns a hash code for this UUID
+
+
hashCode() - Method in class org.apache.kafka.connect.connector.ConnectRecord
+
 
+
hashCode() - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
hashCode() - Method in class org.apache.kafka.connect.data.Field
+
 
+
hashCode() - Method in class org.apache.kafka.connect.data.SchemaAndValue
+
 
+
hashCode() - Method in class org.apache.kafka.connect.data.Struct
+
 
+
hashCode() - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
hashCode() - Method in class org.apache.kafka.connect.health.AbstractState
+
 
+
hashCode() - Method in class org.apache.kafka.connect.health.ConnectorHealth
+
 
+
hashCode() - Method in class org.apache.kafka.connect.health.TaskState
+
 
+
hashCode() - Method in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
hashCode() - Method in class org.apache.kafka.connect.mirror.SourceAndTarget
+
 
+
hashCode() - Method in class org.apache.kafka.connect.sink.SinkRecord
+
 
+
hashCode() - Method in class org.apache.kafka.connect.source.SourceRecord
+
 
+
hashCode() - Method in class org.apache.kafka.coordinator.group.api.assignor.GroupAssignment
+
 
+
hashCode() - Method in class org.apache.kafka.server.authorizer.Action
+
 
+
hashCode() - Method in class org.apache.kafka.server.log.remote.storage.LogSegmentData
+
 
+
hashCode() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId
+
 
+
hashCode() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata.CustomMetadata
+
 
+
hashCode() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
 
+
hashCode() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate
+
 
+
hashCode() - Method in class org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteMetadata
+
 
+
hashCode() - Method in class org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata
+
 
+
hashCode() - Method in class org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata
+
 
+
hashCode() - Method in class org.apache.kafka.streams.AutoOffsetReset
+
 
+
hashCode() - Method in class org.apache.kafka.streams.KeyQueryMetadata
+
 
+
hashCode() - Method in class org.apache.kafka.streams.KeyValue
+
 
+
hashCode() - Method in class org.apache.kafka.streams.kstream.Consumed
+
 
+
hashCode() - Method in class org.apache.kafka.streams.kstream.JoinWindows
+
 
+
hashCode() - Method in class org.apache.kafka.streams.kstream.Produced
+
 
+
hashCode() - Method in class org.apache.kafka.streams.kstream.SessionWindows
+
 
+
hashCode() - Method in class org.apache.kafka.streams.kstream.SlidingWindows
+
 
+
hashCode() - Method in class org.apache.kafka.streams.kstream.TimeWindows
+
 
+
hashCode() - Method in class org.apache.kafka.streams.kstream.UnlimitedWindows
+
 
+
hashCode() - Method in class org.apache.kafka.streams.kstream.Window
+
 
+
hashCode() - Method in class org.apache.kafka.streams.kstream.Windowed
+
 
+
hashCode() - Method in class org.apache.kafka.streams.LagInfo
+
 
+
hashCode() - Method in class org.apache.kafka.streams.processor.api.FixedKeyRecord
+
 
+
hashCode() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedForward
+
 
+
hashCode() - Method in class org.apache.kafka.streams.processor.api.Record
+
 
+
hashCode() - Method in class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask
+
 
+
hashCode() - Method in class org.apache.kafka.streams.processor.assignment.ProcessId
+
 
+
hashCode() - Method in class org.apache.kafka.streams.processor.TaskId
+
 
+
hashCode() - Method in class org.apache.kafka.streams.processor.To
+
+
Equality is implemented in support of tests, *not* for use in Hash collections, since this class is mutable.
+
+
hashCode() - Method in class org.apache.kafka.streams.query.Position
+
 
+
hashCode() - Method in class org.apache.kafka.streams.query.PositionBound
+
 
+
hashCode() - Method in class org.apache.kafka.streams.state.DslKeyValueParams
+
 
+
hashCode() - Method in class org.apache.kafka.streams.state.DslSessionParams
+
 
+
hashCode() - Method in class org.apache.kafka.streams.state.DslWindowParams
+
 
+
hashCode() - Method in class org.apache.kafka.streams.state.HostInfo
+
 
+
hashCode() - Method in class org.apache.kafka.streams.state.ValueAndTimestamp
+
 
+
hashCode() - Method in class org.apache.kafka.streams.state.VersionedRecord
+
 
+
hashCode() - Method in class org.apache.kafka.streams.StoreQueryParameters
+
 
+
hashCode() - Method in interface org.apache.kafka.streams.StreamsMetadata
+
+
Returns the hash code value for this TaskMetadata.
+
+
hashCode() - Method in interface org.apache.kafka.streams.TaskMetadata
+
+
Returns the hash code value for this TaskMetadata.
+
+
hashCode() - Method in class org.apache.kafka.streams.test.TestRecord
+
 
+
hashCode() - Method in interface org.apache.kafka.streams.ThreadMetadata
+
+
Returns the hash code value for this ThreadMetadata.
+
+
hasMetrics() - Method in class org.apache.kafka.common.metrics.Sensor
+
+
Return if metrics were registered with this sensor.
+
+
hasNotStarted() - Method in enum class org.apache.kafka.streams.KafkaStreams.State
+
 
+
hasOffset() - Method in class org.apache.kafka.clients.producer.RecordMetadata
+
+
Indicates whether the record metadata includes the offset.
+
+
hasRack() - Method in class org.apache.kafka.common.Node
+
+
True if this node has a defined rack
+
+
hasStartedOrFinishedShuttingDown() - Method in enum class org.apache.kafka.streams.KafkaStreams.State
+
 
+
hasTimestamp() - Method in class org.apache.kafka.clients.producer.RecordMetadata
+
+
Indicates whether the record metadata includes the timestamp.
+
+
hasTransaction() - Method in class org.apache.kafka.clients.producer.PreparedTxnState
+
+
Checks if this preparedTxnState represents an initialized transaction with a valid producer ID that is not -1 (the uninitialized value).
+
+
Header - Interface in org.apache.kafka.common.header
+
 
+
Header - Interface in org.apache.kafka.connect.header
+
+
A Header is a key-value pair, and multiple headers can be included with the key, value, and timestamp in each Kafka message.
+
+
HEADER - Enum constant in enum class org.apache.kafka.connect.storage.ConverterType
+
 
+
HEADER_SCHEMA - Static variable in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
HEADER_SCHEMA - Static variable in class org.apache.kafka.connect.mirror.Heartbeat
+
 
+
HeaderConverter - Interface in org.apache.kafka.connect.storage
+
+
The HeaderConverter interface provides support for translating between Kafka Connect's runtime data format and byte[].
+
+
headers() - Method in class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
The headers (never null)
+
+
headers() - Method in class org.apache.kafka.clients.producer.ProducerRecord
+
 
+
headers() - Method in exception org.apache.kafka.common.errors.RecordDeserializationException
+
 
+
headers() - Method in class org.apache.kafka.connect.connector.ConnectRecord
+
+
Get the headers for this record.
+
+
headers() - Method in interface org.apache.kafka.streams.errors.ErrorHandlerContext
+
+
Return the headers of the current source record; could be an empty header if it is not available.
+
+
headers() - Method in class org.apache.kafka.streams.processor.api.FixedKeyRecord
+
+
The headers of the record.
+
+
headers() - Method in class org.apache.kafka.streams.processor.api.Record
+
+
The headers of the record.
+
+
headers() - Method in class org.apache.kafka.streams.processor.MockProcessorContext.CapturedForward
+
+
Deprecated.
+
headers() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
Returns the headers of the current input record; could be null if it is not available.
+
+
headers() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return the headers of the current input record; could be an empty header if it is not available.
+
+
headers() - Method in interface org.apache.kafka.streams.processor.RecordContext
+
+
Return the headers of the current input record; could be an empty header if it is not available.
+
+
headers() - Method in class org.apache.kafka.streams.test.TestRecord
+
 
+
headers(String) - Method in interface org.apache.kafka.common.header.Headers
+
+
Returns all headers for the given key, in the order they were added in, if present.
+
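A small illustrative example showing how multiple values under one key come back in insertion order; the topic, key, and header values are made up:

```java
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;

public class HeadersByKeyExample {
    public static void main(String[] args) {
        // Hypothetical record; headers live on the record itself.
        ProducerRecord<String, String> record = new ProducerRecord<>("orders", "key", "value");
        record.headers().add("trace-id", "abc".getBytes());
        record.headers().add("trace-id", "def".getBytes());
        // headers(String) returns the values for that key in the order they were added.
        for (Header header : record.headers().headers("trace-id")) {
            System.out.println(new String(header.value()));
        }
    }
}
```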
+
Headers - Interface in org.apache.kafka.common.header
+
 
+
Headers - Interface in org.apache.kafka.connect.header
+
+
A mutable ordered collection of Header objects.
+
+
Headers.HeaderTransform - Interface in org.apache.kafka.connect.header
+
+
A function to transform the supplied Header.
+
+
Heartbeat - Class in org.apache.kafka.connect.mirror
+
+
Heartbeat records emitted by MirrorHeartbeatConnector.
+
+
Heartbeat(String, String, long) - Constructor for class org.apache.kafka.connect.mirror.Heartbeat
+
 
+
HEARTBEAT_INTERVAL_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
heartbeat.interval.ms
+
+
heartbeatsTopic() - Method in interface org.apache.kafka.connect.mirror.ReplicationPolicy
+
+
Returns the name of heartbeats topic.
+
+
heartbeatTopics() - Method in class org.apache.kafka.connect.mirror.MirrorClient
+
+
Finds all heartbeats topics on this cluster.
+
+
heartbeatTopics(Map<String, Object>) - Static method in class org.apache.kafka.connect.mirror.RemoteClusterUtils
+
+
Finds all heartbeats topics
+
+
HETEROGENEOUS - Enum constant in enum class org.apache.kafka.coordinator.group.api.assignor.SubscriptionType
+
+
A heterogeneous subscription type means that not all the members of the group use the same subscription.
+
+
HIGH - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Importance
+
 
+
highestOffsetForEpoch(TopicIdPartition, int) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager
+
+
Returns the highest log offset of topic partition for the given leader epoch in remote storage.
+
+
highWatermark() - Method in class org.apache.kafka.clients.admin.QuorumInfo
+
 
+
Histogram - Class in org.apache.kafka.common.metrics.stats
+
 
+
Histogram(Histogram.BinScheme) - Constructor for class org.apache.kafka.common.metrics.stats.Histogram
+
 
+
Histogram.BinScheme - Interface in org.apache.kafka.common.metrics.stats
+
+
An algorithm for determining the bin in which a value is to be placed as well as calculating the upper end of each bin.
+
+
Histogram.ConstantBinScheme - Class in org.apache.kafka.common.metrics.stats
+
+
A scheme for calculating the bins where the width of each bin is a constant determined by the range of values and the number of bins.
+
+
Histogram.LinearBinScheme - Class in org.apache.kafka.common.metrics.stats
+
+
A scheme for calculating the bins where the width of each bin is one more than the previous bin, and therefore the bin widths are increasing at a linear rate.
+
+
history() - Method in class org.apache.kafka.clients.producer.MockProducer
+
+
Get the list of sent records since the last call to MockProducer.clear()
+
+
historyRetentionMs() - Method in interface org.apache.kafka.streams.state.VersionedBytesStoreSupplier
+
+
Returns the history retention (in milliseconds) that stores created from this supplier will have.
+
+
hmac() - Method in class org.apache.kafka.common.security.token.delegation.DelegationToken
+
 
+
hmacAsBase64String() - Method in class org.apache.kafka.common.security.token.delegation.DelegationToken
+
 
+
HOMOGENEOUS - Enum constant in enum class org.apache.kafka.coordinator.group.api.assignor.SubscriptionType
+
+
A homogeneous subscription type means that all the members of the group use the same subscription.
+
+
host() - Method in class org.apache.kafka.clients.admin.MemberDescription
+
+
The host where the group member is running.
+
+
host() - Method in class org.apache.kafka.clients.admin.RaftVoterEndpoint
+
 
+
host() - Method in class org.apache.kafka.clients.admin.ShareMemberDescription
+
+
The host where the group member is running.
+
+
host() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription.Endpoint
+
 
+
host() - Method in class org.apache.kafka.common.acl.AccessControlEntry
+
+
Return the host or `*` for all hosts.
+
+
host() - Method in class org.apache.kafka.common.acl.AccessControlEntryFilter
+
+
Return the host or null.
+
+
host() - Method in class org.apache.kafka.common.Endpoint
+
+
Returns advertised host name of this endpoint.
+
+
host() - Method in class org.apache.kafka.common.Node
+
+
The host name for this node
+
+
host() - Method in class org.apache.kafka.streams.state.HostInfo
+
 
+
host() - Method in interface org.apache.kafka.streams.StreamsMetadata
+
+
Host where the Streams client runs.
+
+
hostInfo() - Method in interface org.apache.kafka.streams.processor.assignment.KafkaStreamsState
+
+
The HostInfo of this KafkaStreams client, if set via the application.server config.
+
+
hostInfo() - Method in interface org.apache.kafka.streams.StreamsMetadata
+
+
The value of StreamsConfig.APPLICATION_SERVER_CONFIG configured for the Streams client.
+
+
HostInfo - Class in org.apache.kafka.streams.state
+
+
Represents a user defined endpoint in a KafkaStreams application.
+
+
HostInfo(String, int) - Constructor for class org.apache.kafka.streams.state.HostInfo
+
 
+
+

I

+
+
id - Variable in enum class org.apache.kafka.clients.consumer.AcknowledgeType
+
 
+
id - Variable in enum class org.apache.kafka.common.metrics.Sensor.RecordingLevel
+
+
the permanent and immutable id of an API--this can't change ever
+
+
id - Variable in enum class org.apache.kafka.common.security.auth.SecurityProtocol
+
+
The permanent and immutable id of a security protocol -- this can't change, and must match kafka.cluster.SecurityProtocol
+
+
id - Variable in enum class org.apache.kafka.streams.errors.DeserializationExceptionHandler.DeserializationHandlerResponse
+
+
The permanent and immutable id for the used option.
+
+
id - Variable in enum class org.apache.kafka.streams.errors.ProcessingExceptionHandler.ProcessingHandlerResponse
+
+
The permanent and immutable id for the used option.
+
+
id - Variable in enum class org.apache.kafka.streams.errors.ProductionExceptionHandler.ProductionExceptionHandlerResponse
+
+
The permanent and immutable id for the used option.
+
+
id - Variable in enum class org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse
+
+
The permanent and immutable id for the used option.
+
+
id() - Method in enum class org.apache.kafka.clients.admin.AlterConfigOp.OpType
+
 
+
id() - Method in enum class org.apache.kafka.clients.admin.EndpointType
+
 
+
id() - Method in enum class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.RebalanceProtocol
+
 
+
id() - Method in enum class org.apache.kafka.common.config.ConfigResource.Type
+
 
+
id() - Method in enum class org.apache.kafka.common.IsolationLevel
+
 
+
id() - Method in class org.apache.kafka.common.Node
+
+
The node id of this node
+
+
id() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId
+
 
+
id() - Method in enum class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState
+
 
+
id() - Method in enum class org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteState
+
 
+
id() - Method in class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask
+
 
+
id() - Method in class org.apache.kafka.streams.processor.assignment.ProcessId
+
 
+
id() - Method in interface org.apache.kafka.streams.processor.assignment.TaskInfo
+
 
+
id() - Method in interface org.apache.kafka.streams.TopologyDescription.GlobalStore
+
 
+
id() - Method in interface org.apache.kafka.streams.TopologyDescription.Subtopology
+
+
Internally assigned unique ID.
+
+
ID_CONFIG - Static variable in class org.apache.kafka.connect.tools.SchemaSourceTask
+
 
+
ID_CONFIG - Static variable in class org.apache.kafka.connect.tools.VerifiableSinkTask
+
 
+
ID_CONFIG - Static variable in class org.apache.kafka.connect.tools.VerifiableSourceTask
+
 
+
IDEMPOTENT_WRITE - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
IDEMPOTENT_WRITE operation.
+
+
identityAssignment(ApplicationState) - Static method in class org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils
+
+
Return a "no-op" assignment that just copies the previous assignment of tasks to KafkaStreams clients
+
+
IdentityReplicationPolicy - Class in org.apache.kafka.connect.mirror
+
+
Alternative implementation of ReplicationPolicy that does not rename remote topics.
+
+
IdentityReplicationPolicy() - Constructor for class org.apache.kafka.connect.mirror.IdentityReplicationPolicy
+
 
+
idString() - Method in class org.apache.kafka.common.Node
+
+
String representation of the node id.
+
+
ignore(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
ignoredExtensions() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerExtensionsValidatorCallback
+
 
+
IllegalGenerationException - Exception in org.apache.kafka.common.errors
+
 
+
IllegalGenerationException() - Constructor for exception org.apache.kafka.common.errors.IllegalGenerationException
+
 
+
IllegalGenerationException(String) - Constructor for exception org.apache.kafka.common.errors.IllegalGenerationException
+
 
+
IllegalGenerationException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.IllegalGenerationException
+
 
+
IllegalGenerationException(Throwable) - Constructor for exception org.apache.kafka.common.errors.IllegalGenerationException
+
 
+
IllegalSaslStateException - Exception in org.apache.kafka.common.errors
+
+
This exception indicates unexpected requests prior to SASL authentication.
+
+
IllegalSaslStateException(String) - Constructor for exception org.apache.kafka.common.errors.IllegalSaslStateException
+
 
+
IllegalSaslStateException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.IllegalSaslStateException
+
 
+
IllegalWorkerStateException - Exception in org.apache.kafka.connect.errors
+
+
Indicates that a method has been invoked illegally or at an invalid time by a connector or task.
+
+
IllegalWorkerStateException(String) - Constructor for exception org.apache.kafka.connect.errors.IllegalWorkerStateException
+
 
+
IllegalWorkerStateException(String, Throwable) - Constructor for exception org.apache.kafka.connect.errors.IllegalWorkerStateException
+
 
+
IllegalWorkerStateException(Throwable) - Constructor for exception org.apache.kafka.connect.errors.IllegalWorkerStateException
+
 
+
importance - Variable in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
in(String...) - Static method in class org.apache.kafka.common.config.ConfigDef.CaseInsensitiveValidString
+
 
+
in(String...) - Static method in class org.apache.kafka.common.config.ConfigDef.ValidList
+
 
+
in(String...) - Static method in class org.apache.kafka.common.config.ConfigDef.ValidString
+
 
+
IN_MEMORY - Enum constant in enum class org.apache.kafka.streams.kstream.Materialized.StoreType
+
 
+
IN_MEMORY - Static variable in class org.apache.kafka.streams.state.BuiltInDslStoreSuppliers
+
 
+
IN_MEMORY - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
inactivityGap() - Method in class org.apache.kafka.streams.kstream.SessionWindows
+
+
Return the specified gap for the session windows in milliseconds.
+
+
INCLUDE_CONFIG - Static variable in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
includeAuthorizedOperations() - Method in class org.apache.kafka.clients.admin.DescribeClassicGroupsOptions
+
 
+
includeAuthorizedOperations() - Method in class org.apache.kafka.clients.admin.DescribeClusterOptions
+
+
Specify if authorized operations should be included in the response.
+
+
includeAuthorizedOperations() - Method in class org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions
+
 
+
includeAuthorizedOperations() - Method in class org.apache.kafka.clients.admin.DescribeShareGroupsOptions
+
 
+
includeAuthorizedOperations() - Method in class org.apache.kafka.clients.admin.DescribeStreamsGroupsOptions
+
 
+
includeAuthorizedOperations() - Method in class org.apache.kafka.clients.admin.DescribeTopicsOptions
+
 
+
includeAuthorizedOperations(boolean) - Method in class org.apache.kafka.clients.admin.DescribeClassicGroupsOptions
+
 
+
includeAuthorizedOperations(boolean) - Method in class org.apache.kafka.clients.admin.DescribeClusterOptions
+
 
+
includeAuthorizedOperations(boolean) - Method in class org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions
+
 
+
includeAuthorizedOperations(boolean) - Method in class org.apache.kafka.clients.admin.DescribeShareGroupsOptions
+
 
+
includeAuthorizedOperations(boolean) - Method in class org.apache.kafka.clients.admin.DescribeStreamsGroupsOptions
+
 
+
includeAuthorizedOperations(boolean) - Method in class org.apache.kafka.clients.admin.DescribeTopicsOptions
+
 
+
includeDocumentation() - Method in class org.apache.kafka.clients.admin.DescribeConfigsOptions
+
+
Return true if config documentation should be returned in the response.
+
+
includeDocumentation(boolean) - Method in class org.apache.kafka.clients.admin.DescribeConfigsOptions
+
+
Set to true if config documentation should be returned in the response.
+
+
includeFencedBrokers() - Method in class org.apache.kafka.clients.admin.DescribeClusterOptions
+
+
Specify if fenced brokers should be included in the response.
+
+
includeFencedBrokers(boolean) - Method in class org.apache.kafka.clients.admin.DescribeClusterOptions
+
 
+
includeSynonyms() - Method in class org.apache.kafka.clients.admin.DescribeConfigsOptions
+
+
Return true if synonym configs should be returned in the response.
+
+
includeSynonyms(boolean) - Method in class org.apache.kafka.clients.admin.DescribeConfigsOptions
+
+
Set to true if synonym configs should be returned in the response.
+
+
InconsistentClusterIdException - Exception in org.apache.kafka.common.errors
+
 
+
InconsistentClusterIdException(String) - Constructor for exception org.apache.kafka.common.errors.InconsistentClusterIdException
+
 
+
InconsistentClusterIdException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InconsistentClusterIdException
+
 
+
InconsistentGroupProtocolException - Exception in org.apache.kafka.common.errors
+
 
+
InconsistentGroupProtocolException(String) - Constructor for exception org.apache.kafka.common.errors.InconsistentGroupProtocolException
+
 
+
InconsistentGroupProtocolException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InconsistentGroupProtocolException
+
 
+
InconsistentTopicIdException - Exception in org.apache.kafka.common.errors
+
 
+
InconsistentTopicIdException(String) - Constructor for exception org.apache.kafka.common.errors.InconsistentTopicIdException
+
 
+
InconsistentVoterSetException - Exception in org.apache.kafka.common.errors
+
 
+
InconsistentVoterSetException(String) - Constructor for exception org.apache.kafka.common.errors.InconsistentVoterSetException
+
 
+
InconsistentVoterSetException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InconsistentVoterSetException
+
 
+
increaseTo(int) - Static method in class org.apache.kafka.clients.admin.NewPartitions
+
+
Increase the partition count for a topic to the given totalCount.
+
+
increaseTo(int, List<List<Integer>>) - Static method in class org.apache.kafka.clients.admin.NewPartitions
+
+
Increase the partition count for a topic to the given totalCount, assigning the new partitions according to the given newAssignments.
+
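A minimal sketch of growing a topic's partition count via the Admin client; the bootstrap address and topic name are placeholders:

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewPartitions;

public class IncreasePartitionsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // Grow "orders" to 6 partitions; the brokers choose placement for the new partitions.
            admin.createPartitions(Map.of("orders", NewPartitions.increaseTo(6))).all().get();
        }
    }
}
```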
+
incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Incrementally updates the configuration for the specified resources with default options.
+
+
incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>>, AlterConfigsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Incrementally update the configuration for the specified resources.
+
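A short sketch of an incremental config change; only the named config is touched, and the bootstrap address, topic, and retention value are illustrative:

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class IncrementalAlterConfigsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "orders");
            // SET touches only this one config; everything else on the topic is left alone.
            AlterConfigOp setRetention =
                    new AlterConfigOp(new ConfigEntry("retention.ms", "86400000"), AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(Map.of(topic, List.of(setRetention))).all().get();
        }
    }
}
```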
+
incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>>, AlterConfigsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>>, AlterConfigsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
index() - Method in class org.apache.kafka.connect.data.Field
+
+
Get the index of this field within the struct.
+
+
INDEX_INTERVAL_BYTES_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
INDEX_INTERVAL_BYTES_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
IneligibleReplicaException - Exception in org.apache.kafka.common.errors
+
 
+
IneligibleReplicaException(String) - Constructor for exception org.apache.kafka.common.errors.IneligibleReplicaException
+
 
+
inferSchema(Object) - Static method in class org.apache.kafka.connect.data.Values
+
+
If possible infer a schema for the given value.
+
+
INFO - Enum constant in enum class org.apache.kafka.common.metrics.Sensor.RecordingLevel
+
 
+
INFO_LOG_LEVEL - Static variable in class org.apache.kafka.common.config.LogLevelConfig
+
+
The INFO level designates informational messages that highlight normal Kafka events at a coarse-grained level.
+
+
inGroupStates(Set<GroupState>) - Method in class org.apache.kafka.clients.admin.ListConsumerGroupsOptions
+
+
Deprecated.
+
If groupStates is set, only groups in these states will be returned by listGroups().
+
+
inGroupStates(Set<GroupState>) - Method in class org.apache.kafka.clients.admin.ListGroupsOptions
+
+
If groupStates is set, only groups in these states will be returned by listGroups().
+
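A tentative sketch of filtering listed groups by state; the bootstrap address is a placeholder, and exact result types may differ slightly between releases:

```java
import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.GroupListing;
import org.apache.kafka.clients.admin.ListGroupsOptions;
import org.apache.kafka.common.GroupState;

public class ListEmptyGroupsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // Only groups currently in the EMPTY state come back.
            ListGroupsOptions options = new ListGroupsOptions().inGroupStates(Set.of(GroupState.EMPTY));
            for (GroupListing listing : admin.listGroups(options).all().get()) {
                System.out.println(listing.groupId() + " -> " + listing.groupState());
            }
        }
    }
}
```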
+
init(List<KafkaMetric>) - Method in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
init(List<KafkaMetric>) - Method in interface org.apache.kafka.common.metrics.MetricsReporter
+
+
This is called when the reporter is first registered to initially register all existing metrics
+
+
init(FixedKeyProcessorContext<KIn, VOut>) - Method in class org.apache.kafka.streams.processor.api.ContextualFixedKeyProcessor
+
 
+
init(FixedKeyProcessorContext<KIn, VOut>) - Method in interface org.apache.kafka.streams.processor.api.FixedKeyProcessor
+
+
Initialize this processor with the given context.
+
+
init(ProcessorContext<KOut, VOut>) - Method in class org.apache.kafka.streams.processor.api.ContextualProcessor
+
 
+
init(ProcessorContext<KOut, VOut>) - Method in interface org.apache.kafka.streams.processor.api.Processor
+
+
Initialize this processor with the given context.
+
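For illustration, a minimal processor that keeps the context handed to init() and forwards transformed records; the class name and logic are made up:

```java
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;

public class UppercaseProcessor implements Processor<String, String, String, String> {
    private ProcessorContext<String, String> context;

    @Override
    public void init(ProcessorContext<String, String> context) {
        // Keep the context so process() can forward records downstream.
        this.context = context;
    }

    @Override
    public void process(Record<String, String> record) {
        context.forward(record.withValue(record.value().toUpperCase()));
    }
}
```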
+
init(ProcessorContext) - Method in interface org.apache.kafka.streams.kstream.Transformer
+
+
Deprecated.
+
Initialize this transformer.
+
+
init(ProcessorContext) - Method in interface org.apache.kafka.streams.kstream.ValueTransformer
+
+
Deprecated.
+
Initialize this transformer.
+
+
init(ProcessorContext) - Method in interface org.apache.kafka.streams.kstream.ValueTransformerWithKey
+
+
Initialize this transformer.
+
+
init(StateStoreContext, StateStore) - Method in interface org.apache.kafka.streams.processor.StateStore
+
+
Initializes this state store.
+
+
initialize(Subject, CallbackHandler, Map<String, ?>, Map<String, ?>) - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule
+
 
+
initialize(Subject, CallbackHandler, Map<String, ?>, Map<String, ?>) - Method in class org.apache.kafka.common.security.plain.PlainLoginModule
+
 
+
initialize(Subject, CallbackHandler, Map<String, ?>, Map<String, ?>) - Method in class org.apache.kafka.common.security.scram.ScramLoginModule
+
 
+
initialize(ConnectorContext) - Method in class org.apache.kafka.connect.connector.Connector
+
+
Initialize this connector, using the provided ConnectorContext to notify the runtime of input configuration changes.
+
+
initialize(ConnectorContext) - Method in class org.apache.kafka.connect.tools.MockSinkConnector
+
 
+
initialize(ConnectorContext) - Method in class org.apache.kafka.connect.tools.MockSourceConnector
+
 
+
initialize(ConnectorContext, List<Map<String, String>>) - Method in class org.apache.kafka.connect.connector.Connector
+
+
Initialize this connector, using the provided ConnectorContext to notify the runtime of input configuration changes and using the provided set of Task configurations.
+
+
initialize(ConnectorContext, List<Map<String, String>>) - Method in class org.apache.kafka.connect.tools.MockSinkConnector
+
 
+
initialize(ConnectorContext, List<Map<String, String>>) - Method in class org.apache.kafka.connect.tools.MockSourceConnector
+
 
+
initialize(SinkTaskContext) - Method in class org.apache.kafka.connect.sink.SinkTask
+
+
Initialize the context of this task.
+
+
initialize(SourceTaskContext) - Method in class org.apache.kafka.connect.source.SourceTask
+
+
Initialize this SourceTask with the specified context object.
+
+
Initializer<VAgg> - Interface in org.apache.kafka.streams.kstream
+
+
The Initializer interface for creating an initial value in aggregations.
+
+
initTransactionException - Variable in class org.apache.kafka.clients.producer.MockProducer
+
 
+
initTransactions() - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
Needs to be called before any other methods when the transactional.id is set in the configuration.
+
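A compact sketch of the call order around initTransactions(); the bootstrap address, transactional id, and topic are placeholders:

```java
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class TransactionalSendExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // placeholder address
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "orders-writer-1");   // placeholder id
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.initTransactions();      // must be called once, before any transactional work
            producer.beginTransaction();
            producer.send(new ProducerRecord<>("orders", "key", "value"));
            producer.commitTransaction();     // or abortTransaction() if something went wrong
        }
    }
}
```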
+
initTransactions() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
initTransactions() - Method in interface org.apache.kafka.clients.producer.Producer
+
+ +
+
injectTimeoutException(int) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
injectTimeoutException(int) - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
InMemoryDslStoreSuppliers() - Constructor for class org.apache.kafka.streams.state.BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers
+
 
+
inMemoryKeyValueStore(String) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Create an in-memory KeyValueBytesStoreSupplier.
+
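A small sketch of plugging the in-memory supplier into a store builder; the store name and serdes are illustrative:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

public class InMemoryStoreExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        // The in-memory supplier keeps all data on the heap (no RocksDB files on disk).
        StoreBuilder<KeyValueStore<String, Long>> countsStore =
                Stores.keyValueStoreBuilder(Stores.inMemoryKeyValueStore("counts-store"),
                        Serdes.String(), Serdes.Long());
        builder.addStateStore(countsStore);
    }
}
```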
+
inMemorySessionStore(String, Duration) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Create an in-memory SessionBytesStoreSupplier.
+
+
inMemoryWindowStore(String, Duration, Duration, boolean) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Create an in-memory WindowBytesStoreSupplier.
+
+
innerDeserializer() - Method in class org.apache.kafka.common.serialization.ListDeserializer
+
 
+
inputExtensions() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerExtensionsValidatorCallback
+
 
+
INSTANCE - Static variable in exception org.apache.kafka.common.errors.CoordinatorNotAvailableException
+
 
+
INSTANCE - Static variable in exception org.apache.kafka.common.errors.DisconnectException
+
 
+
instanceId() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
The id of the instance, used for static membership, if available.
+
+
instanceId() - Method in interface org.apache.kafka.coordinator.group.api.assignor.MemberSubscription
+
+
Gets the instance Id if present.
+
+
inStates(Set<ConsumerGroupState>) - Method in class org.apache.kafka.clients.admin.ListConsumerGroupsOptions
+
+
Deprecated.
+
+
inStore(String) - Static method in class org.apache.kafka.streams.query.StateQueryRequest
+
+
Specifies the name of the store to query.
+
+
inSyncReplicas() - Method in class org.apache.kafka.common.PartitionInfo
+
+
The subset of the replicas that are in sync, that is caught-up to the leader and ready to take over as leader if the leader should fail.
+
+
INT - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigType
+
 
+
INT - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Type
+
+
Used for numerical values within the Java Integer range.
+
+
int16() - Static method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
INT16 - Enum constant in enum class org.apache.kafka.connect.data.Schema.Type
+
+
16-bit signed integer
+
+
INT16_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
int32() - Static method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
INT32 - Enum constant in enum class org.apache.kafka.connect.data.Schema.Type
+
+
32-bit signed integer
+
+
INT32_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
int64() - Static method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
INT64 - Enum constant in enum class org.apache.kafka.connect.data.Schema.Type
+
+
64-bit signed integer
+
+
INT64_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
int8() - Static method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
INT8 - Enum constant in enum class org.apache.kafka.connect.data.Schema.Type
+
+
8-bit signed integer
+
+
INT8_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
Integer() - Static method in class org.apache.kafka.common.serialization.Serdes
+
+
A serde for nullable Integer type.
+
+
IntegerDecoder - Class in org.apache.kafka.tools.api
+
+
The integer decoder translates bytes into integers.
+
+
IntegerDecoder() - Constructor for class org.apache.kafka.tools.api.IntegerDecoder
+
 
+
IntegerDeserializer - Class in org.apache.kafka.common.serialization
+
 
+
IntegerDeserializer() - Constructor for class org.apache.kafka.common.serialization.IntegerDeserializer
+
 
+
IntegerSerde() - Constructor for class org.apache.kafka.common.serialization.Serdes.IntegerSerde
+
 
+
IntegerSerializer - Class in org.apache.kafka.common.serialization
+
 
+
IntegerSerializer() - Constructor for class org.apache.kafka.common.serialization.IntegerSerializer
+
 
+
interBrokerEndpoint() - Method in interface org.apache.kafka.server.authorizer.AuthorizerServerInfo
+
+
Returns the inter-broker endpoint.
+
+
INTERCEPTOR_CLASSES_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
interceptor.classes
+
+
INTERCEPTOR_CLASSES_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
interceptor.classes
+
+
INTERCEPTOR_CLASSES_DOC - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
INTERCEPTOR_CLASSES_DOC - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
 
+
InterfaceStability - Class in org.apache.kafka.common.annotation
+
+
Annotation to inform users of how much to rely on a particular package, class or method not changing over time.
+
+
InterfaceStability() - Constructor for class org.apache.kafka.common.annotation.InterfaceStability
+
 
+
InterfaceStability.Evolving - Annotation Interface in org.apache.kafka.common.annotation
+
+
Compatibility may be broken at minor release (i.e.
+
+
InterfaceStability.Stable - Annotation Interface in org.apache.kafka.common.annotation
+
+
Compatibility is maintained in major, minor and patch releases with one exception: compatibility may be broken in a major release (i.e.
+
+
InterfaceStability.Unstable - Annotation Interface in org.apache.kafka.common.annotation
+
+
No guarantee is provided as to reliability or stability across any level of release granularity.
+
+
INTERNAL_CONSUMER_WRAPPER - Static variable in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
INTERNAL_TASK_ASSIGNOR_CLASS - Static variable in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
INTERNAL_TOPIC_SEPARATOR_ENABLED - Static variable in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
 
+
INTERNAL_TOPIC_SEPARATOR_ENABLED_CONFIG - Static variable in class org.apache.kafka.connect.mirror.DefaultReplicationPolicy
+
 
+
INTERNAL_TOPIC_SEPARATOR_ENABLED_DEFAULT - Static variable in class org.apache.kafka.connect.mirror.DefaultReplicationPolicy
+
 
+
INTERNAL_TOPIC_SEPARATOR_ENABLED_DEFAULT - Static variable in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
 
+
INTERNAL_TOPIC_SEPARATOR_ENABLED_DOC - Static variable in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
 
+
internalConfig - Variable in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
InternalConfig() - Constructor for class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
InternalFixedKeyRecordFactory - Class in org.apache.kafka.streams.processor.api
+
 
+
internalTopics() - Method in class org.apache.kafka.common.Cluster
+
 
+
InterruptException - Exception in org.apache.kafka.common.errors
+
+
An unchecked wrapper for InterruptedException
+
+
InterruptException(InterruptedException) - Constructor for exception org.apache.kafka.common.errors.InterruptException
+
 
+
InterruptException(String) - Constructor for exception org.apache.kafka.common.errors.InterruptException
+
 
+
InterruptException(String, InterruptedException) - Constructor for exception org.apache.kafka.common.errors.InterruptException
+
 
+
INTERVAL - Enum constant in enum class org.apache.kafka.connect.source.SourceTask.TransactionBoundary
+
+
Transactions will be started and committed on a user-defined time interval.
+
+
INVALID_STANDBY_TASK - Enum constant in enum class org.apache.kafka.streams.processor.assignment.TaskAssignor.AssignmentError
+
 
+
InvalidCommitOffsetSizeException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidCommitOffsetSizeException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidCommitOffsetSizeException
+
 
+
InvalidCommitOffsetSizeException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidCommitOffsetSizeException
+
 
+
InvalidConfigurationException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidConfigurationException() - Constructor for exception org.apache.kafka.common.errors.InvalidConfigurationException
+
 
+
InvalidConfigurationException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidConfigurationException
+
 
+
InvalidConfigurationException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidConfigurationException
+
 
+
InvalidConfigurationException(Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidConfigurationException
+
 
+
invalidExtensions() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerExtensionsValidatorCallback
+
 
+
InvalidFetchSessionEpochException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidFetchSessionEpochException() - Constructor for exception org.apache.kafka.common.errors.InvalidFetchSessionEpochException
+
 
+
InvalidFetchSessionEpochException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidFetchSessionEpochException
+
 
+
InvalidFetchSizeException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidFetchSizeException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidFetchSizeException
+
 
+
InvalidFetchSizeException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidFetchSizeException
+
 
+
InvalidGroupIdException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidGroupIdException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidGroupIdException
+
 
+
InvalidGroupIdException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidGroupIdException
+
 
+
InvalidMetadataException - Exception in org.apache.kafka.common.errors
+
+
An exception that may indicate the client's metadata is out of date
+
+
InvalidOffsetException - Exception in org.apache.kafka.clients.consumer
+
+
Thrown when the offset for a set of partitions is invalid (either undefined or out of range), and no reset policy has been configured.
+
+
InvalidOffsetException - Exception in org.apache.kafka.common.errors
+
+
Thrown when the offset for a set of partitions is invalid (either undefined or out of range), and no reset policy has been configured.
+
+
InvalidOffsetException(String) - Constructor for exception org.apache.kafka.clients.consumer.InvalidOffsetException
+
 
+
InvalidOffsetException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidOffsetException
+
 
+
InvalidOffsetException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidOffsetException
+
 
+
InvalidPartitionsException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidPartitionsException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidPartitionsException
+
 
+
InvalidPartitionsException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidPartitionsException
+
 
+
InvalidPidMappingException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidPidMappingException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidPidMappingException
+
 
+
InvalidPrincipalTypeException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidPrincipalTypeException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidPrincipalTypeException
+
 
+
InvalidPrincipalTypeException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidPrincipalTypeException
+
 
+
InvalidProducerEpochException - Exception in org.apache.kafka.common.errors
+
+
This exception indicates that the produce request sent to the partition leader contains a non-matching producer epoch.
+
+
InvalidProducerEpochException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidProducerEpochException
+
 
+
InvalidRecordException - Exception in org.apache.kafka.common
+
 
+
InvalidRecordException(String) - Constructor for exception org.apache.kafka.common.InvalidRecordException
+
 
+
InvalidRecordException(String, Throwable) - Constructor for exception org.apache.kafka.common.InvalidRecordException
+
 
+
InvalidRecordStateException - Exception in org.apache.kafka.common.errors
+
+
Thrown when the acknowledgement of delivery of a record could not be completed because the record state is invalid.
+
+
InvalidRecordStateException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidRecordStateException
+
 
+
InvalidRegistrationException - Exception in org.apache.kafka.common.errors
+
+
Thrown when a broker registration request is considered invalid by the controller.
+
+
InvalidRegistrationException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidRegistrationException
+
 
+
InvalidRegularExpression - Exception in org.apache.kafka.common.errors
+
+
Thrown when a regular expression received in a request is not valid.
+
+
InvalidRegularExpression(String) - Constructor for exception org.apache.kafka.common.errors.InvalidRegularExpression
+
 
+
InvalidReplicaAssignmentException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidReplicaAssignmentException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidReplicaAssignmentException
+
 
+
InvalidReplicaAssignmentException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidReplicaAssignmentException
+
 
+
InvalidReplicationFactorException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidReplicationFactorException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidReplicationFactorException
+
 
+
InvalidReplicationFactorException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidReplicationFactorException
+
 
+
InvalidRequestException - Exception in org.apache.kafka.common.errors
+
+
Thrown when a request breaks basic wire protocol rules.
+
+
InvalidRequestException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidRequestException
+
 
+
InvalidRequestException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidRequestException
+
 
+
InvalidRequiredAcksException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidRequiredAcksException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidRequiredAcksException
+
 
+
InvalidSessionTimeoutException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidSessionTimeoutException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidSessionTimeoutException
+
 
+
InvalidSessionTimeoutException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidSessionTimeoutException
+
 
+
InvalidShareSessionEpochException - Exception in org.apache.kafka.common.errors
+
+
Thrown when the share session epoch is invalid.
+
+
InvalidShareSessionEpochException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidShareSessionEpochException
+
 
+
InvalidStateStoreException - Exception in org.apache.kafka.streams.errors
+
+
Indicates that there was a problem when trying to access a StateStore.
+
+
InvalidStateStoreException(String) - Constructor for exception org.apache.kafka.streams.errors.InvalidStateStoreException
+
 
+
InvalidStateStoreException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.InvalidStateStoreException
+
 
+
InvalidStateStoreException(Throwable) - Constructor for exception org.apache.kafka.streams.errors.InvalidStateStoreException
+
 
+
InvalidStateStorePartitionException - Exception in org.apache.kafka.streams.errors
+
+
Indicates that the specific state store being queried via StoreQueryParameters used a partitioning that is not assigned to this instance.
+
+
InvalidStateStorePartitionException(String) - Constructor for exception org.apache.kafka.streams.errors.InvalidStateStorePartitionException
+
 
+
InvalidStateStorePartitionException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.InvalidStateStorePartitionException
+
 
+
InvalidTimestampException - Exception in org.apache.kafka.common.errors
+
+
Indicate the timestamp of a record is invalid.
+
+
InvalidTimestampException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidTimestampException
+
 
+
InvalidTimestampException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidTimestampException
+
 
+
InvalidTopicException - Exception in org.apache.kafka.common.errors
+
+
The client has attempted to perform an operation on an invalid topic.
+
+
InvalidTopicException() - Constructor for exception org.apache.kafka.common.errors.InvalidTopicException
+
 
+
InvalidTopicException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidTopicException
+
 
+
InvalidTopicException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidTopicException
+
 
+
InvalidTopicException(String, Set<String>) - Constructor for exception org.apache.kafka.common.errors.InvalidTopicException
+
 
+
InvalidTopicException(Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidTopicException
+
 
+
InvalidTopicException(Set<String>) - Constructor for exception org.apache.kafka.common.errors.InvalidTopicException
+
 
+
invalidTopics() - Method in class org.apache.kafka.common.Cluster
+
 
+
invalidTopics() - Method in exception org.apache.kafka.common.errors.InvalidTopicException
+
 
+
InvalidTxnStateException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidTxnStateException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidTxnStateException
+
 
+
InvalidTxnTimeoutException - Exception in org.apache.kafka.common.errors
+
+
The transaction coordinator returns this error code if the timeout received via the InitProducerIdRequest is larger than the `transaction.max.timeout.ms` config value.
+
+
InvalidTxnTimeoutException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidTxnTimeoutException
+
 
+
InvalidTxnTimeoutException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidTxnTimeoutException
+
 
+
InvalidUpdateVersionException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidUpdateVersionException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidUpdateVersionException
+
 
+
InvalidUpdateVersionException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidUpdateVersionException
+
 
+
InvalidVoterKeyException - Exception in org.apache.kafka.common.errors
+
 
+
InvalidVoterKeyException(String) - Constructor for exception org.apache.kafka.common.errors.InvalidVoterKeyException
+
 
+
InvalidVoterKeyException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.InvalidVoterKeyException
+
 
+
IP - Static variable in class org.apache.kafka.common.quota.ClientQuotaEntity
+
 
+
IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED - Static variable in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
isAllPartitions() - Method in class org.apache.kafka.streams.query.StateQueryRequest
+
+
Whether this request should fetch from all locally available partitions.
+
+
isBootstrapConfigured() - Method in class org.apache.kafka.common.Cluster
+
 
+
isCancelled() - Method in class org.apache.kafka.common.KafkaFuture
+
+
Returns true if this CompletableFuture was cancelled before it completed normally.
+
+
isChangelog() - Method in interface org.apache.kafka.streams.processor.assignment.TaskTopicPartition
+
 
+
isCheckpointsTopic(String) - Method in class org.apache.kafka.connect.mirror.DefaultReplicationPolicy
+
 
+
isCheckpointsTopic(String) - Method in interface org.apache.kafka.connect.mirror.ReplicationPolicy
+
+
Returns true if the topic is a checkpoints topic.
+
+
isClassic() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
The flag indicating whether a member is classic.
+
+
isCollectExecutionInfo() - Method in class org.apache.kafka.streams.query.QueryConfig
+
 
+
isCompletedExceptionally() - Method in class org.apache.kafka.common.KafkaFuture
+
+
Returns true if this CompletableFuture completed exceptionally, in any way.
+
+
isDefault() - Method in class org.apache.kafka.clients.admin.ConfigEntry
+
+
Return whether the config value is the default or if it's been explicitly set.
+
+
isDefault() - Method in class org.apache.kafka.common.config.ConfigResource
+
+
Returns true if this is the default resource of a resource type.
+
+
isDone() - Method in class org.apache.kafka.common.KafkaFuture
+
+
Returns true if completed in any fashion: normally, exceptionally, or via cancellation.
+
+
isEmpty() - Method in class org.apache.kafka.clients.consumer.ConsumerRecords
+
 
+
isEmpty() - Method in class org.apache.kafka.common.Node
+
+
Check whether this node is empty, which may be the case if noNode() is used as a placeholder in a response payload with an error.
+
+
isEmpty() - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
isEmpty() - Method in interface org.apache.kafka.connect.header.Headers
+
+
Determine whether this object has no headers.
+
+
isEmpty() - Method in class org.apache.kafka.streams.query.Position
+
 
+
isEmpty() - Method in class org.apache.kafka.streams.TestOutputTopic
+
+
Verify if the topic queue is empty.
+
+
isFailure() - Method in interface org.apache.kafka.streams.query.QueryResult
+
+
True iff the query execution failed.
+
+
isFenced() - Method in class org.apache.kafka.common.Node
+
+
Whether this node is fenced.
+
+
isFuture() - Method in class org.apache.kafka.clients.admin.ReplicaInfo
+
+
Whether this replica has been created by an AlterReplicaLogDirsRequest but has not yet replaced the current replica on the broker.
+
+
isHeartbeatsTopic(String) - Method in interface org.apache.kafka.connect.mirror.ReplicationPolicy
+
+
Returns true if the topic is a heartbeats topic
+
+
isInternal() - Method in class org.apache.kafka.clients.admin.TopicDescription
+
+
Whether the topic is internal to Kafka.
+
+
isInternal() - Method in class org.apache.kafka.clients.admin.TopicListing
+
+
Whether the topic is internal to Kafka.
+
+
isInternalTopic(String) - Method in interface org.apache.kafka.connect.mirror.ReplicationPolicy
+
+
Returns true if the topic is considered an internal topic.
+
+
isMeasurable() - Method in class org.apache.kafka.common.metrics.KafkaMetric
+
+
The method determines if the metric value provider is of type Measurable.
+
+
isMM2InternalTopic(String) - Method in class org.apache.kafka.connect.mirror.DefaultReplicationPolicy
+
 
+
isMM2InternalTopic(String) - Method in interface org.apache.kafka.connect.mirror.ReplicationPolicy
+
+
Returns true if the topic is one of MirrorMaker internal topics.
+
+
isNamedTopology() - Method in class org.apache.kafka.streams.TopologyConfig
+
 
+
ISOLATION_LEVEL_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
isolation.level
+
+
ISOLATION_LEVEL_DOC - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
isolationLevel() - Method in class org.apache.kafka.clients.admin.ListOffsetsOptions
+
 
+
IsolationLevel - Enum Class in org.apache.kafka.common
+
 
+
isOpen() - Method in interface org.apache.kafka.streams.processor.StateStore
+
+
Is this store open for reading and writing
+
+
isOptional() - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
isOptional() - Method in interface org.apache.kafka.connect.data.Schema
+
 
+
isOptional() - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
isPartitionAssignable(Uuid, int) - Method in interface org.apache.kafka.coordinator.group.api.assignor.GroupSpec
+
+
For share groups, a partition can only be assigned once its initialization is complete.
+
+
isPartitionAssigned(Uuid, int) - Method in interface org.apache.kafka.coordinator.group.api.assignor.GroupSpec
+
+
Determine whether a topic id and partition have been assigned to a member.
+
+
isPaused() - Method in class org.apache.kafka.streams.KafkaStreams
+
 
+
isPrimitive() - Method in enum class org.apache.kafka.connect.data.Schema.Type
+
 
+
isr() - Method in class org.apache.kafka.common.TopicPartitionInfo
+
+
Return the in-sync replicas of the partition.
+
+
isReadOnly() - Method in class org.apache.kafka.clients.admin.ConfigEntry
+
+
Return whether the config is read-only and cannot be updated.
+
+
isReady(TopicIdPartition) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager
+
+
Denotes whether the partition metadata is ready to serve.
+
+
isRequireActive() - Method in class org.apache.kafka.streams.query.StateQueryRequest
+
+
Whether this request requires the query to execute only on active partitions.
+
+
isRunningOrRebalancing() - Method in enum class org.apache.kafka.streams.KafkaStreams.State
+
 
+
isSensitive() - Method in class org.apache.kafka.clients.admin.ConfigEntry
+
+
Return whether the config value is sensitive.
+
+
isSensitive() - Method in enum class org.apache.kafka.common.config.ConfigDef.Type
+
+
Whether this type contains sensitive data such as a password or key.
+
+
isShuttingDown() - Method in enum class org.apache.kafka.streams.KafkaStreams.State
+
 
+
isSimpleConsumerGroup() - Method in class org.apache.kafka.clients.admin.ClassicGroupDescription
+
+
If the group is a simple consumer group or not.
+
+
isSimpleConsumerGroup() - Method in class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+
Whether the consumer group is simple or not.
+
+
isSimpleConsumerGroup() - Method in class org.apache.kafka.clients.admin.ConsumerGroupListing
+
+
Deprecated.
+
Whether the consumer group is simple or not.
+
+
isSimpleConsumerGroup() - Method in class org.apache.kafka.clients.admin.GroupListing
+
+
If the group is a simple consumer group or not.
+
+
isSkipCache() - Method in class org.apache.kafka.streams.query.KeyQuery
+
+
The flag whether to skip the cache or not during query evaluation.
+
+
isSkipCache() - Method in class org.apache.kafka.streams.query.TimestampedKeyQuery
+
+
The flag whether to skip the cache or not during query evaluation.
+
+
isSlidingWindow() - Method in class org.apache.kafka.streams.state.DslWindowParams
+
 
+
isSource() - Method in interface org.apache.kafka.streams.processor.assignment.TaskTopicPartition
+
 
+
isSpecific() - Method in enum class org.apache.kafka.common.resource.PatternType
+
 
+
isStateful() - Method in interface org.apache.kafka.streams.processor.assignment.TaskInfo
+
 
+
isSuccess() - Method in interface org.apache.kafka.streams.query.QueryResult
+
+
True iff the query was successfully executed.
+
+
ISSUED_AT_CLAIM_NAME - Static variable in class org.apache.kafka.common.security.oauthbearer.ClientJwtValidator
+
 
+
issueTimestamp() - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
isTerminating() - Method in interface org.apache.kafka.server.telemetry.ClientTelemetryPayload
+
+
Indicates whether the client is terminating and thus making its last metrics push.
+
+
isTimestamped() - Method in class org.apache.kafka.streams.state.DslKeyValueParams
+
 
+
isTimestamped() - Method in class org.apache.kafka.streams.state.DslWindowParams
+
 
+
isTxnIdxEmpty() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
+
If true indicates that the transaction index is empty.
+
+
isUnbounded() - Method in class org.apache.kafka.streams.query.PositionBound
+
+
Returns true iff this object specifies that there is no position bound.
+
+
isUnknown() - Method in class org.apache.kafka.common.acl.AccessControlEntry
+
+
Return true if this AclResource has any UNKNOWN components.
+
+
isUnknown() - Method in class org.apache.kafka.common.acl.AccessControlEntryFilter
+
+
Return true if there are any UNKNOWN components.
+
+
isUnknown() - Method in class org.apache.kafka.common.acl.AclBinding
+
 
+
isUnknown() - Method in class org.apache.kafka.common.acl.AclBindingFilter
+
 
+
isUnknown() - Method in enum class org.apache.kafka.common.acl.AclOperation
+
+
Return true if this operation is UNKNOWN.
+
+
isUnknown() - Method in enum class org.apache.kafka.common.acl.AclPermissionType
+
+
Return true if this permission type is UNKNOWN.
+
+
isUnknown() - Method in enum class org.apache.kafka.common.resource.PatternType
+
 
+
isUnknown() - Method in class org.apache.kafka.common.resource.Resource
+
+
Return true if this Resource has any UNKNOWN components.
+
+
isUnknown() - Method in class org.apache.kafka.common.resource.ResourcePattern
+
 
+
isUnknown() - Method in class org.apache.kafka.common.resource.ResourcePatternFilter
+
 
+
isUnknown() - Method in enum class org.apache.kafka.common.resource.ResourceType
+
+
Return whether this resource type is UNKNOWN.
+
+
isUpperBound() - Method in class org.apache.kafka.common.metrics.Quota
+
 
+
isValidEntityType(String) - Static method in class org.apache.kafka.common.quota.ClientQuotaEntity
+
 
+
isValidTransition(RemoteLogSegmentState, RemoteLogSegmentState) - Static method in enum class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState
+
 
+
isValidTransition(RemotePartitionDeleteState, RemotePartitionDeleteState) - Static method in enum class org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteState
+
 
+
isValidTransition(KafkaStreams.State) - Method in enum class org.apache.kafka.streams.KafkaStreams.State
+
 
+
iterations() - Method in class org.apache.kafka.clients.admin.ScramCredentialInfo
+
 
+
iterations() - Method in class org.apache.kafka.common.security.scram.ScramCredential
+
+
Number of iterations used to process this credential using the SCRAM algorithm.
+
+
iterator() - Method in class org.apache.kafka.clients.consumer.ConsumerRecords
+
 
+
iterator() - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
+

J

+
+
JmxReporter - Class in org.apache.kafka.common.metrics
+
+
Register metrics in JMX as dynamic mbeans based on the metric names
+
+
JmxReporter() - Constructor for class org.apache.kafka.common.metrics.JmxReporter
+
 
+
join(GlobalKTable<GlobalKey, GlobalValue>, KeyValueMapper<? super K, ? super V, ? extends GlobalKey>, ValueJoiner<? super V, ? super GlobalValue, ? extends VOut>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Join records of this stream with GlobalKTable's records using non-windowed inner equi-join.
+
+
join(GlobalKTable<GlobalKey, GlobalValue>, KeyValueMapper<? super K, ? super V, ? extends GlobalKey>, ValueJoiner<? super V, ? super GlobalValue, ? extends VOut>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
join(GlobalKTable<GlobalKey, GlobalValue>, KeyValueMapper<? super K, ? super V, ? extends GlobalKey>, ValueJoinerWithKey<? super K, ? super V, ? super GlobalValue, ? extends VOut>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
join(GlobalKTable<GlobalKey, GlobalValue>, KeyValueMapper<? super K, ? super V, ? extends GlobalKey>, ValueJoinerWithKey<? super K, ? super V, ? super GlobalValue, ? extends VOut>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
join(KStream<K, VRight>, ValueJoiner<? super V, ? super VRight, ? extends VOut>, JoinWindows) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Join records of this (left) stream with another (right) KStream's records using a windowed inner equi-join.
+
+
join(KStream<K, VRight>, ValueJoiner<? super V, ? super VRight, ? extends VOut>, JoinWindows, StreamJoined<K, V, VRight>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
join(KStream<K, VRight>, ValueJoinerWithKey<? super K, ? super V, ? super VRight, ? extends VOut>, JoinWindows) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
join(KStream<K, VRight>, ValueJoinerWithKey<? super K, ? super V, ? super VRight, ? extends VOut>, JoinWindows, StreamJoined<K, V, VRight>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
join(KTable<K, TableValue>, ValueJoiner<? super V, ? super TableValue, ? extends VOut>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Join records of this stream with KTable's records using non-windowed inner equi-join.
+
+
join(KTable<K, TableValue>, ValueJoiner<? super V, ? super TableValue, ? extends VOut>, Joined<K, V, TableValue>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Join records of this stream with KTable's records using non-windowed inner equi-join.
+
+
join(KTable<K, TableValue>, ValueJoinerWithKey<? super K, ? super V, ? super TableValue, ? extends VOut>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
join(KTable<K, TableValue>, ValueJoinerWithKey<? super K, ? super V, ? super TableValue, ? extends VOut>, Joined<K, V, TableValue>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
join(KTable<K, VO>, ValueJoiner<? super V, ? super VO, ? extends VR>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable's records using non-windowed inner equi join, with default serializers, deserializers, and state store.
+
+
join(KTable<K, VO>, ValueJoiner<? super V, ? super VO, ? extends VR>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable's records using non-windowed inner equi join, with the Materialized instance for configuration of the key serde, the result table's value serde, and state store.
+
+
join(KTable<K, VO>, ValueJoiner<? super V, ? super VO, ? extends VR>, Named) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable's records using non-windowed inner equi join, with default serializers, deserializers, and state store.
+
+
join(KTable<K, VO>, ValueJoiner<? super V, ? super VO, ? extends VR>, Named, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable's records using non-windowed inner equi join, with the Materialized instance for configuration of the key serde, the result table's value serde, and state store.
+
+
join(KTable<KO, VO>, BiFunction<? super K, ? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed inner join.
+
+
join(KTable<KO, VO>, BiFunction<? super K, ? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed inner join.
+
+
join(KTable<KO, VO>, BiFunction<? super K, ? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>, TableJoined<K, KO>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed inner join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.
+
+
join(KTable<KO, VO>, BiFunction<? super K, ? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>, TableJoined<K, KO>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed inner join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.
+
+
join(KTable<KO, VO>, Function<? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed inner join.
+
+
join(KTable<KO, VO>, Function<? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed inner join.
+
+
join(KTable<KO, VO>, Function<? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>, TableJoined<K, KO>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed inner join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.
+
+
join(KTable<KO, VO>, Function<? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>, TableJoined<K, KO>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed inner join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.
+
+
Joined<K,VLeft,VRight> - Class in org.apache.kafka.streams.kstream
+
+
The Joined class represents optional params that can be passed to KStream#join(KTable,...) and KStream#leftJoin(KTable,...) operations.
+
+
JoinWindows - Class in org.apache.kafka.streams.kstream
+
+
The window specifications used for joins.
+
+
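The KStream join overloads and the JoinWindows entry above cover windowed stream-stream joins. As a rough illustration (not part of the generated index), the sketch below joins two streams whose records share a key and arrive within five minutes of each other; the topic names, serdes, and join logic are placeholder assumptions.

```java
import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.JoinWindows;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.kstream.StreamJoined;

public class WindowedJoinSketch {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();

        // Two input streams keyed by the same String key (topic names are hypothetical).
        KStream<String, String> clicks = builder.stream("clicks");
        KStream<String, String> views = builder.stream("views");

        // Windowed inner equi-join: records with the same key that arrive within
        // five minutes of each other are combined by the ValueJoiner.
        KStream<String, String> joined = clicks.join(
                views,
                (clickValue, viewValue) -> clickValue + "/" + viewValue,
                JoinWindows.ofTimeDifferenceWithNoGrace(Duration.ofMinutes(5)),
                StreamJoined.with(Serdes.String(), Serdes.String(), Serdes.String()));

        joined.to("clicks-and-views", Produced.with(Serdes.String(), Serdes.String()));
    }
}
```

The StreamJoined argument supplies explicit serdes so the join's state stores do not depend on the default serde configuration.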
JwtBearerJwtRetriever - Class in org.apache.kafka.common.security.oauthbearer
+
+
JwtBearerJwtRetriever is a JwtRetriever that performs the steps to request a JWT from an OAuth/OIDC identity provider using the urn:ietf:params:oauth:grant-type:jwt-bearer grant type.
+
+
JwtBearerJwtRetriever() - Constructor for class org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever
+
 
+
JwtBearerJwtRetriever(Time) - Constructor for class org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever
+
 
+
JwtRetriever - Interface in org.apache.kafka.common.security.oauthbearer
+
+
A JwtRetriever is the internal API by which the login module will retrieve an access token for use in authorization by the broker.
+
+
JwtRetrieverException - Exception in org.apache.kafka.common.security.oauthbearer
+
+
A JwtRetrieverException is thrown in cases where the JWT cannot be retrieved.
+
+
JwtRetrieverException(String) - Constructor for exception org.apache.kafka.common.security.oauthbearer.JwtRetrieverException
+
 
+
JwtRetrieverException(String, Throwable) - Constructor for exception org.apache.kafka.common.security.oauthbearer.JwtRetrieverException
+
 
+
JwtRetrieverException(Throwable) - Constructor for exception org.apache.kafka.common.security.oauthbearer.JwtRetrieverException
+
 
+
JwtValidator - Interface in org.apache.kafka.common.security.oauthbearer
+
+
An instance of JwtValidator acts as a function object that, given an access token in base-64 encoded JWT format, can parse the data, perform validation, and construct an OAuthBearerToken for use by the caller.
+
+
JwtValidatorException - Exception in org.apache.kafka.common.security.oauthbearer
+
+
A JwtValidatorException is thrown in cases where the validity of a JWT cannot be determined.
+
+
JwtValidatorException(String) - Constructor for exception org.apache.kafka.common.security.oauthbearer.JwtValidatorException
+
 
+
JwtValidatorException(String, Throwable) - Constructor for exception org.apache.kafka.common.security.oauthbearer.JwtValidatorException
+
 
+
JwtValidatorException(Throwable) - Constructor for exception org.apache.kafka.common.security.oauthbearer.JwtValidatorException
+
 
+
+

K

+
+
KafkaAdminClient - Class in org.apache.kafka.clients.admin
+
+
The default implementation of Admin.
+
+
KafkaClientSupplier - Interface in org.apache.kafka.streams
+
+
KafkaClientSupplier can be used to provide custom Kafka clients to a KafkaStreams instance.
+
+
kafkaClusterId() - Method in interface org.apache.kafka.connect.health.ConnectClusterDetails
+
+
Get the cluster ID of the Kafka cluster backing this Connect cluster.
+
+
KafkaConsumer<K,V> - Class in org.apache.kafka.clients.consumer
+
+
A client that consumes records from a Kafka cluster.
+
+
KafkaConsumer(Map<String, Object>) - Constructor for class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
A consumer is instantiated by providing a set of key-value pairs as configuration.
+
+
KafkaConsumer(Map<String, Object>, Deserializer<K>, Deserializer<V>) - Constructor for class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
A consumer is instantiated by providing a set of key-value pairs as configuration, and a key and a value Deserializer.
+
+
KafkaConsumer(Properties) - Constructor for class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
A consumer is instantiated by providing a Properties object as configuration.
+
+
KafkaConsumer(Properties, Deserializer<K>, Deserializer<V>) - Constructor for class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
A consumer is instantiated by providing a Properties object as configuration, and a key and a value Deserializer.
+
+
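The KafkaConsumer constructors above note that a consumer can be built from configuration plus explicit Deserializer instances. A minimal sketch of that pattern follows; the bootstrap address, group id, and topic name are hypothetical.

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // hypothetical group id

        // Constructor that takes the configuration plus explicit key/value Deserializers,
        // so key.deserializer / value.deserializer need not be set in the Properties.
        try (KafkaConsumer<String, String> consumer =
                     new KafkaConsumer<>(props, new StringDeserializer(), new StringDeserializer())) {
            consumer.subscribe(List.of("example-topic"));                     // hypothetical topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("key=%s value=%s offset=%d%n",
                        record.key(), record.value(), record.offset());
            }
        }
    }
}
```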
KafkaException - Exception in org.apache.kafka.common
+
+
The base class of all other Kafka exceptions
+
+
KafkaException() - Constructor for exception org.apache.kafka.common.KafkaException
+
 
+
KafkaException(String) - Constructor for exception org.apache.kafka.common.KafkaException
+
 
+
KafkaException(String, Throwable) - Constructor for exception org.apache.kafka.common.KafkaException
+
 
+
KafkaException(Throwable) - Constructor for exception org.apache.kafka.common.KafkaException
+
 
+
KafkaFuture<T> - Class in org.apache.kafka.common
+
+
A flexible future which supports call chaining and other asynchronous programming patterns.
+
+
KafkaFuture() - Constructor for class org.apache.kafka.common.KafkaFuture
+
 
+
KafkaFuture.BaseFunction<A,B> - Interface in org.apache.kafka.common
+
+
A function which takes objects of type A and returns objects of type B.
+
+
KafkaFuture.BiConsumer<A,B> - Interface in org.apache.kafka.common
+
+
A consumer of two different types of object.
+
+
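KafkaFuture, together with its BaseFunction and BiConsumer interfaces, supports chaining callbacks onto asynchronous Admin results. A small sketch of thenApply() chaining is shown below; the bootstrap address is a placeholder and error handling is kept minimal.

```java
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.KafkaFuture;

public class KafkaFutureSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker

        try (Admin admin = Admin.create(props)) {
            // describeCluster() returns a result object whose fields are KafkaFutures.
            KafkaFuture<String> clusterId = admin.describeCluster().clusterId();

            // thenApply() chains a BaseFunction onto the future; get() blocks for the result.
            KafkaFuture<String> message = clusterId.thenApply(id -> "cluster id: " + id);
            System.out.println(message.get());
        }
    }
}
```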
KafkaMetric - Class in org.apache.kafka.common.metrics
+
 
+
KafkaMetric(Object, MetricName, MetricValueProvider<?>, MetricConfig, Time) - Constructor for class org.apache.kafka.common.metrics.KafkaMetric
+
+
Create a metric to monitor an object that implements MetricValueProvider.
+
+
KafkaMetricsContext - Class in org.apache.kafka.common.metrics
+
+
An implementation of MetricsContext, it encapsulates required metrics context properties for Kafka services and clients
+
+
KafkaMetricsContext(String) - Constructor for class org.apache.kafka.common.metrics.KafkaMetricsContext
+
+
Create a MetricsContext with namespace, no service or client properties
+
+
KafkaMetricsContext(String, Map<String, ?>) - Constructor for class org.apache.kafka.common.metrics.KafkaMetricsContext
+
+
Create a MetricsContext with namespace, service or client properties
+
+
kafkaOffset() - Method in class org.apache.kafka.connect.sink.SinkRecord
+
 
+
kafkaPartition() - Method in class org.apache.kafka.connect.connector.ConnectRecord
+
 
+
KafkaPrincipal - Class in org.apache.kafka.common.security.auth
+
+
Principals in Kafka are defined by a type and a name.
+
+
KafkaPrincipal(String, String) - Constructor for class org.apache.kafka.common.security.auth.KafkaPrincipal
+
 
+
KafkaPrincipal(String, String, boolean) - Constructor for class org.apache.kafka.common.security.auth.KafkaPrincipal
+
 
+
KafkaPrincipalBuilder - Interface in org.apache.kafka.common.security.auth
+
+
Pluggable principal builder interface which supports both SSL authentication through SslAuthenticationContext and SASL through SaslAuthenticationContext.
+
+
KafkaPrincipalSerde - Interface in org.apache.kafka.common.security.auth
+
+
Serializer/Deserializer interface for KafkaPrincipal for the purpose of inter-broker forwarding.
+
+
KafkaProducer<K,V> - Class in org.apache.kafka.clients.producer
+
+
A Kafka client that publishes records to the Kafka cluster.
+
+
KafkaProducer(Map<String, Object>) - Constructor for class org.apache.kafka.clients.producer.KafkaProducer
+
+
A producer is instantiated by providing a set of key-value pairs as configuration.
+
+
KafkaProducer(Map<String, Object>, Serializer<K>, Serializer<V>) - Constructor for class org.apache.kafka.clients.producer.KafkaProducer
+
+
A producer is instantiated by providing a set of key-value pairs as configuration, a key and a value Serializer.
+
+
KafkaProducer(Properties) - Constructor for class org.apache.kafka.clients.producer.KafkaProducer
+
+
A producer is instantiated by providing a set of key-value pairs as configuration.
+
+
KafkaProducer(Properties, Serializer<K>, Serializer<V>) - Constructor for class org.apache.kafka.clients.producer.KafkaProducer
+
+
A producer is instantiated by providing a set of key-value pairs as configuration, and a key and a value Serializer.
+
+
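The KafkaProducer constructors above describe building a producer from configuration plus explicit Serializer instances. A minimal sketch, assuming a hypothetical broker address and topic:

```java
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class ProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker

        // Constructor that takes the configuration plus explicit key/value Serializers.
        try (KafkaProducer<String, String> producer =
                     new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
            producer.send(new ProducerRecord<>("example-topic", "key", "value"), // hypothetical topic
                    (metadata, exception) -> {
                        if (exception != null) {
                            exception.printStackTrace();
                        } else {
                            System.out.printf("wrote to %s-%d@%d%n",
                                    metadata.topic(), metadata.partition(), metadata.offset());
                        }
                    });
        }
    }
}
```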
KafkaShareConsumer<K,V> - Class in org.apache.kafka.clients.consumer
+
+
A client that consumes records from a Kafka cluster using a share group.
+
+
KafkaShareConsumer(Map<String, Object>) - Constructor for class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
A consumer is instantiated by providing a set of key-value pairs as configuration.
+
+
KafkaShareConsumer(Map<String, Object>, Deserializer<K>, Deserializer<V>) - Constructor for class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
A consumer is instantiated by providing a set of key-value pairs as configuration, and a key and a value Deserializer.
+
+
KafkaShareConsumer(Properties) - Constructor for class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
A consumer is instantiated by providing a Properties object as configuration.
+
+
KafkaShareConsumer(Properties, Deserializer<K>, Deserializer<V>) - Constructor for class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
A consumer is instantiated by providing a Properties object as configuration, and a + key and a value Deserializer.
+
+
KafkaStorageException - Exception in org.apache.kafka.common.errors
+
+
Miscellaneous disk-related IOException occurred when handling a request.
+
+
KafkaStorageException() - Constructor for exception org.apache.kafka.common.errors.KafkaStorageException
+
 
+
KafkaStorageException(String) - Constructor for exception org.apache.kafka.common.errors.KafkaStorageException
+
 
+
KafkaStorageException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.KafkaStorageException
+
 
+
KafkaStorageException(Throwable) - Constructor for exception org.apache.kafka.common.errors.KafkaStorageException
+
 
+
KafkaStreams - Class in org.apache.kafka.streams
+
+
A Kafka client that allows for performing continuous computation on input coming from one or more input topics and sends output to zero, one, or more output topics.
+
+
KafkaStreams(Topology, Properties) - Constructor for class org.apache.kafka.streams.KafkaStreams
+
+
Create a KafkaStreams instance.
+
+
KafkaStreams(Topology, Properties, Time) - Constructor for class org.apache.kafka.streams.KafkaStreams
+
+
Create a KafkaStreams instance.
+
+
KafkaStreams(Topology, Properties, KafkaClientSupplier) - Constructor for class org.apache.kafka.streams.KafkaStreams
+
+
Create a KafkaStreams instance.
+
+
KafkaStreams(Topology, Properties, KafkaClientSupplier, Time) - Constructor for class org.apache.kafka.streams.KafkaStreams
+
+
Create a KafkaStreams instance.
+
+
KafkaStreams(Topology, StreamsConfig) - Constructor for class org.apache.kafka.streams.KafkaStreams
+
+
Create a KafkaStreams instance.
+
+
KafkaStreams(Topology, StreamsConfig, Time) - Constructor for class org.apache.kafka.streams.KafkaStreams
+
+
Create a KafkaStreams instance.
+
+
KafkaStreams(Topology, StreamsConfig, KafkaClientSupplier) - Constructor for class org.apache.kafka.streams.KafkaStreams
+
+
Create a KafkaStreams instance.
+
+
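The KafkaStreams constructors above all take a Topology plus configuration. A minimal sketch of the (Topology, Properties) form follows; the application id, broker address, and pass-through topology are illustrative assumptions only.

```java
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;

public class StreamsSketch {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic").to("output-topic"); // hypothetical pass-through topology
        Topology topology = builder.build();

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-app");       // hypothetical app id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        // The (Topology, Properties) constructor listed above.
        KafkaStreams streams = new KafkaStreams(topology, props);
        streams.start();
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
}
```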
KafkaStreams.CloseOptions - Class in org.apache.kafka.streams
+
+
Class that handles options passed in case of KafkaStreams instance scale down
+
+
KafkaStreams.State - Enum Class in org.apache.kafka.streams
+
+
Kafka Streams states are the possible states that a Kafka Streams instance can be in.
+
+
KafkaStreams.StateListener - Interface in org.apache.kafka.streams
+
+
Listen to KafkaStreams.State change events.
+
+
KafkaStreamsAssignment - Class in org.apache.kafka.streams.processor.assignment
+
+
A simple container class for the assignor to return the desired placement of active and standby tasks on KafkaStreams clients.
+
+
KafkaStreamsAssignment.AssignedTask - Class in org.apache.kafka.streams.processor.assignment
+
 
+
KafkaStreamsAssignment.AssignedTask.Type - Enum Class in org.apache.kafka.streams.processor.assignment
+
 
+
KafkaStreamsState - Interface in org.apache.kafka.streams.processor.assignment
+
+
A read-only metadata class representing the current state of each KafkaStreams client with at least one StreamThread participating in this rebalance.
+
+
kafkaStreamsStates(boolean) - Method in interface org.apache.kafka.streams.processor.assignment.ApplicationState
+
 
+
key - Variable in class org.apache.kafka.streams.KeyValue
+
+
The key of the key-value pair.
+
+
key() - Method in class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
The key (or null if no key is specified)
+
+
key() - Method in class org.apache.kafka.clients.producer.ProducerRecord
+
 
+
key() - Method in interface org.apache.kafka.common.header.Header
+
 
+
key() - Method in class org.apache.kafka.common.quota.ClientQuotaAlteration.Op
+
 
+
key() - Method in class org.apache.kafka.connect.connector.ConnectRecord
+
 
+
key() - Method in interface org.apache.kafka.connect.header.Header
+
+
The header's key, which is not necessarily unique within the set of headers on a Kafka message.
+
+
key() - Method in class org.apache.kafka.streams.kstream.Windowed
+
+
Return the key of the window.
+
+
key() - Method in class org.apache.kafka.streams.processor.api.FixedKeyRecord
+
+
The key of the record.
+
+
key() - Method in class org.apache.kafka.streams.processor.api.Record
+
+
The key of the record.
+
+
key() - Method in class org.apache.kafka.streams.query.MultiVersionedKeyQuery
+
+
The key that was specified for this query.
+
+
key() - Method in class org.apache.kafka.streams.query.TimestampedKeyQuery
+
+
Return the key that was specified for this query.
+
+
key() - Method in class org.apache.kafka.streams.query.VersionedKeyQuery
+
+
The key that was specified for this query.
+
+
key() - Method in class org.apache.kafka.streams.test.TestRecord
+
 
+
KEY - Enum constant in enum class org.apache.kafka.common.errors.RecordDeserializationException.DeserializationExceptionOrigin
+
 
+
KEY - Enum constant in enum class org.apache.kafka.connect.storage.ConverterType
+
 
+
KEY - Enum constant in enum class org.apache.kafka.streams.errors.ProductionExceptionHandler.SerializationExceptionOrigin
+
+
Serialization exception occurred during serialization of the key.
+
+
KEY_DESERIALIZER_CLASS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
key.deserializer
+
+
KEY_DESERIALIZER_CLASS_DOC - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
KEY_SCHEMA - Static variable in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
KEY_SCHEMA - Static variable in class org.apache.kafka.connect.mirror.Heartbeat
+
 
+
KEY_SERIALIZER_CLASS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
key.serializer
+
+
KEY_SERIALIZER_CLASS_DOC - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
 
+
keyBuffer() - Method in exception org.apache.kafka.common.errors.RecordDeserializationException
+
 
+
keyDeserializer() - Method in class org.apache.kafka.streams.state.StateSerdes
+
+
Return the key deserializer.
+
+
keyFrom(byte[]) - Method in class org.apache.kafka.streams.state.StateSerdes
+
+
Deserialize the key from raw bytes.
+
+
KeyQuery<K,V> - Class in org.apache.kafka.streams.query
+
+
Interactive query for retrieving a single record based on its key.
+
+
KeyQueryMetadata - Class in org.apache.kafka.streams
+
+
Represents all the metadata related to a key, where a particular key resides in a KafkaStreams application.
+
+
KeyQueryMetadata(HostInfo, Set<HostInfo>, int) - Constructor for class org.apache.kafka.streams.KeyQueryMetadata
+
 
+
keySchema() - Method in class org.apache.kafka.connect.connector.ConnectRecord
+
 
+
keySchema() - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
keySchema() - Method in interface org.apache.kafka.connect.data.Schema
+
+
Get the key schema for this map schema.
+
+
keySchema() - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
keySerde() - Method in class org.apache.kafka.streams.kstream.Joined
+
+
Deprecated. Since 4.0 and should not be used any longer.
+
+
+
keySerde() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
keySerde() - Method in interface org.apache.kafka.streams.processor.api.ProcessingContext
+
+
Return the default key serde.
+
+
keySerde() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
keySerde() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return the default key serde.
+
+
keySerde() - Method in interface org.apache.kafka.streams.processor.StateStoreContext
+
+
Returns the default key serde.
+
+
keySerde() - Method in class org.apache.kafka.streams.state.StateSerdes
+
+
Return the key serde.
+
+
keySerde(Serde<K>) - Static method in class org.apache.kafka.streams.kstream.Grouped
+
+
Create a Grouped instance with the provided keySerde.
+
+
keySerde(Serde<K>) - Static method in class org.apache.kafka.streams.kstream.Joined
+
+
Create an instance of Joined with a key Serde.
+
+
keySerde(Serde<K>) - Static method in class org.apache.kafka.streams.kstream.Produced
+
+
Create a Produced instance with provided keySerde.
+
+
keySerializer() - Method in class org.apache.kafka.streams.state.StateSerdes
+
+
Return the key serializer.
+
+
keystore() - Method in interface org.apache.kafka.common.security.auth.SslEngineFactory
+
+
Returns keystore configured for this factory.
+
+
keyValue() - Method in class org.apache.kafka.streams.processor.MockProcessorContext.CapturedForward
+
+
Deprecated.
+
The data forwarded.
+
+
KeyValue<K,V> - Class in org.apache.kafka.streams
+
+
A key-value pair defined for a single Kafka Streams record.
+
+
KeyValue(K, V) - Constructor for class org.apache.kafka.streams.KeyValue
+
+
Create a new key-value pair.
+
+
KeyValueBytesStoreSupplier - Interface in org.apache.kafka.streams.state
+
+
A store supplier that can be used to create one or more KeyValueStore<Bytes, byte[]> instances of type <Bytes, byte[]>.
+
+
KeyValueIterator<K,V> - Interface in org.apache.kafka.streams.state
+
+
Iterator interface of KeyValue.
+
+
KeyValueMapper<K,V,VR> - Interface in org.apache.kafka.streams.kstream
+
+
The KeyValueMapper interface for mapping a key-value pair to a new value of arbitrary type.
+
+
keyValueStore() - Static method in class org.apache.kafka.streams.state.QueryableStoreTypes
+
+ +
+
keyValueStore(DslKeyValueParams) - Method in enum class org.apache.kafka.streams.kstream.Materialized.StoreType
+
 
+
keyValueStore(DslKeyValueParams) - Method in class org.apache.kafka.streams.state.BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers
+
 
+
keyValueStore(DslKeyValueParams) - Method in class org.apache.kafka.streams.state.BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers
+
 
+
keyValueStore(DslKeyValueParams) - Method in interface org.apache.kafka.streams.state.DslStoreSuppliers
+
 
+
KeyValueStore<K,V> - Interface in org.apache.kafka.streams.state
+
+
A key-value store that supports put/get/delete and range queries.
+
+
keyValueStoreBuilder(KeyValueBytesStoreSupplier, Serde<K>, Serde<V>) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Creates a StoreBuilder that can be used to build a KeyValueStore.
+
+
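As a brief illustration of the keyValueStoreBuilder() entry above, the sketch below wraps a persistent key-value store supplier with String/Long serdes; the store name is a placeholder and the builder would normally be added to a Topology.

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.state.KeyValueBytesStoreSupplier;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

public class StoreBuilderSketch {
    public static void main(String[] args) {
        // A RocksDB-backed supplier; "counts-store" is a hypothetical store name.
        KeyValueBytesStoreSupplier supplier = Stores.persistentKeyValueStore("counts-store");

        // keyValueStoreBuilder() combines the supplier with the serdes used to
        // (de)serialize keys and values; the resulting StoreBuilder can be added to a Topology.
        StoreBuilder<KeyValueStore<String, Long>> builder =
                Stores.keyValueStoreBuilder(supplier, Serdes.String(), Serdes.Long())
                      .withCachingEnabled();

        System.out.println("store name: " + builder.name());
    }
}
```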
KGroupedStream<K,V> - Interface in org.apache.kafka.streams.kstream
+
+
KGroupedStream is an abstraction of a grouped record stream of key-value pairs.
+
+
KGroupedTable<K,V> - Interface in org.apache.kafka.streams.kstream
+
+
KGroupedTable is an abstraction of a re-grouped changelog stream from a primary-keyed table, on a different grouping key than the original primary key.
+
+
KStream<K,V> - Interface in org.apache.kafka.streams.kstream
+
+
KStream is an abstraction of a record stream of key-value pairs, i.e., each record is an independent entity/event in the real world.
+
+
KTable<K,V> - Interface in org.apache.kafka.streams.kstream
+
+
KTable is an abstraction of a changelog stream from a primary-keyed table.
+
+
+

L

+
+
lagFor(TaskId) - Method in interface org.apache.kafka.streams.processor.assignment.KafkaStreamsState
+
+
Returns the total lag across all logged stores in the task.
+
+
LagInfo - Class in org.apache.kafka.streams
+
+
Encapsulates information about lag, at a store partition replica (active or standby).
+
+
lastCaughtUpTimestamp() - Method in class org.apache.kafka.clients.admin.QuorumInfo.ReplicaState
+
+
Return the last millisecond timestamp at which this replica was known to be caught up with the leader.
+
+
lastFetchTimestamp() - Method in class org.apache.kafka.clients.admin.QuorumInfo.ReplicaState
+
+
Return the last millisecond timestamp that the leader received a fetch from this replica.
+
+
lastHeader(String) - Method in interface org.apache.kafka.common.header.Headers
+
+
Returns just one (the very last) header for the given key, if present.
+
+
lastKnownElr() - Method in class org.apache.kafka.common.TopicPartitionInfo
+
+
Return the last known eligible leader replicas of the partition.
+
+
lastPollTimeout() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
lastSequence() - Method in class org.apache.kafka.clients.admin.ProducerState
+
 
+
lastTimestamp() - Method in class org.apache.kafka.clients.admin.ProducerState
+
 
+
lastWithName(String) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
lastWithName(String) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Return the last Header with the specified key.
+
+
latest() - Static method in class org.apache.kafka.clients.admin.OffsetSpec
+
+
Used to retrieve the latest offset of a partition
+
+
latest() - Static method in class org.apache.kafka.streams.AutoOffsetReset
+
+
Creates an AutoOffsetReset instance representing "latest".
+
+
LATEST - Enum constant in enum class org.apache.kafka.clients.consumer.OffsetResetStrategy
+
+
Deprecated.
+
LATEST - Enum constant in enum class org.apache.kafka.streams.Topology.AutoOffsetReset
+
+
Deprecated.
+
LatestSpec() - Constructor for class org.apache.kafka.clients.admin.OffsetSpec.LatestSpec
+
 
+
latestTiered() - Static method in class org.apache.kafka.clients.admin.OffsetSpec
+
+
Used to retrieve the highest offset of data stored in remote storage.
+
+
LatestTieredSpec() - Constructor for class org.apache.kafka.clients.admin.OffsetSpec.LatestTieredSpec
+
 
+
leader() - Method in class org.apache.kafka.common.PartitionInfo
+
+
The node id of the node currently acting as a leader for this partition or null if there is no leader
+
+
leader() - Method in class org.apache.kafka.common.TopicPartitionInfo
+
+
Return the leader of the partition or null if there is none.
+
+
LEADER_EPOCH - Enum constant in enum class org.apache.kafka.server.log.remote.storage.RemoteStorageManager.IndexType
+
+
Represents leader epoch index.
+
+
leaderEpoch() - Method in class org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo
+
 
+
leaderEpoch() - Method in class org.apache.kafka.clients.admin.QuorumInfo
+
 
+
leaderEpoch() - Method in class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
Get the leader epoch for the record if available
+
+
leaderEpoch() - Method in class org.apache.kafka.clients.consumer.OffsetAndMetadata
+
+
Get the leader epoch of the previously consumed record (if one is known).
+
+
leaderEpoch() - Method in class org.apache.kafka.clients.consumer.OffsetAndTimestamp
+
+
Get the leader epoch corresponding to the offset that was found (if one exists).
+
+
leaderEpochIndex() - Method in class org.apache.kafka.server.log.remote.storage.LogSegmentData
+
 
+
leaderFor(TopicPartition) - Method in class org.apache.kafka.common.Cluster
+
+
Get the current leader for the given topic-partition
+
+
leaderId() - Method in class org.apache.kafka.clients.admin.QuorumInfo
+
 
+
LeaderNotAvailableException - Exception in org.apache.kafka.common.errors
+
+
There is no currently available leader for the given partition (either because a leadership election is in progress or because all replicas are down).
+
+
LeaderNotAvailableException(String) - Constructor for exception org.apache.kafka.common.errors.LeaderNotAvailableException
+
 
+
LeaderNotAvailableException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.LeaderNotAvailableException
+
 
+
LEAVE_GROUP - Enum constant in enum class org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation
+
 
+
leaveGroup(boolean) - Method in class org.apache.kafka.streams.KafkaStreams.CloseOptions
+
 
+
leftJoin(GlobalKTable<GlobalKey, GlobalValue>, KeyValueMapper<? super K, ? super V, ? extends GlobalKey>, ValueJoiner<? super V, ? super GlobalValue, ? extends VOut>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Join records of this stream with GlobalKTable's records using non-windowed left equi-join.
+
+
leftJoin(GlobalKTable<GlobalKey, GlobalValue>, KeyValueMapper<? super K, ? super V, ? extends GlobalKey>, ValueJoiner<? super V, ? super GlobalValue, ? extends VOut>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
leftJoin(GlobalKTable<GlobalKey, GlobalValue>, KeyValueMapper<? super K, ? super V, ? extends GlobalKey>, ValueJoinerWithKey<? super K, ? super V, ? super GlobalValue, ? extends VOut>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
leftJoin(GlobalKTable<GlobalKey, GlobalValue>, KeyValueMapper<? super K, ? super V, ? extends GlobalKey>, ValueJoinerWithKey<? super K, ? super V, ? super GlobalValue, ? extends VOut>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
leftJoin(KStream<K, VRight>, ValueJoiner<? super V, ? super VRight, ? extends VOut>, JoinWindows) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Join records of this (left) stream with another (right) KStream's records using a windowed left equi-join.
+
+
leftJoin(KStream<K, VRight>, ValueJoiner<? super V, ? super VRight, ? extends VOut>, JoinWindows, StreamJoined<K, V, VRight>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
leftJoin(KStream<K, VRight>, ValueJoinerWithKey<? super K, ? super V, ? super VRight, ? extends VOut>, JoinWindows) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
leftJoin(KStream<K, VRight>, ValueJoinerWithKey<? super K, ? super V, ? super VRight, ? extends VOut>, JoinWindows, StreamJoined<K, V, VRight>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
leftJoin(KTable<K, VO>, ValueJoiner<? super V, ? super VO, ? extends VR>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable (left input) with another KTable's (right input) records using non-windowed left equi join, with default serializers, deserializers, and state store.
+
+
leftJoin(KTable<K, VO>, ValueJoiner<? super V, ? super VO, ? extends VR>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable (left input) with another KTable's (right input) records using non-windowed left equi join, with the Materialized instance for configuration of the key serde, the result table's value serde, and state store.
+
+
leftJoin(KTable<K, VO>, ValueJoiner<? super V, ? super VO, ? extends VR>, Named) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable (left input) with another KTable's (right input) records using non-windowed left equi join, with default serializers, deserializers, and state store.
+
+
leftJoin(KTable<K, VO>, ValueJoiner<? super V, ? super VO, ? extends VR>, Named, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable (left input) with another KTable's (right input) records using non-windowed left equi join, with the Materialized instance for configuration of the key serde, the result table's value serde, and state store.
+
+
leftJoin(KTable<K, VTable>, ValueJoiner<? super V, ? super VTable, ? extends VOut>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Join records of this stream with KTable's records using non-windowed left equi-join.
+
+
leftJoin(KTable<K, VTable>, ValueJoiner<? super V, ? super VTable, ? extends VOut>, Joined<K, V, VTable>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Join records of this stream with KTable's records using non-windowed left equi-join.
+
+
leftJoin(KTable<K, VTable>, ValueJoinerWithKey<? super K, ? super V, ? super VTable, ? extends VOut>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
leftJoin(KTable<K, VTable>, ValueJoinerWithKey<? super K, ? super V, ? super VTable, ? extends VOut>, Joined<K, V, VTable>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
leftJoin(KTable<KO, VO>, BiFunction<? super K, ? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed left join.
+
+
leftJoin(KTable<KO, VO>, BiFunction<? super K, ? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed left join.
+
+
leftJoin(KTable<KO, VO>, BiFunction<? super K, ? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>, TableJoined<K, KO>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed left join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.
+
+
leftJoin(KTable<KO, VO>, BiFunction<? super K, ? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>, TableJoined<K, KO>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed left join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.
+
+
leftJoin(KTable<KO, VO>, Function<? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed left join.
+
+
leftJoin(KTable<KO, VO>, Function<? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed left join.
+
+
leftJoin(KTable<KO, VO>, Function<? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>, TableJoined<K, KO>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed left join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.
+
+
leftJoin(KTable<KO, VO>, Function<? super V, ? extends KO>, ValueJoiner<? super V, ? super VO, ? extends VR>, TableJoined<K, KO>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable with another KTable using non-windowed left join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.
+
+
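The KTable leftJoin overloads above that take a key extractor (Function or BiFunction) perform foreign-key table-table joins. A rough sketch using the Function-based overload follows; the topics, value layout, and extraction logic are invented for illustration.

```java
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KTable;

public class ForeignKeyJoinSketch {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();

        // Hypothetical topics: orders keyed by orderId, customers keyed by customerId.
        KTable<String, String> orders = builder.table("orders");
        KTable<String, String> customers = builder.table("customers");

        // Foreign-key left join: the Function extracts the customerId (foreign key)
        // from each order value; the ValueJoiner combines order and customer values.
        KTable<String, String> enriched = orders.leftJoin(
                customers,
                orderValue -> orderValue.split(",")[0], // assumed "customerId,..." value layout
                (orderValue, customerValue) ->
                        orderValue + " | " + (customerValue == null ? "unknown" : customerValue));

        enriched.toStream().to("enriched-orders");
    }
}
```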
lifetimeMs() - Method in interface org.apache.kafka.common.security.oauthbearer.OAuthBearerToken
+
+
The token's lifetime, expressed as the number of milliseconds since the epoch, as per RFC 6749 Section 1.4.
+
+
LINEAR - Enum constant in enum class org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing
+
 
+
LinearBinScheme(int, double) - Constructor for class org.apache.kafka.common.metrics.stats.Histogram.LinearBinScheme
+
+
Create a linear bin scheme with the specified number of bins and the maximum value to be counted in the bins.
+
+
LINGER_MS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
linger.ms
+
+
LIST - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigType
+
 
+
LIST - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Type
+
+
Used for list values.
+
+
listClientMetricsResources() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Deprecated, for removal: This API element is subject to removal in a future version. Since 4.1. Use Admin.listConfigResources() instead.
+
+
+
listClientMetricsResources(ListClientMetricsResourcesOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Deprecated, for removal: This API element is subject to removal in a future version.
+
+
listClientMetricsResources(ListClientMetricsResourcesOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
listClientMetricsResources(ListClientMetricsResourcesOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
ListClientMetricsResourcesOptions - Class in org.apache.kafka.clients.admin
+
+
Deprecated. Since 4.1. Use ListConfigResourcesOptions instead.
+
+
+
ListClientMetricsResourcesOptions() - Constructor for class org.apache.kafka.clients.admin.ListClientMetricsResourcesOptions
+
+
Deprecated.
+
ListClientMetricsResourcesResult - Class in org.apache.kafka.clients.admin
+
+
Deprecated. Since 4.1. Use ListConfigResourcesResult instead.
+
+
+
listConfigResources() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List all configuration resources available in the cluster with the default options.
+
+
listConfigResources(Set<ConfigResource.Type>, ListConfigResourcesOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the configuration resources available in the cluster which matches config resource type.
+
+
listConfigResources(Set<ConfigResource.Type>, ListConfigResourcesOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
listConfigResources(Set<ConfigResource.Type>, ListConfigResourcesOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
ListConfigResourcesOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
ListConfigResourcesOptions() - Constructor for class org.apache.kafka.clients.admin.ListConfigResourcesOptions
+
 
+
ListConfigResourcesResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.listConfigResources() call.
+
+
listConsumerGroupOffsets(String) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the consumer group offsets available in the cluster with the default options.
+
+
listConsumerGroupOffsets(String, ListConsumerGroupOffsetsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the consumer group offsets available in the cluster.
+
+
listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the consumer group offsets available in the cluster for the specified groups with the default options.
+
+
listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec>, ListConsumerGroupOffsetsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the consumer group offsets available in the cluster for the specified consumer groups.
+
+
listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec>, ListConsumerGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec>, ListConsumerGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
ListConsumerGroupOffsetsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
ListConsumerGroupOffsetsOptions() - Constructor for class org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions
+
 
+
ListConsumerGroupOffsetsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
ListConsumerGroupOffsetsSpec - Class in org.apache.kafka.clients.admin
+
+
Specification of consumer group offsets to list using Admin.listConsumerGroupOffsets(java.util.Map).
+
+
ListConsumerGroupOffsetsSpec() - Constructor for class org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec
+
 
+
listConsumerGroups() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Deprecated, for removal: This API element is subject to removal in a future version.
Since 4.1. Use Admin.listGroups(ListGroupsOptions) instead.
+
+
+
listConsumerGroups(ListConsumerGroupsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Deprecated, for removal: This API element is subject to removal in a future version.
Since 4.1. Use Admin.listGroups(ListGroupsOptions) instead.
+
+
+
listConsumerGroups(ListConsumerGroupsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
+
Deprecated.
+
+
listConsumerGroups(ListConsumerGroupsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
+
Deprecated, for removal: This API element is subject to removal in a future version.
+
+
ListConsumerGroupsOptions - Class in org.apache.kafka.clients.admin
+
+
Deprecated.
Since 4.1. Use Admin.listGroups(ListGroupsOptions) instead.
+
+
+
ListConsumerGroupsOptions() - Constructor for class org.apache.kafka.clients.admin.ListConsumerGroupsOptions
+
+
Deprecated.
+
ListConsumerGroupsResult - Class in org.apache.kafka.clients.admin
+
+
Deprecated.
Since 4.1. Use Admin.listGroups(ListGroupsOptions) instead.
+
+
+
ListDeserializer<Inner> - Class in org.apache.kafka.common.serialization
+
 
+
ListDeserializer() - Constructor for class org.apache.kafka.common.serialization.ListDeserializer
+
 
+
ListDeserializer(Class<L>, Deserializer<Inner>) - Constructor for class org.apache.kafka.common.serialization.ListDeserializer
+
 
+
listener() - Method in class org.apache.kafka.clients.admin.RaftVoterEndpoint
+
+
The listener name for this endpoint.
+
+
listener() - Method in class org.apache.kafka.common.Endpoint
+
+
Returns the listener name of this endpoint.
+
+
listenerName() - Method in class org.apache.kafka.common.Endpoint
+
+
Deprecated, for removal: This API element is subject to removal in a future version.
Since 4.1. Use Endpoint.listener() instead. This function will be removed in 5.0.
+
+
+
listenerName() - Method in interface org.apache.kafka.common.security.auth.AuthenticationContext
+
+
Name of the listener used for the connection
+
+
listenerName() - Method in class org.apache.kafka.common.security.auth.PlaintextAuthenticationContext
+
 
+
listenerName() - Method in class org.apache.kafka.common.security.auth.SaslAuthenticationContext
+
 
+
listenerName() - Method in class org.apache.kafka.common.security.auth.SslAuthenticationContext
+
 
+
listenerName() - Method in interface org.apache.kafka.server.authorizer.AuthorizableRequestContext
+
+
Returns name of listener on which request was received.
+
+
ListenerNotFoundException - Exception in org.apache.kafka.common.errors
+
+
The leader does not have an endpoint corresponding to the listener on which metadata was requested.
+
+
ListenerNotFoundException(String) - Constructor for exception org.apache.kafka.common.errors.ListenerNotFoundException
+
 
+
ListenerNotFoundException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.ListenerNotFoundException
+
 
+
listGroups() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the groups available in the cluster with the default options.
+
+
listGroups(ListGroupsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the groups available in the cluster.
+
+
listGroups(ListGroupsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
listGroups(ListGroupsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
ListGroupsOptions - Class in org.apache.kafka.clients.admin
+
+
Options for Admin.listGroups().
+
+
ListGroupsOptions() - Constructor for class org.apache.kafka.clients.admin.ListGroupsOptions
+
 
+
ListGroupsResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.listGroups() call.
+
+
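A hedged sketch of the replacement API that the deprecated listConsumerGroups entries point at; it assumes `ListGroupsResult` exposes `all()` in the same way the older `ListConsumerGroupsResult` did, and prints each listing via `toString()` to avoid relying on specific accessor names.

```java
// Sketch only: assumes ListGroupsResult exposes all(), mirroring the older ListConsumerGroupsResult.
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListGroupsOptions;

public class ListGroupsExample {
    static void printGroups(Admin admin) throws Exception {
        // Each group listing is printed via toString().
        admin.listGroups(new ListGroupsOptions()).all().get().forEach(System.out::println);
    }
}
```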
listings() - Method in class org.apache.kafka.clients.admin.ListTopicsResult
+
+
Return a future which yields a collection of TopicListing objects.
+
+
listInternal(boolean) - Method in class org.apache.kafka.clients.admin.ListTopicsOptions
+
+
Set whether we should list internal topics.
+
+
listOffsets(Map<TopicPartition, OffsetSpec>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List offset for the specified partitions and OffsetSpec.
+
+
listOffsets(Map<TopicPartition, OffsetSpec>, ListOffsetsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List offset for the specified partitions.
+
+
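A minimal sketch of listing offsets for a single partition with two different OffsetSpec values; the topic name "orders" and partition 0 are hypothetical, and `admin` is assumed to be a configured client.

```java
// Sketch only: "orders"/partition 0 is a hypothetical topic-partition; admin is a configured Admin client.
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class ListOffsetsExample {
    static void printRange(Admin admin) throws Exception {
        TopicPartition tp = new TopicPartition("orders", 0);
        ListOffsetsResultInfo earliest =
                admin.listOffsets(Map.of(tp, OffsetSpec.earliest())).partitionResult(tp).get();
        ListOffsetsResultInfo latest =
                admin.listOffsets(Map.of(tp, OffsetSpec.latest())).partitionResult(tp).get();
        System.out.printf("%s: earliest=%d latest=%d%n", tp, earliest.offset(), latest.offset());
    }
}
```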
listOffsets(Map<TopicPartition, OffsetSpec>, ListOffsetsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
listOffsets(Map<TopicPartition, OffsetSpec>, ListOffsetsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
ListOffsetsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
ListOffsetsOptions() - Constructor for class org.apache.kafka.clients.admin.ListOffsetsOptions
+
 
+
ListOffsetsOptions(IsolationLevel) - Constructor for class org.apache.kafka.clients.admin.ListOffsetsOptions
+
 
+
ListOffsetsResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.listOffsets(Map) call.
+
+
ListOffsetsResult(Map<TopicPartition, KafkaFuture<ListOffsetsResult.ListOffsetsResultInfo>>) - Constructor for class org.apache.kafka.clients.admin.ListOffsetsResult
+
 
+
ListOffsetsResult.ListOffsetsResultInfo - Class in org.apache.kafka.clients.admin
+
 
+
ListOffsetsResultInfo(long, long, Optional<Integer>) - Constructor for class org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo
+
 
+
listPartitionReassignments() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List all of the current partition reassignments. This is a convenience method for Admin.listPartitionReassignments(ListPartitionReassignmentsOptions) with default options.
+
+
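A hedged sketch of the convenience method above; it assumes the result type exposes `reassignments()`, and `admin` is an already configured client.

```java
// Sketch only: assumes ListPartitionReassignmentsResult.reassignments(); admin is a configured Admin client.
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.PartitionReassignment;
import org.apache.kafka.common.TopicPartition;

public class ReassignmentsExample {
    static void printReassignments(Admin admin) throws Exception {
        Map<TopicPartition, PartitionReassignment> inFlight =
                admin.listPartitionReassignments().reassignments().get();
        inFlight.forEach((tp, r) ->
                System.out.printf("%s adding=%s removing=%s%n", tp, r.addingReplicas(), r.removingReplicas()));
    }
}
```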
listPartitionReassignments(Optional<Set<TopicPartition>>, ListPartitionReassignmentsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
 
+
listPartitionReassignments(Optional<Set<TopicPartition>>, ListPartitionReassignmentsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
listPartitionReassignments(Optional<Set<TopicPartition>>, ListPartitionReassignmentsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
listPartitionReassignments(Set<TopicPartition>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the current reassignments for the given partitions. This is a convenience method for Admin.listPartitionReassignments(Set, ListPartitionReassignmentsOptions) with default options.
+
+
listPartitionReassignments(Set<TopicPartition>, ListPartitionReassignmentsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the current reassignments for the given partitions
+
+
listPartitionReassignments(ListPartitionReassignmentsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List all of the current partition reassignments
+
+
ListPartitionReassignmentsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
ListPartitionReassignmentsOptions() - Constructor for class org.apache.kafka.clients.admin.ListPartitionReassignmentsOptions
+
 
+
ListPartitionReassignmentsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
listRemoteLogSegments(TopicIdPartition) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager
+
+
Returns all the remote log segment metadata of the given topicIdPartition.
+
+
listRemoteLogSegments(TopicIdPartition, int) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager
+
+
Returns an iterator of remote log segment metadata, sorted by RemoteLogSegmentMetadata.startOffset() in ascending order, which contains the given leader epoch.
+
+
ListSerde() - Constructor for class org.apache.kafka.common.serialization.Serdes.ListSerde
+
 
+
ListSerde(Class<L>, Serde<Inner>) - Constructor for class org.apache.kafka.common.serialization.Serdes.ListSerde
+
 
+
ListSerde(Class<L>, Serde<Inner>) - Static method in class org.apache.kafka.common.serialization.Serdes
+
 
+
ListSerializer<Inner> - Class in org.apache.kafka.common.serialization
+
 
+
ListSerializer() - Constructor for class org.apache.kafka.common.serialization.ListSerializer
+
 
+
ListSerializer(Serializer<Inner>) - Constructor for class org.apache.kafka.common.serialization.ListSerializer
+
 
+
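For the list serialization entries above (ListDeserializer, ListSerializer, Serdes.ListSerde), a short sketch of building a serde for `List<Integer>` values from the inner Integer serde:

```java
// Sketch only: builds a Serde for List<Integer> values from the inner Integer serde.
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;

public class ListSerdeExample {
    static Serde<List<Integer>> integerListSerde() {
        return Serdes.ListSerde(ArrayList.class, Serdes.Integer());
    }
}
```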
listShareGroupOffsets(Map<String, ListShareGroupOffsetsSpec>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the share group offsets available in the cluster for the specified share groups with the default options.
+
+
listShareGroupOffsets(Map<String, ListShareGroupOffsetsSpec>, ListShareGroupOffsetsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the share group offsets available in the cluster for the specified share groups.
+
+
listShareGroupOffsets(Map<String, ListShareGroupOffsetsSpec>, ListShareGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
listShareGroupOffsets(Map<String, ListShareGroupOffsetsSpec>, ListShareGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
ListShareGroupOffsetsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
ListShareGroupOffsetsOptions() - Constructor for class org.apache.kafka.clients.admin.ListShareGroupOffsetsOptions
+
 
+
ListShareGroupOffsetsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
ListShareGroupOffsetsSpec - Class in org.apache.kafka.clients.admin
+
+
Specification of share group offsets to list using Admin.listShareGroupOffsets(Map, ListShareGroupOffsetsOptions).
+
+
ListShareGroupOffsetsSpec() - Constructor for class org.apache.kafka.clients.admin.ListShareGroupOffsetsSpec
+
 
+
listStreamsGroupOffsets(Map<String, ListStreamsGroupOffsetsSpec>) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the streams group offsets available in the cluster for the specified groups with the default options.
+
+
listStreamsGroupOffsets(Map<String, ListStreamsGroupOffsetsSpec>, ListStreamsGroupOffsetsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the streams group offsets available in the cluster for the specified streams groups.
+
+
listStreamsGroupOffsets(Map<String, ListStreamsGroupOffsetsSpec>, ListStreamsGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
listStreamsGroupOffsets(Map<String, ListStreamsGroupOffsetsSpec>, ListStreamsGroupOffsetsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
ListStreamsGroupOffsetsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
ListStreamsGroupOffsetsOptions() - Constructor for class org.apache.kafka.clients.admin.ListStreamsGroupOffsetsOptions
+
 
+
ListStreamsGroupOffsetsResult - Class in org.apache.kafka.clients.admin
+
+ +
+
ListStreamsGroupOffsetsSpec - Class in org.apache.kafka.clients.admin
+
+
Specification of streams group offsets to list using Admin.listStreamsGroupOffsets(Map, ListStreamsGroupOffsetsOptions).
+
+
ListStreamsGroupOffsetsSpec() - Constructor for class org.apache.kafka.clients.admin.ListStreamsGroupOffsetsSpec
+
 
+
listTopics() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the topics available in the cluster with the default options.
+
+
listTopics() - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
listTopics() - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get metadata about partitions for all topics that the user is authorized to view.
+
+
listTopics() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
listTopics(Duration) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
listTopics(Duration) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get metadata about partitions for all topics that the user is authorized to view.
+
+
listTopics(Duration) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
listTopics(ListTopicsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List the topics available in the cluster.
+
+
listTopics(ListTopicsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
listTopics(ListTopicsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
ListTopicsOptions - Class in org.apache.kafka.clients.admin
+
+
Options for Admin.listTopics().
+
+
ListTopicsOptions() - Constructor for class org.apache.kafka.clients.admin.ListTopicsOptions
+
 
+
ListTopicsResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.listTopics() call.
+
+
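A minimal sketch of listing topic names with ListTopicsOptions, assuming a configured `Admin` client:

```java
// Sketch only: admin is a configured Admin client.
import java.util.Set;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListTopicsOptions;

public class ListTopicsExample {
    static void printTopics(Admin admin) throws Exception {
        // listInternal(true) includes internal topics such as __consumer_offsets.
        Set<String> names = admin.listTopics(new ListTopicsOptions().listInternal(true)).names().get();
        names.forEach(System.out::println);
    }
}
```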
listTransactions() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List active transactions in the cluster.
+
+
listTransactions(ListTransactionsOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
List active transactions in the cluster.
+
+
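A hedged sketch of listing active transactions; it assumes `ListTransactionsResult` exposes `all()` and prints each listing via `toString()`.

```java
// Sketch only: assumes ListTransactionsResult.all(); admin is a configured Admin client.
import org.apache.kafka.clients.admin.Admin;

public class ListTransactionsExample {
    static void printTransactions(Admin admin) throws Exception {
        admin.listTransactions().all().get().forEach(System.out::println);
    }
}
```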
listTransactions(ListTransactionsOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
listTransactions(ListTransactionsOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
ListTransactionsOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
ListTransactionsOptions() - Constructor for class org.apache.kafka.clients.admin.ListTransactionsOptions
+
 
+
ListTransactionsResult - Class in org.apache.kafka.clients.admin
+
+
The result of the Admin.listTransactions() call.
+
+
LITERAL - Enum constant in enum class org.apache.kafka.common.resource.PatternType
+
+
A literal resource name.
+
+
LOCAL_LOG_RETENTION_BYTES_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
LOCAL_LOG_RETENTION_BYTES_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
LOCAL_LOG_RETENTION_MS_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
LOCAL_LOG_RETENTION_MS_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
LockException - Exception in org.apache.kafka.streams.errors
+
+
Indicates that the state store directory lock could not be acquired because another thread holds the lock.
+
+
LockException(String) - Constructor for exception org.apache.kafka.streams.errors.LockException
+
 
+
LockException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.LockException
+
 
+
LockException(Throwable) - Constructor for exception org.apache.kafka.streams.errors.LockException
+
 
+
log - Static variable in interface org.apache.kafka.streams.kstream.EmitStrategy
+
 
+
LOG - Static variable in interface org.apache.kafka.streams.state.RocksDBConfigSetter
+
 
+
LOG_SUMMARY_INTERVAL_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
log.summary.interval.ms
+
+
LogAndContinueExceptionHandler - Class in org.apache.kafka.streams.errors
+
+
Deserialization handler that logs a deserialization exception and then signals the processing pipeline to continue processing more records.
+
+
LogAndContinueExceptionHandler() - Constructor for class org.apache.kafka.streams.errors.LogAndContinueExceptionHandler
+
 
+
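A hedged sketch of wiring the handler described above into a Kafka Streams configuration. The literal config key is used because the corresponding StreamsConfig constant name differs across versions (older releases use the "default."-prefixed key), so treat the key shown here as an assumption to verify against the target version.

```java
// Sketch only: configure Streams to log and skip records that fail deserialization.
// The literal key is an assumption; older releases use "default.deserialization.exception.handler".
import java.util.Properties;
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;

public class SkipBadRecordsConfig {
    static Properties withSkipOnDeserializationError(Properties props) {
        props.put("deserialization.exception.handler", LogAndContinueExceptionHandler.class.getName());
        return props;
    }
}
```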
LogAndContinueProcessingExceptionHandler - Class in org.apache.kafka.streams.errors
+
+
Processing exception handler that logs a processing exception and then signals the processing pipeline to continue processing more records.
+
+
LogAndContinueProcessingExceptionHandler() - Constructor for class org.apache.kafka.streams.errors.LogAndContinueProcessingExceptionHandler
+
 
+
LogAndFailExceptionHandler - Class in org.apache.kafka.streams.errors
+
+
Deserialization handler that logs a deserialization exception and then signals the processing pipeline to stop processing more records and fail.
+
+
LogAndFailExceptionHandler() - Constructor for class org.apache.kafka.streams.errors.LogAndFailExceptionHandler
+
 
+
LogAndFailProcessingExceptionHandler - Class in org.apache.kafka.streams.errors
+
+
Processing exception handler that logs a processing exception and then signals the processing pipeline to stop processing more records and fail.
+
+
LogAndFailProcessingExceptionHandler() - Constructor for class org.apache.kafka.streams.errors.LogAndFailProcessingExceptionHandler
+
 
+
LogAndSkipOnInvalidTimestamp - Class in org.apache.kafka.streams.processor
+
+
Retrieves embedded metadata timestamps from Kafka messages.
+
+
LogAndSkipOnInvalidTimestamp() - Constructor for class org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp
+
 
+
logConfig() - Method in interface org.apache.kafka.streams.state.StoreBuilder
+
+
Returns a Map containing any log configs that will be used when creating the changelog for the StateStore.
+
+
LogDirDescription - Class in org.apache.kafka.clients.admin
+
+
A description of a log directory on a particular broker.
+
+
LogDirDescription(ApiException, Map<TopicPartition, ReplicaInfo>) - Constructor for class org.apache.kafka.clients.admin.LogDirDescription
+
 
+
LogDirDescription(ApiException, Map<TopicPartition, ReplicaInfo>, long, long) - Constructor for class org.apache.kafka.clients.admin.LogDirDescription
+
 
+
LogDirNotFoundException - Exception in org.apache.kafka.common.errors
+
+
Thrown when a request is made for a log directory that is not present on the broker
+
+
LogDirNotFoundException(String) - Constructor for exception org.apache.kafka.common.errors.LogDirNotFoundException
+
 
+
LogDirNotFoundException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.LogDirNotFoundException
+
 
+
LogDirNotFoundException(Throwable) - Constructor for exception org.apache.kafka.common.errors.LogDirNotFoundException
+
 
+
logEndOffset() - Method in class org.apache.kafka.clients.admin.QuorumInfo.ReplicaState
+
+
Return the logEndOffset known by the leader for this replica.
+
+
loggingEnabled() - Method in interface org.apache.kafka.streams.state.StoreBuilder
+
 
+
LOGICAL_NAME - Static variable in class org.apache.kafka.connect.data.Date
+
 
+
LOGICAL_NAME - Static variable in class org.apache.kafka.connect.data.Decimal
+
 
+
LOGICAL_NAME - Static variable in class org.apache.kafka.connect.data.Time
+
 
+
LOGICAL_NAME - Static variable in class org.apache.kafka.connect.data.Timestamp
+
 
+
logIfAllowed() - Method in class org.apache.kafka.server.authorizer.Action
+
+
Indicates if audit logs tracking ALLOWED access should include this action if result is ALLOWED.
+
+
logIfDenied() - Method in class org.apache.kafka.server.authorizer.Action
+
+
Indicates if audit logs tracking DENIED access should include this action if result is DENIED.
+
+
login() - Method in interface org.apache.kafka.common.security.auth.Login
+
+
Performs login for each login module specified for the login context of this instance.
+
+
login() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule
+
 
+
login() - Method in class org.apache.kafka.common.security.plain.PlainLoginModule
+
 
+
login() - Method in class org.apache.kafka.common.security.scram.ScramLoginModule
+
 
+
Login - Interface in org.apache.kafka.common.security.auth
+
+
Login interface for authentication.
+
+
LogLevelConfig - Class in org.apache.kafka.common.config
+
+
This class holds definitions for log level configurations related to Kafka's application logging.
+
+
LogLevelConfig() - Constructor for class org.apache.kafka.common.config.LogLevelConfig
+
 
+
logout() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule
+
 
+
logout() - Method in class org.apache.kafka.common.security.plain.PlainLoginModule
+
 
+
logout() - Method in class org.apache.kafka.common.security.scram.ScramLoginModule
+
 
+
logSegment() - Method in class org.apache.kafka.server.log.remote.storage.LogSegmentData
+
 
+
LogSegmentData - Class in org.apache.kafka.server.log.remote.storage
+
+
This represents all the required data and indexes for a specific log segment that needs to be stored in the remote storage.
+
+
LogSegmentData(Path, Path, Path, Optional<Path>, Path, ByteBuffer) - Constructor for class org.apache.kafka.server.log.remote.storage.LogSegmentData
+
+
Creates a LogSegmentData instance with data and indexes.
+
+
LogTruncationException - Exception in org.apache.kafka.clients.consumer
+
+
In the event of an unclean leader election, the log will be truncated, previously committed data will be lost, and new data will be written over these offsets.
+
+
LogTruncationException(String, Map<TopicPartition, Long>, Map<TopicPartition, OffsetAndMetadata>) - Constructor for exception org.apache.kafka.clients.consumer.LogTruncationException
+
 
+
LogTruncationException(Map<TopicPartition, Long>, Map<TopicPartition, OffsetAndMetadata>) - Constructor for exception org.apache.kafka.clients.consumer.LogTruncationException
+
 
+
logUnused() - Method in class org.apache.kafka.common.config.AbstractConfig
+
+
Info level log for any unused configurations
+
+
Long() - Static method in class org.apache.kafka.common.serialization.Serdes
+
+
A serde for nullable Long type.
+
+
LONG - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigType
+
 
+
LONG - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Type
+
+
Used for numerical values within the Java Long range.
+
+
LONG - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Width
+
 
+
LongDecoder - Class in org.apache.kafka.tools.api
+
+
The long decoder translates bytes into longs.
+
+
LongDecoder() - Constructor for class org.apache.kafka.tools.api.LongDecoder
+
 
+
LongDeserializer - Class in org.apache.kafka.common.serialization
+
 
+
LongDeserializer() - Constructor for class org.apache.kafka.common.serialization.LongDeserializer
+
 
+
LongSerde() - Constructor for class org.apache.kafka.common.serialization.Serdes.LongSerde
+
 
+
LongSerializer - Class in org.apache.kafka.common.serialization
+
 
+
LongSerializer() - Constructor for class org.apache.kafka.common.serialization.LongSerializer
+
 
+
LOW - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Importance
+
 
+
lowerBound() - Method in class org.apache.kafka.streams.query.TimestampedRangeQuery
+
+
The lower bound of the query, if specified.
+
+
lowerBound(double) - Static method in class org.apache.kafka.common.metrics.Quota
+
 
+
lowWatermark() - Method in class org.apache.kafka.clients.admin.DeletedRecords
+
+
Return the "low watermark" for the topic partition on which the deletion was executed
+
+
lowWatermarks() - Method in class org.apache.kafka.clients.admin.DeleteRecordsResult
+
+
Return a map from topic partition to futures which can be used to check the status of individual deletions.
+
+
lruMap(String, int) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Create a LRU Map KeyValueBytesStoreSupplier.
+
+
+

M

+
+
main(String[]) - Static method in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
main(String[]) - Static method in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
main(String[]) - Static method in class org.apache.kafka.clients.producer.ProducerConfig
+
 
+
main(String[]) - Static method in class org.apache.kafka.streams.StreamsConfig
+
 
+
MAIN_CONSUMER_PREFIX - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix used to override consumer configs for the main consumer client from the general consumer client configs.
+
+
mainConsumerPrefix(String) - Static method in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix a property with StreamsConfig.MAIN_CONSUMER_PREFIX.
+
+
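A minimal sketch of the prefix helper above, overriding max.poll.records for the main consumer only (restore and global consumers keep the general setting):

```java
// Sketch only: overrides max.poll.records for the main consumer without touching restore or global consumers.
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.streams.StreamsConfig;

public class MainConsumerOverride {
    static Properties withSmallerPolls(Properties props) {
        props.put(StreamsConfig.mainConsumerPrefix(ConsumerConfig.MAX_POLL_RECORDS_CONFIG), 100);
        return props;
    }
}
```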
make(V, long) - Static method in class org.apache.kafka.streams.state.ValueAndTimestamp
+
+
Create a new ValueAndTimestamp instance if the provided value is not null.
+
+
makeAllowNullable(V, long) - Static method in class org.apache.kafka.streams.state.ValueAndTimestamp
+
+
Create a new ValueAndTimestamp instance.
+
+
map() - Method in class org.apache.kafka.common.security.auth.SaslExtensions
+
+
Returns an immutable map of the extension names and their values
+
+
map(Schema, Schema) - Static method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
map(KeyValueMapper<? super K, ? super V, ? extends KeyValue<? extends KOut, ? extends VOut>>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Create a new KStream that consists of a modified record for each record in this stream.
+
+
map(KeyValueMapper<? super K, ? super V, ? extends KeyValue<? extends KOut, ? extends VOut>>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
MAP - Enum constant in enum class org.apache.kafka.connect.data.Schema.Type
+
+
A mapping from keys to values.
+
+
mapValues(ValueMapper<? super V, ? extends VOut>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Create a new KStream that consists of all records of this stream but with a modified value.
+
+
mapValues(ValueMapper<? super V, ? extends VOut>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
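For the KStream map/mapValues entries above, a short sketch contrasting the two; `words` is a hypothetical source stream of type `KStream<String, String>`:

```java
// Sketch only: `words` is a hypothetical KStream<String, String> source.
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;

public class MapExamples {
    static void transform(KStream<String, String> words) {
        // mapValues keeps the key, so no repartitioning is needed downstream.
        KStream<String, Integer> lengths = words.mapValues(v -> v.length());
        // map may change the key, which can trigger repartitioning before stateful operations.
        KStream<Integer, String> byLength = words.map((k, v) -> KeyValue.pair(v.length(), v));
    }
}
```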
mapValues(ValueMapper<? super V, ? extends VR>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable by transforming the value of each record in this KTable into a new value (with possibly a new type) in the new KTable, with default serializers, deserializers, and state store.
+
+
mapValues(ValueMapper<? super V, ? extends VR>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable by transforming the value of each record in this KTable into a new value (with possibly a new type) in the new KTable, with the key serde, value serde, and the underlying materialized state storage configured in the Materialized instance.
+
+
mapValues(ValueMapper<? super V, ? extends VR>, Named) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable by transforming the value of each record in this KTable into a new value (with possibly a new type) in the new KTable, with default serializers, deserializers, and state store.
+
+
mapValues(ValueMapper<? super V, ? extends VR>, Named, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable by transforming the value of each record in this KTable into a new value (with possibly a new type) in the new KTable, with the key serde, value serde, and the underlying materialized state storage configured in the Materialized instance.
+
+
mapValues(ValueMapperWithKey<? super K, ? super V, ? extends VOut>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
mapValues(ValueMapperWithKey<? super K, ? super V, ? extends VOut>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
mapValues(ValueMapperWithKey<? super K, ? super V, ? extends VR>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable by transforming the value of each record in this KTable into a new value (with possibly a new type) in the new KTable, with default serializers, deserializers, and state store.
+
+
mapValues(ValueMapperWithKey<? super K, ? super V, ? extends VR>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable by transforming the value of each record in this KTable into a new value (with possibly a new type) in the new KTable, with the key serde, value serde, and the underlying materialized state storage configured in the Materialized instance.
+
+
mapValues(ValueMapperWithKey<? super K, ? super V, ? extends VR>, Named) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable by transforming the value of each record in this KTable into a new value (with possibly a new type) in the new KTable, with default serializers, deserializers, and state store.
+
+
mapValues(ValueMapperWithKey<? super K, ? super V, ? extends VR>, Named, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable by transforming the value of each record in this KTable into a new value (with possibly a new type) in the new KTable, with the key serde, value serde, and the underlying materialized state storage configured in the Materialized instance.
+
+
match() - Method in class org.apache.kafka.common.quota.ClientQuotaFilterComponent
+
 
+
MATCH - Enum constant in enum class org.apache.kafka.common.resource.PatternType
+
+
In a filter, will perform pattern matching.
+
+
matches(AccessControlEntry) - Method in class org.apache.kafka.common.acl.AccessControlEntryFilter
+
+
Returns true if this filter matches the given AccessControlEntry.
+
+
matches(AclBinding) - Method in class org.apache.kafka.common.acl.AclBindingFilter
+
+
Return true if the resource filter matches the binding's resource and the entry filter matches binding's entry.
+
+
matches(ResourcePattern) - Method in class org.apache.kafka.common.resource.ResourcePatternFilter
+
 
+
matchesAtMostOne() - Method in class org.apache.kafka.common.acl.AccessControlEntryFilter
+
+
Returns true if this filter could only match one ACE -- in other words, if there are no ANY or UNKNOWN fields.
+
+
matchesAtMostOne() - Method in class org.apache.kafka.common.acl.AclBindingFilter
+
+
Return true if the resource and entry filters can only match one ACE.
+
+
matchesAtMostOne() - Method in class org.apache.kafka.common.resource.ResourcePatternFilter
+
 
+
Materialized<K,V,S extends StateStore> - Class in org.apache.kafka.streams.kstream
+
+
Used to describe how a StateStore should be materialized.
+
+
Materialized.StoreType - Enum Class in org.apache.kafka.streams.kstream
+
 
+
Max - Class in org.apache.kafka.common.metrics.stats
+
+
A SampledStat that gives the max over its samples.
+
+
Max() - Constructor for class org.apache.kafka.common.metrics.stats.Max
+
 
+
MAX_BLOCK_MS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
max.block.ms
+
+
MAX_COMPACTION_LAG_MS_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MAX_COMPACTION_LAG_MS_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
max.in.flight.requests.per.connection
+
+
MAX_MESSAGE_BYTES_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MAX_MESSAGE_BYTES_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MAX_PARTITION_FETCH_BYTES_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
max.partition.fetch.bytes
+
+
MAX_POLL_INTERVAL_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
max.poll.interval.ms
+
+
MAX_POLL_RECORDS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
max.poll.records
+
+
MAX_RACK_AWARE_ASSIGNMENT_TAG_KEY_LENGTH - Static variable in class org.apache.kafka.streams.StreamsConfig
+
 
+
MAX_RACK_AWARE_ASSIGNMENT_TAG_LIST_SIZE - Static variable in class org.apache.kafka.streams.StreamsConfig
+
 
+
MAX_RACK_AWARE_ASSIGNMENT_TAG_VALUE_LENGTH - Static variable in class org.apache.kafka.streams.StreamsConfig
+
 
+
MAX_RECORDING_LEVEL_KEY - Static variable in enum class org.apache.kafka.common.metrics.Sensor.RecordingLevel
+
 
+
MAX_REQUEST_SIZE_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
max.request.size
+
+
MAX_TASK_IDLE_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
max.task.idle.ms
+
+
MAX_TASK_IDLE_MS_DISABLED - Static variable in class org.apache.kafka.streams.StreamsConfig
+
 
+
MAX_TASK_IDLE_MS_DOC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
MAX_WARMUP_REPLICAS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
max.warmup.replicas
+
+
maxBufferedSize - Variable in class org.apache.kafka.streams.TopologyConfig
+
 
+
maxBufferedSize - Variable in class org.apache.kafka.streams.TopologyConfig.TaskConfig
+
 
+
maxBytes(long) - Static method in interface org.apache.kafka.streams.kstream.Suppressed.BufferConfig
+
+
Create a size-constrained buffer in terms of the maximum number of bytes it will use.
+
+
maxlifeTimeMs() - Method in class org.apache.kafka.clients.admin.CreateDelegationTokenOptions
+
+
Deprecated.
Since 4.0 and should not be used any longer.
+
+
+
maxlifeTimeMs(long) - Method in class org.apache.kafka.clients.admin.CreateDelegationTokenOptions
+
+
Deprecated.
Since 4.0 and should not be used any longer.
+
+
+
maxLifetimeMs() - Method in class org.apache.kafka.clients.admin.CreateDelegationTokenOptions
+
 
+
maxLifetimeMs(long) - Method in class org.apache.kafka.clients.admin.CreateDelegationTokenOptions
+
 
+
maxRecords(long) - Static method in interface org.apache.kafka.streams.kstream.Suppressed.BufferConfig
+
+
Create a size-constrained buffer in terms of the maximum number of keys it will store.
+
+
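A hedged sketch of the buffer configs above: a 10 MB strict buffer suitable for Suppressed.untilWindowCloses, to be applied with suppress() on a windowed KTable.

```java
// Sketch only: a 10 MB strict buffer; apply with suppress() on a windowed KTable.
import org.apache.kafka.streams.kstream.Suppressed;
import org.apache.kafka.streams.kstream.Windowed;
import static org.apache.kafka.streams.kstream.Suppressed.BufferConfig.maxBytes;

public class SuppressionExample {
    static Suppressed<Windowed> untilWindowCloses() {
        // shutDownWhenFull() turns the size-constrained buffer into a strict one, as untilWindowCloses requires.
        return Suppressed.untilWindowCloses(maxBytes(10 * 1024 * 1024).shutDownWhenFull());
    }
}
```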
maxTaskIdleMs - Variable in class org.apache.kafka.streams.TopologyConfig
+
 
+
maxTaskIdleMs - Variable in class org.apache.kafka.streams.TopologyConfig.TaskConfig
+
 
+
maxTimestamp() - Static method in class org.apache.kafka.clients.admin.OffsetSpec
+
+
Used to retrieve the offset with the largest timestamp of a partition; as message timestamps can be specified client side, this may not match the log end offset returned by LatestSpec.
+
+
maxTimestamp() - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
maxTimestampMs() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
 
+
MaxTimestampSpec() - Constructor for class org.apache.kafka.clients.admin.OffsetSpec.MaxTimestampSpec
+
 
+
maxVersion() - Method in class org.apache.kafka.clients.admin.SupportedVersionRange
+
 
+
maxVersionLevel() - Method in class org.apache.kafka.clients.admin.FeatureUpdate
+
 
+
maxVersionLevel() - Method in class org.apache.kafka.clients.admin.FinalizedVersionRange
+
 
+
maxWarmupReplicas() - Method in class org.apache.kafka.streams.processor.assignment.AssignmentConfigs
+
+
The maximum warmup replicas as configured via StreamsConfig.MAX_WARMUP_REPLICAS_CONFIG
+
+
measurable() - Method in class org.apache.kafka.common.metrics.KafkaMetric
+
+
Get the underlying metric provider, which should be a Measurable
+
+
Measurable - Interface in org.apache.kafka.common.metrics
+
+
A measurable quantity that can be registered as a metric
+
+
MeasurableStat - Interface in org.apache.kafka.common.metrics
+
+
A MeasurableStat is a Stat that is also Measurable (i.e.
+
+
measure(MetricConfig, long) - Method in interface org.apache.kafka.common.metrics.Measurable
+
+
Measure this quantity and return the result as a double
+
+
measure(MetricConfig, long) - Method in class org.apache.kafka.common.metrics.stats.CumulativeSum
+
 
+
measure(MetricConfig, long) - Method in class org.apache.kafka.common.metrics.stats.Rate
+
 
+
measure(MetricConfig, long) - Method in class org.apache.kafka.common.metrics.stats.SampledStat
+
 
+
measure(MetricConfig, long) - Method in class org.apache.kafka.common.metrics.stats.TokenBucket
+
 
+
measure(MetricConfig, long) - Method in class org.apache.kafka.common.metrics.stats.Value
+
 
+
mechanism() - Method in class org.apache.kafka.clients.admin.ScramCredentialInfo
+
 
+
mechanism() - Method in class org.apache.kafka.clients.admin.UserScramCredentialDeletion
+
 
+
mechanismName() - Method in enum class org.apache.kafka.clients.admin.ScramMechanism
+
 
+
MEDIUM - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Importance
+
 
+
MEDIUM - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Width
+
 
+
memberAssignment(String) - Method in interface org.apache.kafka.coordinator.group.api.assignor.GroupSpec
+
+
Gets the current assignment of the member.
+
+
MemberAssignment - Class in org.apache.kafka.clients.admin
+
+
A description of the assignments of a specific group member.
+
+
MemberAssignment - Interface in org.apache.kafka.coordinator.group.api.assignor
+
+
The partition assignment for a consumer group member.
+
+
MemberAssignment(Set<TopicPartition>) - Constructor for class org.apache.kafka.clients.admin.MemberAssignment
+
+
Creates an instance with the specified parameters.
+
+
MemberDescription - Class in org.apache.kafka.clients.admin
+
+
A detailed description of a single group member in the cluster.
+
+
MemberDescription(String, String, String, MemberAssignment) - Constructor for class org.apache.kafka.clients.admin.MemberDescription
+
+ +
+
MemberDescription(String, Optional<String>, String, String, MemberAssignment) - Constructor for class org.apache.kafka.clients.admin.MemberDescription
+
+ +
+
MemberDescription(String, Optional<String>, String, String, MemberAssignment, Optional<MemberAssignment>) - Constructor for class org.apache.kafka.clients.admin.MemberDescription
+
+ +
+
MemberDescription(String, Optional<String>, String, String, MemberAssignment, Optional<MemberAssignment>, Optional<Integer>, Optional<Boolean>) - Constructor for class org.apache.kafka.clients.admin.MemberDescription
+
 
+
memberEpoch() - Method in class org.apache.kafka.clients.admin.MemberDescription
+
+
The epoch of the group member.
+
+
memberEpoch() - Method in class org.apache.kafka.clients.admin.ShareMemberDescription
+
+
The epoch of the group member.
+
+
memberEpoch() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
The epoch of the group member.
+
+
memberId() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
The id of the group member.
+
+
memberId() - Method in class org.apache.kafka.clients.consumer.ConsumerGroupMetadata
+
 
+
MemberIdRequiredException - Exception in org.apache.kafka.common.errors
+
 
+
MemberIdRequiredException(String) - Constructor for exception org.apache.kafka.common.errors.MemberIdRequiredException
+
 
+
MemberIdRequiredException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.MemberIdRequiredException
+
 
+
memberIds() - Method in interface org.apache.kafka.coordinator.group.api.assignor.GroupSpec
+
 
+
memberResult(MemberToRemove) - Method in class org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupResult
+
+
Returns the selected member future.
+
+
members() - Method in class org.apache.kafka.clients.admin.ClassicGroupDescription
+
+
A list of the members of the classic group.
+
+
members() - Method in class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+
A list of the members of the consumer group.
+
+
members() - Method in class org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions
+
 
+
members() - Method in class org.apache.kafka.clients.admin.ShareGroupDescription
+
+
A list of the members of the share group.
+
+
members() - Method in class org.apache.kafka.clients.admin.StreamsGroupDescription
+
+
A list of the members of the streams group.
+
+
members() - Method in class org.apache.kafka.coordinator.group.api.assignor.GroupAssignment
+
 
+
memberSubscription(String) - Method in interface org.apache.kafka.coordinator.group.api.assignor.GroupSpec
+
+
Gets the member subscription specification for a member.
+
+
MemberSubscription - Interface in org.apache.kafka.coordinator.group.api.assignor
+
+
Interface representing the subscription metadata for a group member.
+
+
MemberToRemove - Class in org.apache.kafka.clients.admin
+
+
A struct containing information about the member to be removed.
+
+
MemberToRemove(String) - Constructor for class org.apache.kafka.clients.admin.MemberToRemove
+
 
+
merge(KStream<K, V>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Merge this KStream and the given KStream.
+
+
merge(KStream<K, V>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
merge(Position) - Method in class org.apache.kafka.streams.query.Position
+
+
Merges the provided Position into the current instance.
+
+
MERGE_REPARTITION_TOPICS - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "topology.optimization" for enabling the specific optimization that merges duplicated repartition topics.
+
+
Merger<K,V> - Interface in org.apache.kafka.streams.kstream
+
+
The interface for merging aggregate values for SessionWindows with the given key.
+
+
MESSAGE_DOWNCONVERSION_ENABLE_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
+
Deprecated.
down-conversion is not possible in Apache Kafka 4.0 and newer, hence this configuration is a no-op, and it is deprecated for removal in Apache Kafka 5.0.
+
+
+
MESSAGE_DOWNCONVERSION_ENABLE_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
+ +
+
MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MESSAGE_TIMESTAMP_AFTER_MAX_MS_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MESSAGE_TIMESTAMP_BEFORE_MAX_MS_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MESSAGE_TIMESTAMP_TYPE_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MESSAGE_TIMESTAMP_TYPE_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MessageFormatter - Interface in org.apache.kafka.common
+
+
This interface allows defining formatters that can be used to parse and format records read by a Consumer instance for display.
+
+
metadata() - Method in class org.apache.kafka.clients.consumer.OffsetAndMetadata
+
 
+
metadata() - Method in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
METADATA_KEY - Static variable in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
METADATA_MAX_AGE_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
METADATA_MAX_AGE_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
metadata.max.age.ms
+
+
METADATA_MAX_AGE_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
metadata.max.age.ms
+
+
METADATA_MAX_AGE_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
metadata.max.age.ms
+
+
METADATA_MAX_IDLE_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
metadata.max.idle.ms
+
+
METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
METADATA_RECOVERY_STRATEGY_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
METADATA_RECOVERY_STRATEGY_DOC - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
METADATA_TOPIC_ID - Static variable in class org.apache.kafka.common.Uuid
+
+
A UUID for the metadata topic in KRaft mode.
+
+
metadataForAllStreamsClients() - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Find all currently running KafkaStreams instances (potentially remotely) that use the same application ID as this instance (i.e., all instances that belong to the same Kafka Streams application) and return StreamsMetadata for each discovered instance.
+
+
metadataForLocalThreads() - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Returns runtime information about the local threads of this KafkaStreams instance.
+
+
Meter - Class in org.apache.kafka.common.metrics.stats
+
+
A compound stat that includes a rate metric and a cumulative total metric.
+
+
Meter(TimeUnit, MetricName, MetricName) - Constructor for class org.apache.kafka.common.metrics.stats.Meter
+
+
Construct a Meter with provided time unit
+
+
Meter(TimeUnit, SampledStat, MetricName, MetricName) - Constructor for class org.apache.kafka.common.metrics.stats.Meter
+
+
Construct a Meter with provided time unit
+
+
Meter(MetricName, MetricName) - Constructor for class org.apache.kafka.common.metrics.stats.Meter
+
+
Construct a Meter with seconds as time unit
+
+
Meter(SampledStat, MetricName, MetricName) - Constructor for class org.apache.kafka.common.metrics.stats.Meter
+
+
Construct a Meter with seconds as time unit
+
+
metric() - Method in exception org.apache.kafka.common.metrics.QuotaViolationException
+
 
+
metric(MetricName) - Method in class org.apache.kafka.common.metrics.Metrics
+
 
+
Metric - Interface in org.apache.kafka.common
+
+
A metric tracked for monitoring purposes.
+
+
METRIC_REPORTER_CLASSES_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
METRIC_REPORTER_CLASSES_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
metric.reporters
+
+
METRIC_REPORTER_CLASSES_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
metric.reporters
+
+
METRIC_REPORTER_CLASSES_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
metric.reporters
+
+
metricChange(KafkaMetric) - Method in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
metricChange(KafkaMetric) - Method in interface org.apache.kafka.common.metrics.MetricsReporter
+
+
This is called whenever a metric is updated or added
+
+
MetricConfig - Class in org.apache.kafka.common.metrics
+
+
Configuration values for metrics
+
+
MetricConfig() - Constructor for class org.apache.kafka.common.metrics.MetricConfig
+
 
+
metricInstance(MetricNameTemplate, String...) - Method in class org.apache.kafka.common.metrics.Metrics
+
 
+
metricInstance(MetricNameTemplate, Map<String, String>) - Method in class org.apache.kafka.common.metrics.Metrics
+
 
+
metricName() - Method in interface org.apache.kafka.common.Metric
+
+
A name for this metric
+
+
metricName() - Method in class org.apache.kafka.common.metrics.KafkaMetric
+
+
Get the metric name
+
+
metricName(String, String) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Create a MetricName with the given name, group and default tags specified in the metric configuration.
+
+
metricName(String, String, String) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Create a MetricName with the given name, group, description, and default tags specified in the metric configuration.
+
+
metricName(String, String, String, String...) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Create a MetricName with the given name, group, description, and keyValue as tags, plus default tags specified in the metric configuration.
+
+
metricName(String, String, String, Map<String, String>) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Create a MetricName with the given name, group, description and tags, plus default tags specified in the metric configuration.
+
+
metricName(String, String, LinkedHashMap<String, String>) - Method in interface org.apache.kafka.common.metrics.PluginMetrics
+
+
Create a MetricName with the given name, description and tags.
+
+
metricName(String, String, Map<String, String>) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Create a MetricName with the given name, group and tags, plus default tags specified in the metric configuration.
+
+
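A minimal sketch tying the metricName helpers above to a Sensor in a standalone Metrics registry:

```java
// Sketch only: a standalone Metrics registry with one sensor recording max and average values.
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;

public class SensorExample {
    public static void main(String[] args) throws Exception {
        try (Metrics metrics = new Metrics()) {
            Sensor latency = metrics.sensor("request-latency");
            latency.add(metrics.metricName("latency-max", "request-metrics", "Maximum request latency"), new Max());
            latency.add(metrics.metricName("latency-avg", "request-metrics", "Average request latency"), new Avg());
            latency.record(42.0);
        }
    }
}
```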
MetricName - Class in org.apache.kafka.common
+
+
The MetricName class encapsulates a metric's name, logical group and its related attributes.
+
+
MetricName(String, String, String, Map<String, String>) - Constructor for class org.apache.kafka.common.MetricName
+
+
Please create MetricName by method Metrics.metricName(String, String, String, Map)
+
+
MetricNameTemplate - Class in org.apache.kafka.common
+
+
A template for a MetricName.
+
+
MetricNameTemplate(String, String, String, String...) - Constructor for class org.apache.kafka.common.MetricNameTemplate
+
+
Create a new template.
+
+
MetricNameTemplate(String, String, String, Set<String>) - Constructor for class org.apache.kafka.common.MetricNameTemplate
+
+
Create a new template.
+
+
metricRemoval(KafkaMetric) - Method in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
metricRemoval(KafkaMetric) - Method in interface org.apache.kafka.common.metrics.MetricsReporter
+
+
This is called whenever a metric is removed
+
+
metrics() - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Get the metrics kept by the adminClient
+
+
metrics() - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
metrics() - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
metrics() - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
metrics() - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get the metrics kept by the consumer
+
+
metrics() - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Get the metrics kept by the consumer
+
+
metrics() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
metrics() - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
metrics() - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
metrics() - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
Get the full set of internal metrics maintained by the producer.
+
+
metrics() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
metrics() - Method in interface org.apache.kafka.clients.producer.Producer
+
+ +
+
metrics() - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Get all the metrics currently maintained indexed by metricName
+
+
metrics() - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Get read-only handle on global metrics registry, including streams client's own metrics plus its embedded producer, consumer and admin clients' metrics.
+
+
metrics() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
metrics() - Method in interface org.apache.kafka.streams.processor.api.ProcessingContext
+
+
Return Metrics instance.
+
+
metrics() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
metrics() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return Metrics instance.
+
+
metrics() - Method in interface org.apache.kafka.streams.processor.StateStoreContext
+
+
Returns Metrics instance.
+
+
metrics() - Method in interface org.apache.kafka.streams.StreamsMetrics
+
+
Get read-only handle on global metrics registry.
+
+
metrics() - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Get read-only handle on global metrics registry.
+
+
Metrics - Class in org.apache.kafka.common.metrics
+
+
A registry of sensors and metrics.
+
+
Metrics() - Constructor for class org.apache.kafka.common.metrics.Metrics
+
+
Create a metrics repository with no metric reporters and default configuration.
+
+
Metrics(MetricConfig) - Constructor for class org.apache.kafka.common.metrics.Metrics
+
+
Create a metrics repository with no reporters and the given default config.
+
+
Metrics(MetricConfig, List<MetricsReporter>, Time) - Constructor for class org.apache.kafka.common.metrics.Metrics
+
+
Create a metrics repository with a default config and the given metric reporters.
+
+
Metrics(MetricConfig, List<MetricsReporter>, Time, boolean) - Constructor for class org.apache.kafka.common.metrics.Metrics
+
+
Create a metrics repository with a default config, given metric reporters and the ability to expire eligible sensors
+
+
Metrics(MetricConfig, List<MetricsReporter>, Time, boolean, MetricsContext) - Constructor for class org.apache.kafka.common.metrics.Metrics
+
+
Create a metrics repository with a default config, given metric reporters, the ability to expire eligible sensors, and a MetricsContext.
+
+
Metrics(MetricConfig, List<MetricsReporter>, Time, MetricsContext) - Constructor for class org.apache.kafka.common.metrics.Metrics
+
+
Create a metrics repository with a default config, metric reporters and metric context. Expiration of Sensors is disabled.
+
+
Metrics(MetricConfig, Time) - Constructor for class org.apache.kafka.common.metrics.Metrics
+
+
Create a metrics repository with no metric reporters and the given default configuration.
+
+
Metrics(Time) - Constructor for class org.apache.kafka.common.metrics.Metrics
+
+
Create a metrics repository with no metric reporters and default configuration.
+
+
METRICS_CONFIG_PREFIX - Static variable in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
METRICS_LATEST - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "built.in.metrics.version" for the latest built-in metrics version.
+
+
METRICS_NUM_SAMPLES_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
METRICS_NUM_SAMPLES_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
metrics.num.samples
+
+
METRICS_NUM_SAMPLES_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
metrics.num.samples
+
+
METRICS_NUM_SAMPLES_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
metrics.num.samples
+
+
METRICS_RECORDING_LEVEL_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
METRICS_RECORDING_LEVEL_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
metrics.recording.level
+
+
METRICS_RECORDING_LEVEL_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
metrics.recording.level
+
+
METRICS_RECORDING_LEVEL_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
metrics.recording.level
+
+
METRICS_SAMPLE_WINDOW_MS_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
METRICS_SAMPLE_WINDOW_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
metrics.sample.window.ms
+
+
METRICS_SAMPLE_WINDOW_MS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
metrics.sample.window.ms
+
+
METRICS_SAMPLE_WINDOW_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
metrics.sample.window.ms
+
+
MetricsContext - Interface in org.apache.kafka.common.metrics
+
+
MetricsContext encapsulates additional contextLabels about metrics exposed via a MetricsReporter
+
+
MetricsReporter - Interface in org.apache.kafka.common.metrics
+
+
A plugin interface to allow things to listen as new metrics are created so they can be reported.
+
+
metricsScope() - Method in interface org.apache.kafka.streams.state.StoreSupplier
+
+
Return a String that is used as the scope for metrics recorded by Metered stores.
+
+
metricValue() - Method in interface org.apache.kafka.common.Metric
+
+
The value of the metric, which may be measurable or a non-measurable gauge
+
+
metricValue() - Method in class org.apache.kafka.common.metrics.KafkaMetric
+
+
Take the metric and return the value, which could be a Measurable or a Gauge
+
+
MetricValueProvider<T> - Interface in org.apache.kafka.common.metrics
+
+
Super-interface for Measurable or Gauge that provides metric values.
+
+
MIGRATED - Enum constant in enum class org.apache.kafka.streams.processor.StandbyUpdateListener.SuspendReason
+
 
+
Min - Class in org.apache.kafka.common.metrics.stats
+
+
A SampledStat that gives the min over its samples.
+
+
Min() - Constructor for class org.apache.kafka.common.metrics.stats.Min
+
 
+
MIN_CLEANABLE_DIRTY_RATIO_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MIN_CLEANABLE_DIRTY_RATIO_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MIN_COMPACTION_LAG_MS_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MIN_COMPACTION_LAG_MS_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MIN_IN_SYNC_REPLICAS_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
MIN_IN_SYNC_REPLICAS_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
minVersion() - Method in class org.apache.kafka.clients.admin.SupportedVersionRange
+
 
+
minVersionLevel() - Method in class org.apache.kafka.clients.admin.FinalizedVersionRange
+
 
+
MirrorClient - Class in org.apache.kafka.connect.mirror
+
+
Client to interact with MirrorMaker internal topics (checkpoints, heartbeats) on a given cluster.
+
+
MirrorClient(Map<String, Object>) - Constructor for class org.apache.kafka.connect.mirror.MirrorClient
+
 
+
MirrorClient(MirrorClientConfig) - Constructor for class org.apache.kafka.connect.mirror.MirrorClient
+
 
+
MirrorClientConfig - Class in org.apache.kafka.connect.mirror
+
+
Configuration required for MirrorClient to talk to a given target cluster.
+
+
MismatchedEndpointTypeException - Exception in org.apache.kafka.common.errors
+
 
+
MismatchedEndpointTypeException(String) - Constructor for exception org.apache.kafka.common.errors.MismatchedEndpointTypeException
+
 
+
MISSING_PROCESS_ID - Enum constant in enum class org.apache.kafka.streams.processor.assignment.TaskAssignor.AssignmentError
+
 
+
MissingSourceTopicException - Exception in org.apache.kafka.streams.errors
+
 
+
MissingSourceTopicException(String) - Constructor for exception org.apache.kafka.streams.errors.MissingSourceTopicException
+
 
+
MOCK_MODE_KEY - Static variable in class org.apache.kafka.connect.tools.MockConnector
+
 
+
MockConnector - Class in org.apache.kafka.connect.tools
+
+
This connector provides support for mocking certain connector behaviors.
+
+
MockConnector() - Constructor for class org.apache.kafka.connect.tools.MockConnector
+
 
+
MockConsumer<K,V> - Class in org.apache.kafka.clients.consumer
+
+
A mock of the Consumer interface you can use for testing code that uses Kafka.
+
+
MockConsumer(String) - Constructor for class org.apache.kafka.clients.consumer.MockConsumer
+
+
A mock consumer is instantiated by providing ConsumerConfig.AUTO_OFFSET_RESET_CONFIG value as the input.
+
+
MockConsumer(OffsetResetStrategy) - Constructor for class org.apache.kafka.clients.consumer.MockConsumer
+
+
Deprecated. +
Since 4.0. Use MockConsumer(String) instead.
+
+
+
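To make the `MockConsumer` entries above concrete, here is a minimal, hypothetical test sketch: it seeds a mock consumer (using the string-based constructor described above) with one record and polls it. The topic name and payload are invented for illustration.

```java
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.List;
import java.util.Map;

public class MockConsumerSketch {
    public static void main(String[] args) {
        // "earliest" mirrors the auto.offset.reset value the constructor expects.
        MockConsumer<String, String> consumer = new MockConsumer<>("earliest");

        // Assign a partition and seed it with a beginning offset and one record.
        TopicPartition tp = new TopicPartition("orders", 0); // hypothetical topic
        consumer.assign(List.of(tp));
        consumer.updateBeginningOffsets(Map.of(tp, 0L));
        consumer.addRecord(new ConsumerRecord<>("orders", 0, 0L, "order-1", "{\"qty\":2}"));

        // Code under test polls exactly as it would against a real broker.
        consumer.poll(Duration.ofMillis(100))
                .forEach(record -> System.out.println(record.key() + " -> " + record.value()));
        consumer.close();
    }
}
```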
MockProcessorContext<KForward,VForward> - Class in org.apache.kafka.streams.processor.api
+
+
MockProcessorContext is a mock of ProcessorContext for users to test their Processor + implementations.
+
+
MockProcessorContext - Class in org.apache.kafka.streams.processor
+
+
Deprecated. +
Since 4.0. Use MockProcessorContext instead.
+
+
+
MockProcessorContext() - Constructor for class org.apache.kafka.streams.processor.api.MockProcessorContext
+
+
Create a MockProcessorContext with dummy config and taskId and null stateDir.
+
+
MockProcessorContext() - Constructor for class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
Create a MockProcessorContext with dummy config and taskId and null stateDir.
+
+
MockProcessorContext(Properties) - Constructor for class org.apache.kafka.streams.processor.api.MockProcessorContext
+
+
Create a MockProcessorContext with dummy taskId and null stateDir.
+
+
MockProcessorContext(Properties) - Constructor for class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
Create a MockProcessorContext with dummy taskId and null stateDir.
+
+
MockProcessorContext(Properties, TaskId, File) - Constructor for class org.apache.kafka.streams.processor.api.MockProcessorContext
+
+
Create a MockProcessorContext with a specified taskId and null stateDir.
+
+
MockProcessorContext(Properties, TaskId, File) - Constructor for class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
Create a MockProcessorContext with a specified taskId and null stateDir.
+
+
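A hedged sketch of how the `org.apache.kafka.streams.processor.api.MockProcessorContext` constructors above are typically used: the processor under test (`UpperCaseProcessor`) is hypothetical, and the records it forwards are read back via `forwarded()`.

```java
import org.apache.kafka.streams.processor.api.MockProcessorContext;
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;

public class UpperCaseProcessorTest {
    // Hypothetical processor under test: forwards upper-cased values.
    static class UpperCaseProcessor implements Processor<String, String, String, String> {
        private ProcessorContext<String, String> context;

        @Override
        public void init(final ProcessorContext<String, String> context) {
            this.context = context;
        }

        @Override
        public void process(final Record<String, String> record) {
            context.forward(record.withValue(record.value().toUpperCase()));
        }
    }

    public static void main(String[] args) {
        MockProcessorContext<String, String> context = new MockProcessorContext<>();
        UpperCaseProcessor processor = new UpperCaseProcessor();
        processor.init(context);

        processor.process(new Record<>("k", "hello", 0L));

        // The mock captures everything the processor forwarded.
        context.forwarded().forEach(captured -> System.out.println(captured.record()));
    }
}
```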
MockProcessorContext.CapturedForward<K,V> - Class in org.apache.kafka.streams.processor.api
+
 
+
MockProcessorContext.CapturedForward - Class in org.apache.kafka.streams.processor
+
+
Deprecated.
+
MockProcessorContext.CapturedPunctuator - Class in org.apache.kafka.streams.processor.api
+
+
MockProcessorContext.CapturedPunctuator holds captured punctuators, along with their scheduling information.
+
+
MockProcessorContext.CapturedPunctuator - Class in org.apache.kafka.streams.processor
+
+
Deprecated.
+
MockProcessorContext.CapturedPunctuator holds captured punctuators, along with their scheduling information.
+
+
MockProducer<K,V> - Class in org.apache.kafka.clients.producer
+
+
A mock of the producer interface you can use for testing code that uses Kafka.
+
+
MockProducer() - Constructor for class org.apache.kafka.clients.producer.MockProducer
+
+
Create a new mock producer with invented metadata.
+
+
MockProducer(boolean, Partitioner, Serializer<K>, Serializer<V>) - Constructor for class org.apache.kafka.clients.producer.MockProducer
+
+
Create a new mock producer with invented metadata, the given autoComplete setting, partitioner, and key/value serializers.
+
+
MockProducer(Cluster, boolean, Partitioner, Serializer<K>, Serializer<V>) - Constructor for class org.apache.kafka.clients.producer.MockProducer
+
+
Create a mock producer
+
+
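Similarly, a minimal sketch for `MockProducer`, using the `(boolean, Partitioner, Serializer, Serializer)` constructor listed above; the topic and payload are invented, and `history()` is used to assert what was "sent".

```java
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RoundRobinPartitioner;
import org.apache.kafka.common.serialization.StringSerializer;

public class MockProducerSketch {
    public static void main(String[] args) {
        // autoComplete=true makes send() futures complete immediately with invented metadata.
        MockProducer<String, String> producer = new MockProducer<>(
                true, new RoundRobinPartitioner(), new StringSerializer(), new StringSerializer());

        producer.send(new ProducerRecord<>("orders", "order-1", "{\"qty\":2}")); // hypothetical topic

        // Everything "sent" is captured for assertions instead of hitting a broker.
        System.out.println("records sent: " + producer.history().size());
        producer.close();
    }
}
```

With `autoComplete` set to `false`, each send stays pending until the test completes or fails it explicitly, which is useful for exercising error-handling paths.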
MockShareConsumer<K,V> - Class in org.apache.kafka.clients.consumer
+
+
A mock of the ShareConsumer interface you can use for testing code that uses Kafka.
+
+
MockShareConsumer() - Constructor for class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
MockSinkConnector - Class in org.apache.kafka.connect.tools
+
+
Mock sink implementation which delegates to MockConnector.
+
+
MockSinkConnector() - Constructor for class org.apache.kafka.connect.tools.MockSinkConnector
+
 
+
MockSinkTask - Class in org.apache.kafka.connect.tools
+
+
Task implementation for MockSinkConnector.
+
+
MockSinkTask() - Constructor for class org.apache.kafka.connect.tools.MockSinkTask
+
 
+
MockSourceConnector - Class in org.apache.kafka.connect.tools
+
+
Mock source implementation which delegates to MockConnector.
+
+
MockSourceConnector() - Constructor for class org.apache.kafka.connect.tools.MockSourceConnector
+
 
+
MockSourceTask - Class in org.apache.kafka.connect.tools
+
+
Task implementation for MockSourceConnector.
+
+
MockSourceTask() - Constructor for class org.apache.kafka.connect.tools.MockSourceTask
+
 
+
Monitorable - Interface in org.apache.kafka.common.metrics
+
+
Plugins can implement this interface to register their own metrics.
+
+
MULTIPLE_SCHEMA_CONFIG - Static variable in class org.apache.kafka.connect.tools.SchemaSourceTask
+
 
+
MultiVersionedKeyQuery<K,V> - Class in org.apache.kafka.streams.query
+
+
Interactive query for retrieving a set of records with the same specified key and different timestamps within the specified time range.
+
+
+

N

+
+
name - Variable in enum class org.apache.kafka.clients.consumer.GroupProtocol
+
+
String representation of the group protocol.
+
+
name - Variable in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
name - Variable in enum class org.apache.kafka.common.metrics.Sensor.RecordingLevel
+
+
An English description of the API; this is for debugging and can change.
+
+
name - Variable in enum class org.apache.kafka.common.security.auth.SecurityProtocol
+
+
Name of the security protocol.
+
+
name - Variable in enum class org.apache.kafka.streams.errors.DeserializationExceptionHandler.DeserializationHandlerResponse
+
+
An English description for the used option.
+
+
name - Variable in enum class org.apache.kafka.streams.errors.ProcessingExceptionHandler.ProcessingHandlerResponse
+
+
An English description for the used option.
+
+
name - Variable in enum class org.apache.kafka.streams.errors.ProductionExceptionHandler.ProductionExceptionHandlerResponse
+
+
An English description for the used option.
+
+
name - Variable in enum class org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse
+
+
An English description for the used option.
+
+
name - Variable in enum class org.apache.kafka.streams.GroupProtocol
+
+
String representation of the group protocol.
+
+
name() - Method in class org.apache.kafka.clients.admin.ClientMetricsResourceListing
+
+
Deprecated.
+
name() - Method in class org.apache.kafka.clients.admin.ConfigEntry.ConfigSynonym
+
+
Returns the name of this configuration.
+
+
name() - Method in class org.apache.kafka.clients.admin.ConfigEntry
+
+
Return the config name.
+
+
name() - Method in class org.apache.kafka.clients.admin.NewTopic
+
+
The name of the topic to be created.
+
+
name() - Method in class org.apache.kafka.clients.admin.RaftVoterEndpoint
+
+
Deprecated, for removal: This API element is subject to removal in a future version. +
Since 4.1. Use RaftVoterEndpoint.listener() instead. This function will be removed in 5.0.
+
+
+
name() - Method in class org.apache.kafka.clients.admin.TopicDescription
+
+
The name of the topic.
+
+
name() - Method in class org.apache.kafka.clients.admin.TopicListing
+
+
The name of the topic.
+
+
name() - Method in class org.apache.kafka.clients.admin.UserScramCredentialsDescription
+
 
+
name() - Method in interface org.apache.kafka.clients.consumer.ConsumerPartitionAssignor
+
+
Unique name for this assignor (e.g.
+
+
name() - Method in class org.apache.kafka.clients.consumer.CooperativeStickyAssignor
+
 
+
name() - Method in class org.apache.kafka.clients.consumer.RangeAssignor
+
 
+
name() - Method in class org.apache.kafka.clients.consumer.RoundRobinAssignor
+
 
+
name() - Method in class org.apache.kafka.clients.consumer.StickyAssignor
+
 
+
name() - Method in class org.apache.kafka.common.config.ConfigResource
+
+
Return the resource name.
+
+
name() - Method in class org.apache.kafka.common.config.ConfigValue
+
 
+
name() - Method in class org.apache.kafka.common.MetricName
+
 
+
name() - Method in class org.apache.kafka.common.MetricNameTemplate
+
+
Get the name of the metric.
+
+
name() - Method in class org.apache.kafka.common.metrics.CompoundStat.NamedMeasurable
+
 
+
name() - Method in class org.apache.kafka.common.metrics.Sensor
+
+
The name this sensor is registered with.
+
+
name() - Method in class org.apache.kafka.common.metrics.stats.Frequency
+
+
Get the name of this metric.
+
+
name() - Method in class org.apache.kafka.common.metrics.stats.Percentile
+
 
+
name() - Method in class org.apache.kafka.common.resource.Resource
+
+
Return the resource name.
+
+
name() - Method in class org.apache.kafka.common.resource.ResourcePattern
+
 
+
name() - Method in class org.apache.kafka.common.resource.ResourcePatternFilter
+
 
+
name() - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
name() - Method in class org.apache.kafka.connect.data.Field
+
+
Get the name of this field.
+
+
name() - Method in interface org.apache.kafka.connect.data.Schema
+
 
+
name() - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
name() - Method in class org.apache.kafka.connect.health.ConnectorHealth
+
+
Provides the name of the connector.
+
+
name() - Method in interface org.apache.kafka.coordinator.group.api.assignor.PartitionAssignor
+
+
Unique name for this assignor.
+
+
name() - Method in interface org.apache.kafka.server.quota.ClientQuotaEntity.ConfigEntity
+
+
Returns the name of this entity.
+
+
name() - Method in interface org.apache.kafka.streams.processor.StateStore
+
+
The name of this store.
+
+
name() - Method in class org.apache.kafka.streams.state.DslKeyValueParams
+
 
+
name() - Method in class org.apache.kafka.streams.state.DslSessionParams
+
 
+
name() - Method in class org.apache.kafka.streams.state.DslWindowParams
+
 
+
name() - Method in interface org.apache.kafka.streams.state.StoreBuilder
+
+
Return the name of this state store builder.
+
+
name() - Method in interface org.apache.kafka.streams.state.StoreSupplier
+
+
Return the name of this state store supplier.
+
+
name() - Method in interface org.apache.kafka.streams.TopologyDescription.Node
+
+
The name of the node.
+
+
name(String) - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
+
Set the name of this schema.
+
+
NAME_CONFIG - Static variable in class org.apache.kafka.connect.tools.SchemaSourceTask
+
 
+
NAME_CONFIG - Static variable in class org.apache.kafka.connect.tools.VerifiableSinkTask
+
 
+
NAME_CONFIG - Static variable in class org.apache.kafka.connect.tools.VerifiableSourceTask
+
 
+
Named - Class in org.apache.kafka.streams.kstream
+
 
+
NAMED_TOPOLOGY_DELIMITER - Static variable in class org.apache.kafka.streams.processor.TaskId
+
 
+
NamedMeasurable(MetricName, Measurable) - Constructor for class org.apache.kafka.common.metrics.CompoundStat.NamedMeasurable
+
 
+
names() - Method in class org.apache.kafka.clients.admin.ListTopicsResult
+
+
Return a future which yields a collection of topic names.
+
+
names() - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Returns an unmodifiable set of the property names defined in this ConfigDef
+
+
names() - Static method in enum class org.apache.kafka.common.security.auth.SecurityProtocol
+
 
+
NAMESPACE - Static variable in interface org.apache.kafka.common.metrics.MetricsContext
+
 
+
namesToListings() - Method in class org.apache.kafka.clients.admin.ListTopicsResult
+
+
Return a future which yields a map of topic names to TopicListing objects.
+
+
NETWORK_THREAD_PREFIX - Static variable in class org.apache.kafka.clients.producer.KafkaProducer
+
 
+
NetworkException - Exception in org.apache.kafka.common.errors
+
+
A misc. network-related IOException occurred when making a request.
+
+
NetworkException() - Constructor for exception org.apache.kafka.common.errors.NetworkException
+
 
+
NetworkException(String) - Constructor for exception org.apache.kafka.common.errors.NetworkException
+
 
+
NetworkException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.NetworkException
+
 
+
NetworkException(Throwable) - Constructor for exception org.apache.kafka.common.errors.NetworkException
+
 
+
newConfigDef() - Static method in class org.apache.kafka.connect.storage.ConverterConfig
+
+
Create a new ConfigDef instance containing the configurations defined by ConverterConfig.
+
+
NewLeaderElectedException - Exception in org.apache.kafka.common.errors
+
 
+
NewLeaderElectedException(String) - Constructor for exception org.apache.kafka.common.errors.NewLeaderElectedException
+
 
+
NewPartitionReassignment - Class in org.apache.kafka.clients.admin
+
+
A new partition reassignment, which can be applied via Admin.alterPartitionReassignments(Map, AlterPartitionReassignmentsOptions).
+
+
NewPartitionReassignment(List<Integer>) - Constructor for class org.apache.kafka.clients.admin.NewPartitionReassignment
+
 
+
NewPartitions - Class in org.apache.kafka.clients.admin
+
+
Describes new partitions for a particular topic in a call to Admin.createPartitions(Map).
+
+
newRecord(String, Integer, Schema, Object, Schema, Object, Long) - Method in class org.apache.kafka.connect.connector.ConnectRecord
+
+
Create a new record of the same type as itself, with the specified parameter values.
+
+
newRecord(String, Integer, Schema, Object, Schema, Object, Long) - Method in class org.apache.kafka.connect.sink.SinkRecord
+
 
+
newRecord(String, Integer, Schema, Object, Schema, Object, Long) - Method in class org.apache.kafka.connect.source.SourceRecord
+
 
+
newRecord(String, Integer, Schema, Object, Schema, Object, Long, Iterable<Header>) - Method in class org.apache.kafka.connect.connector.ConnectRecord
+
+
Create a new record of the same type as itself, with the specified parameter values.
+
+
newRecord(String, Integer, Schema, Object, Schema, Object, Long, Iterable<Header>) - Method in class org.apache.kafka.connect.sink.SinkRecord
+
 
+
newRecord(String, Integer, Schema, Object, Schema, Object, Long, Iterable<Header>) - Method in class org.apache.kafka.connect.source.SourceRecord
+
 
+
NewTopic - Class in org.apache.kafka.clients.admin
+
+
A new topic to be created via Admin.createTopics(Collection).
+
+
NewTopic(String, int, short) - Constructor for class org.apache.kafka.clients.admin.NewTopic
+
+
A new topic with the specified replication factor and number of partitions.
+
+
NewTopic(String, Map<Integer, List<Integer>>) - Constructor for class org.apache.kafka.clients.admin.NewTopic
+
+
A new topic with the specified replica assignment configuration.
+
+
NewTopic(String, Optional<Integer>, Optional<Short>) - Constructor for class org.apache.kafka.clients.admin.NewTopic
+
+
A new topic that optionally defaults numPartitions and replicationFactor to + the broker configurations for num.partitions and default.replication.factor + respectively.
+
+
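As a quick illustration of the `NewTopic` constructors above (assuming a broker at `localhost:9092` and a single-broker development setup with replication factor 1):

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

import java.util.List;
import java.util.Map;

public class CreateTopicSketch {
    public static void main(String[] args) throws Exception {
        Map<String, Object> conf =
                Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker

        try (Admin admin = Admin.create(conf)) {
            // 3 partitions, replication factor 1.
            NewTopic topic = new NewTopic("orders", 3, (short) 1);
            admin.createTopics(List.of(topic)).all().get();
        }
    }
}
```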
nextOffsets() - Method in class org.apache.kafka.clients.consumer.ConsumerRecords
+
+
Get the next offsets and metadata corresponding to all topic partitions for which the position has been advanced in this poll call.
+
+
nextSegmentWithTxnIndex(TopicIdPartition, int, long) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager
+
+
Returns the next segment metadata that contains the aborted transaction entries for the given topic partition, epoch and offset.
+
+
NO_DEFAULT_VALUE - Static variable in class org.apache.kafka.common.config.ConfigDef
+
+
A unique Java object which represents the lack of a default value.
+
+
NO_OPTIMIZATION - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "topology.optimization" for disabling topology optimization
+
+
NO_TIMESTAMP - Static variable in class org.apache.kafka.clients.consumer.ConsumerRecord
+
 
+
Node - Class in org.apache.kafka.common
+
+
Information about a Kafka node
+
+
Node(int, String, int) - Constructor for class org.apache.kafka.common.Node
+
 
+
Node(int, String, int, String) - Constructor for class org.apache.kafka.common.Node
+
 
+
Node(int, String, int, String, boolean) - Constructor for class org.apache.kafka.common.Node
+
 
+
nodeById(int) - Method in class org.apache.kafka.common.Cluster
+
+
Get the node by the node id (or null if the node is not online or does not exist)
+
+
noDefaultBranch() - Method in interface org.apache.kafka.streams.kstream.BranchedKStream
+
+
Finalize the construction of branches without forming a default branch.
+
+
nodeId() - Method in class org.apache.kafka.clients.admin.QuorumInfo.Node
+
 
+
nodeIfOnline(TopicPartition, int) - Method in class org.apache.kafka.common.Cluster
+
+
Get the node by node id if the replica for the given partition is online
+
+
nodes() - Method in class org.apache.kafka.clients.admin.DescribeClusterResult
+
+
Returns a future which yields a collection of nodes.
+
+
nodes() - Method in class org.apache.kafka.clients.admin.QuorumInfo
+
 
+
nodes() - Method in class org.apache.kafka.common.Cluster
+
 
+
nodes() - Method in interface org.apache.kafka.streams.TopologyDescription.Subtopology
+
+
All nodes of this sub-topology.
+
+
NON_RECONFIGURABLE_CONFIGS - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
none() - Static method in class org.apache.kafka.streams.AutoOffsetReset
+
+
Creates an AutoOffsetReset instance representing "none".
+
+
NONE - Enum constant in enum class org.apache.kafka.clients.consumer.OffsetResetStrategy
+
+
Deprecated.
+
NONE - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Width
+
 
+
NONE - Enum constant in enum class org.apache.kafka.common.config.SslClientAuth
+
 
+
NONE - Enum constant in enum class org.apache.kafka.streams.processor.assignment.TaskAssignor.AssignmentError
+
 
+
NonEmptyString() - Constructor for class org.apache.kafka.common.config.ConfigDef.NonEmptyString
+
 
+
nonEmptyStringWithoutControlChars() - Static method in class org.apache.kafka.common.config.ConfigDef.NonEmptyStringWithoutControlChars
+
 
+
NonEmptyStringWithoutControlChars() - Constructor for class org.apache.kafka.common.config.ConfigDef.NonEmptyStringWithoutControlChars
+
 
+
nonInternalValues() - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
NonNullValidator() - Constructor for class org.apache.kafka.common.config.ConfigDef.NonNullValidator
+
 
+
noNode() - Static method in class org.apache.kafka.common.Node
+
 
+
NoOffsetForPartitionException - Exception in org.apache.kafka.clients.consumer
+
+
Indicates that there is no stored offset for a partition and no defined offset + reset policy.
+
+
NoOffsetForPartitionException(Collection<TopicPartition>) - Constructor for exception org.apache.kafka.clients.consumer.NoOffsetForPartitionException
+
 
+
NoOffsetForPartitionException(TopicPartition) - Constructor for exception org.apache.kafka.clients.consumer.NoOffsetForPartitionException
+
 
+
NoReassignmentInProgressException - Exception in org.apache.kafka.common.errors
+
+
Thrown if a reassignment cannot be cancelled because none is in progress.
+
+
NoReassignmentInProgressException(String) - Constructor for exception org.apache.kafka.common.errors.NoReassignmentInProgressException
+
 
+
NoReassignmentInProgressException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.NoReassignmentInProgressException
+
 
+
NOT_ACTIVE - Enum constant in enum class org.apache.kafka.streams.query.FailureReason
+
+
The query required to execute on an active task (via StateQueryRequest.requireActive()), + but while executing the query, the task was either a Standby task, or it was an Active task + not in the RUNNING state.
+
+
NOT_AVAILABLE - Static variable in class org.apache.kafka.streams.KeyQueryMetadata
+
+
Sentinel to indicate that the KeyQueryMetadata is currently unavailable.
+
+
NOT_PRESENT - Enum constant in enum class org.apache.kafka.streams.query.FailureReason
+
+
Failure indicating that the requested store partition is not present on the local + KafkaStreams instance.
+
+
NOT_READY - Enum constant in enum class org.apache.kafka.common.GroupState
+
 
+
NOT_RUNNING - Enum constant in enum class org.apache.kafka.streams.KafkaStreams.State
+
 
+
NOT_UP_TO_BOUND - Enum constant in enum class org.apache.kafka.streams.query.FailureReason
+
+
Failure indicating that the store partition is not (yet) up to the desired bound.
+
+
NotControllerException - Exception in org.apache.kafka.common.errors
+
 
+
NotControllerException(String) - Constructor for exception org.apache.kafka.common.errors.NotControllerException
+
 
+
NotControllerException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.NotControllerException
+
 
+
NotCoordinatorException - Exception in org.apache.kafka.common.errors
+
+
In the context of the group coordinator, the broker returns this error code if it receives an offset fetch + or commit request for a group it's not the coordinator of.
+
+
NotCoordinatorException(String) - Constructor for exception org.apache.kafka.common.errors.NotCoordinatorException
+
 
+
NotCoordinatorException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.NotCoordinatorException
+
 
+
NotEnoughReplicasAfterAppendException - Exception in org.apache.kafka.common.errors
+
+
Number of insync replicas for the partition is lower than min.insync.replicas. This exception is raised when the low + ISR size is discovered *after* the message was already appended to the log.
+
+
NotEnoughReplicasAfterAppendException(String) - Constructor for exception org.apache.kafka.common.errors.NotEnoughReplicasAfterAppendException
+
 
+
NotEnoughReplicasException - Exception in org.apache.kafka.common.errors
+
+
Number of insync replicas for the partition is lower than min.insync.replicas.
+
+
NotEnoughReplicasException() - Constructor for exception org.apache.kafka.common.errors.NotEnoughReplicasException
+
 
+
NotEnoughReplicasException(String) - Constructor for exception org.apache.kafka.common.errors.NotEnoughReplicasException
+
 
+
NotEnoughReplicasException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.NotEnoughReplicasException
+
 
+
NotEnoughReplicasException(Throwable) - Constructor for exception org.apache.kafka.common.errors.NotEnoughReplicasException
+
 
+
NotFoundException - Exception in org.apache.kafka.connect.errors
+
+
Indicates that an operation attempted to modify or delete a connector or task that is not present on the worker.
+
+
NotFoundException(String) - Constructor for exception org.apache.kafka.connect.errors.NotFoundException
+
 
+
NotFoundException(String, Throwable) - Constructor for exception org.apache.kafka.connect.errors.NotFoundException
+
 
+
NotFoundException(Throwable) - Constructor for exception org.apache.kafka.connect.errors.NotFoundException
+
 
+
NotLeaderOrFollowerException - Exception in org.apache.kafka.common.errors
+
+
Broker returns this error if a request could not be processed because the broker is not the leader + or follower for a topic partition.
+
+
NotLeaderOrFollowerException() - Constructor for exception org.apache.kafka.common.errors.NotLeaderOrFollowerException
+
 
+
NotLeaderOrFollowerException(String) - Constructor for exception org.apache.kafka.common.errors.NotLeaderOrFollowerException
+
 
+
NotLeaderOrFollowerException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.NotLeaderOrFollowerException
+
 
+
NotLeaderOrFollowerException(Throwable) - Constructor for exception org.apache.kafka.common.errors.NotLeaderOrFollowerException
+
 
+
notUpToBound(Position, PositionBound, Integer) - Static method in interface org.apache.kafka.streams.query.QueryResult
+
+
Static factory method to create a failed query result object to indicate that the store has + not yet caught up to the requested position bound.
+
+
NULL - Static variable in class org.apache.kafka.connect.data.SchemaAndValue
+
 
+
NULL_SIZE - Static variable in class org.apache.kafka.clients.consumer.ConsumerRecord
+
 
+
NUM_MSGS_CONFIG - Static variable in class org.apache.kafka.connect.tools.SchemaSourceTask
+
 
+
NUM_STANDBY_REPLICAS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
num.standby.replicas
+
+
NUM_STREAM_THREADS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
num.stream.threads
+
+
numberOfPartitions(int) - Static method in class org.apache.kafka.streams.kstream.Repartitioned
+
+
Create a Repartitioned instance with provided number of partitions for repartition topic.
+
+
numPartitions() - Method in class org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig
+
 
+
numPartitions() - Method in class org.apache.kafka.clients.admin.NewTopic
+
+
The number of partitions for the new topic or -1 if a replica assignment has been specified.
+
+
numPartitions() - Method in class org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata
+
+
Return the number of partitions to create or null if replicaAssignments is not null.
+
+
numPartitions(String) - Method in class org.apache.kafka.clients.admin.CreateTopicsResult
+
+
Returns a future that provides number of partitions in the topic when the request completes.
+
+
numPartitions(Uuid) - Method in interface org.apache.kafka.coordinator.group.api.assignor.SubscribedTopicDescriber
+
+
The number of partitions for the given topic Id.
+
+
numProcessingThreads() - Method in interface org.apache.kafka.streams.processor.assignment.KafkaStreamsState
+
+
Returns the number of processing threads available to work on tasks for this KafkaStreams client, + which represents its overall capacity for work relative to other KafkaStreams clients.
+
+
numStandbyReplicas() - Method in class org.apache.kafka.streams.processor.assignment.AssignmentConfigs
+
+
The number of standby replicas as configured via + StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG
+
+
+

O

+
+
OAUTHBEARER_MECHANISM - Static variable in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule
+
+
The SASL Mechanism name for OAuth 2: OAUTHBEARER
+
+
OAuthBearerExtensionsValidatorCallback - Class in org.apache.kafka.common.security.oauthbearer
+
+
A Callback for use by the SaslServer implementation when it + needs to validate the SASL extensions for the OAUTHBEARER mechanism + Callback handlers should use the OAuthBearerExtensionsValidatorCallback.valid(String) + method to communicate valid extensions back to the SASL server.
+
+
OAuthBearerExtensionsValidatorCallback(OAuthBearerToken, SaslExtensions) - Constructor for class org.apache.kafka.common.security.oauthbearer.OAuthBearerExtensionsValidatorCallback
+
 
+
OAuthBearerLoginCallbackHandler - Class in org.apache.kafka.common.security.oauthbearer
+
+
+ OAuthBearerLoginCallbackHandler is an AuthenticateCallbackHandler that + accepts OAuthBearerTokenCallback and SaslExtensionsCallback callbacks to + perform the steps to request a JWT from an OAuth/OIDC provider using the + client_credentials.
+
+
OAuthBearerLoginCallbackHandler() - Constructor for class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler
+
 
+
OAuthBearerLoginModule - Class in org.apache.kafka.common.security.oauthbearer
+
+
The LoginModule for the SASL/OAUTHBEARER mechanism.
+
+
OAuthBearerLoginModule() - Constructor for class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule
+
 
+
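For orientation, a hedged example of client configuration that wires `OAuthBearerLoginModule` and `OAuthBearerLoginCallbackHandler` together for a client_credentials flow; the token endpoint, client id, and secret below are placeholders, not real values.

```java
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.common.config.SaslConfigs;

import java.util.Properties;

public class OAuthClientConfigSketch {
    static Properties oauthClientProps() {
        Properties props = new Properties();
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
        props.put(SaslConfigs.SASL_MECHANISM, "OAUTHBEARER");
        props.put(SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS,
                "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler");
        // Placeholder token endpoint and credentials.
        props.put(SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL,
                "https://auth.example.com/oauth2/token");
        props.put(SaslConfigs.SASL_JAAS_CONFIG,
                "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required "
                        + "clientId=\"my-client\" clientSecret=\"my-secret\";");
        return props;
    }
}
```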
OAuthBearerToken - Interface in org.apache.kafka.common.security.oauthbearer
+
+
The b64token value as defined in + RFC 6750 Section + 2.1 along with the token's specific scope and lifetime and principal + name.
+
+
OAuthBearerTokenCallback - Class in org.apache.kafka.common.security.oauthbearer
+
+
A Callback for use by the SaslClient and Login + implementations when they require an OAuth 2 bearer token.
+
+
OAuthBearerTokenCallback() - Constructor for class org.apache.kafka.common.security.oauthbearer.OAuthBearerTokenCallback
+
 
+
OAuthBearerValidatorCallback - Class in org.apache.kafka.common.security.oauthbearer
+
+
A Callback for use by the SaslServer implementation when it + needs to provide an OAuth 2 bearer token compact serialization for + validation.
+
+
OAuthBearerValidatorCallback(String) - Constructor for class org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback
+
+
Constructor
+
+
OAuthBearerValidatorCallbackHandler - Class in org.apache.kafka.common.security.oauthbearer
+
+
+ OAuthBearerValidatorCallbackHandler is an AuthenticateCallbackHandler that + accepts OAuthBearerValidatorCallback and OAuthBearerExtensionsValidatorCallback + callbacks to implement OAuth/OIDC validation.
+
+
OAuthBearerValidatorCallbackHandler() - Constructor for class org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler
+
 
+
observers() - Method in class org.apache.kafka.clients.admin.QuorumInfo
+
 
+
of() - Static method in class org.apache.kafka.streams.kstream.UnlimitedWindows
+
+
Return an unlimited window starting at timestamp zero.
+
+
of(String) - Static method in enum class org.apache.kafka.clients.consumer.GroupProtocol
+
+
Case-insensitive group protocol lookup by string name.
+
+
of(String) - Static method in enum class org.apache.kafka.streams.GroupProtocol
+
+
Case-insensitive group protocol lookup by string name.
+
+
of(Duration) - Static method in class org.apache.kafka.streams.kstream.JoinWindows
+
+
Deprecated. + +
+
+
of(ConfigDef.Validator...) - Static method in class org.apache.kafka.common.config.ConfigDef.CompositeValidator
+
 
+
of(ApplicationState) - Static method in class org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils.RackAwareOptimizationParams
+
+
Return a new config object with no overrides and the tasksToOptimize initialized to the set of all tasks in the given ApplicationState
+
+
of(ProcessId, Set<KafkaStreamsAssignment.AssignedTask>) - Static method in class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment
+
+
Construct an instance of KafkaStreamsAssignment with this processId and the given set of + assigned tasks.
+
+
of(StreamsConfig) - Static method in class org.apache.kafka.streams.processor.assignment.AssignmentConfigs
+
 
+
ofDefaultEntity(String) - Static method in class org.apache.kafka.common.quota.ClientQuotaFilterComponent
+
+
Constructs and returns a filter component that matches the built-in default entity name + for the entity type.
+
+
ofEntity(String, String) - Static method in class org.apache.kafka.common.quota.ClientQuotaFilterComponent
+
+
Constructs and returns a filter component that exactly matches the provided entity + name for the entity type.
+
+
ofEntityType(String) - Static method in class org.apache.kafka.common.quota.ClientQuotaFilterComponent
+
+
Constructs and returns a filter component that matches any specified name for the + entity type.
+
+
offlineReplicas() - Method in class org.apache.kafka.common.PartitionInfo
+
+
The subset of the replicas that are offline
+
+
offset() - Method in class org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo
+
 
+
offset() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription.TaskOffset
+
+
The cumulative offset (sum of offsets in all input partitions).
+
+
offset() - Method in class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
The position of this record in the corresponding Kafka partition.
+
+
offset() - Method in class org.apache.kafka.clients.consumer.OffsetAndMetadata
+
 
+
offset() - Method in class org.apache.kafka.clients.consumer.OffsetAndTimestamp
+
 
+
offset() - Method in class org.apache.kafka.clients.producer.RecordMetadata
+
+
The offset of the record in the topic/partition.
+
+
offset() - Method in exception org.apache.kafka.common.errors.RecordDeserializationException
+
 
+
offset() - Method in interface org.apache.kafka.streams.errors.ErrorHandlerContext
+
+
Return the offset of the current input record; could be -1 if it is not + available.
+
+
offset() - Method in interface org.apache.kafka.streams.processor.api.RecordMetadata
+
+
Return the offset of the current input record; could be -1 if it is not + available.
+
+
offset() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
offset() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return the offset of the current input record; could be -1 if it is not + available.
+
+
offset() - Method in interface org.apache.kafka.streams.processor.RecordContext
+
+
Return the offset of the current input record; could be -1 if it is not + available.
+
+
offset(Map<String, T>) - Method in interface org.apache.kafka.connect.storage.OffsetStorageReader
+
+
Get the offset for the specified partition.
+
+
offset(Map<TopicPartition, Long>) - Method in interface org.apache.kafka.connect.sink.SinkTaskContext
+
+
Reset the consumer offsets for the given topic partitions.
+
+
offset(TopicPartition, long) - Method in interface org.apache.kafka.connect.sink.SinkTaskContext
+
+
Reset the consumer offsets for the given topic partition.
+
+
OFFSET - Enum constant in enum class org.apache.kafka.server.log.remote.storage.RemoteStorageManager.IndexType
+
+
Represents offset index.
+
+
offsetAndMetadata() - Method in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
OffsetAndMetadata - Class in org.apache.kafka.clients.consumer
+
+
The Kafka offset commit API allows users to provide additional metadata (in the form of a string) + when an offset is committed.
+
+
OffsetAndMetadata(long) - Constructor for class org.apache.kafka.clients.consumer.OffsetAndMetadata
+
+
Construct a new OffsetAndMetadata object for committing through KafkaConsumer.
+
+
OffsetAndMetadata(long, String) - Constructor for class org.apache.kafka.clients.consumer.OffsetAndMetadata
+
+
Construct a new OffsetAndMetadata object for committing through KafkaConsumer.
+
+
OffsetAndMetadata(long, Optional<Integer>, String) - Constructor for class org.apache.kafka.clients.consumer.OffsetAndMetadata
+
+
Construct a new OffsetAndMetadata object for committing through KafkaConsumer.
+
+
OffsetAndTimestamp - Class in org.apache.kafka.clients.consumer
+
+
A container class for offset and timestamp.
+
+
OffsetAndTimestamp(long, long) - Constructor for class org.apache.kafka.clients.consumer.OffsetAndTimestamp
+
 
+
OffsetAndTimestamp(long, long, Optional<Integer>) - Constructor for class org.apache.kafka.clients.consumer.OffsetAndTimestamp
+
 
+
OffsetCommitCallback - Interface in org.apache.kafka.clients.consumer
+
+
A callback interface that the user can implement to trigger custom actions when a commit request completes.
+
+
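A small sketch of the `OffsetCommitCallback` usage implied above, passed to `commitAsync` from a poll loop; the error handling shown is illustrative only.

```java
import org.apache.kafka.clients.consumer.Consumer;

public class AsyncCommitSketch {
    // Call from the poll loop; `consumer` is any Consumer<String, String>.
    static void commitInBackground(Consumer<String, String> consumer) {
        consumer.commitAsync((offsets, exception) -> {
            if (exception != null) {
                // Commits may fail transiently (e.g. during a rebalance); log and move on,
                // since a later commit will supersede this one.
                System.err.println("async commit failed for " + offsets + ": " + exception);
            }
        });
    }
}
```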
offsetIndex() - Method in class org.apache.kafka.server.log.remote.storage.LogSegmentData
+
 
+
offsetLag() - Method in class org.apache.kafka.clients.admin.ReplicaInfo
+
+
The lag of the log's LEO with respect to the partition's + high watermark (if it is the current log for the partition) + or the current replica's LEO (if it is the future log + for the partition).
+
+
offsetLag() - Method in class org.apache.kafka.streams.LagInfo
+
+
Get the measured lag between current and end offset positions, for this store partition replica
+
+
OffsetMetadataTooLarge - Exception in org.apache.kafka.common.errors
+
+
The client has tried to save its offset with associated metadata larger than the maximum size allowed by the server.
+
+
OffsetMetadataTooLarge() - Constructor for exception org.apache.kafka.common.errors.OffsetMetadataTooLarge
+
 
+
OffsetMetadataTooLarge(String) - Constructor for exception org.apache.kafka.common.errors.OffsetMetadataTooLarge
+
 
+
OffsetMetadataTooLarge(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.OffsetMetadataTooLarge
+
 
+
OffsetMetadataTooLarge(Throwable) - Constructor for exception org.apache.kafka.common.errors.OffsetMetadataTooLarge
+
 
+
OffsetMovedToTieredStorageException - Exception in org.apache.kafka.common.errors
+
 
+
OffsetMovedToTieredStorageException(String) - Constructor for exception org.apache.kafka.common.errors.OffsetMovedToTieredStorageException
+
 
+
OffsetMovedToTieredStorageException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.OffsetMovedToTieredStorageException
+
 
+
OffsetNotAvailableException - Exception in org.apache.kafka.common.errors
+
+
Indicates that the leader is not able to guarantee monotonically increasing offsets + due to the high watermark lagging behind the epoch start offset after a recent leader election
+
+
OffsetNotAvailableException(String) - Constructor for exception org.apache.kafka.common.errors.OffsetNotAvailableException
+
 
+
OffsetOutOfRangeException - Exception in org.apache.kafka.clients.consumer
+
+
No reset policy has been defined, and the offsets for these partitions are either larger or smaller + than the range of offsets the server has for the given partition.
+
+
OffsetOutOfRangeException - Exception in org.apache.kafka.common.errors
+
+
No reset policy has been defined, and the offsets for these partitions are either larger or smaller + than the range of offsets the server has for the given partition.
+
+
OffsetOutOfRangeException(String) - Constructor for exception org.apache.kafka.common.errors.OffsetOutOfRangeException
+
 
+
OffsetOutOfRangeException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.OffsetOutOfRangeException
+
 
+
OffsetOutOfRangeException(String, Map<TopicPartition, Long>) - Constructor for exception org.apache.kafka.clients.consumer.OffsetOutOfRangeException
+
 
+
OffsetOutOfRangeException(Map<TopicPartition, Long>) - Constructor for exception org.apache.kafka.clients.consumer.OffsetOutOfRangeException
+
 
+
offsetOutOfRangePartitions() - Method in exception org.apache.kafka.clients.consumer.OffsetOutOfRangeException
+
+
Get a map of the topic partitions and the respective out-of-range fetch offsets.
+
+
OffsetResetStrategy - Enum Class in org.apache.kafka.clients.consumer
+
+
Deprecated. +
Since 4.0. Use AutoOffsetResetStrategy instead.
+
+
+
offsets(Collection<Map<String, T>>) - Method in interface org.apache.kafka.connect.storage.OffsetStorageReader
+
+
+ Get a set of offsets for the specified partition identifiers.
+
+
offsetsForTimes(Map<TopicPartition, Long>) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
offsetsForTimes(Map<TopicPartition, Long>) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Look up the offsets for the given partitions by timestamp.
+
+
offsetsForTimes(Map<TopicPartition, Long>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
offsetsForTimes(Map<TopicPartition, Long>, Duration) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
offsetsForTimes(Map<TopicPartition, Long>, Duration) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Look up the offsets for the given partitions by timestamp.
+
+
offsetsForTimes(Map<TopicPartition, Long>, Duration) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
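To illustrate `offsetsForTimes` as described above, a hypothetical helper that rewinds a consumer's assigned partitions to the first offsets at or after a given timestamp; entries can be null when no record exists at or after that timestamp.

```java
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.HashMap;
import java.util.Map;

public class SeekToTimestampSketch {
    static void seekToTimestamp(KafkaConsumer<String, String> consumer, long timestampMs) {
        Map<TopicPartition, Long> request = new HashMap<>();
        for (TopicPartition tp : consumer.assignment()) {
            request.put(tp, timestampMs);
        }
        Map<TopicPartition, OffsetAndTimestamp> result =
                consumer.offsetsForTimes(request, Duration.ofSeconds(10));
        result.forEach((tp, oat) -> {
            if (oat != null) {           // null when no record at/after the timestamp
                consumer.seek(tp, oat.offset());
            }
        });
    }
}
```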
OffsetSpec - Class in org.apache.kafka.clients.admin
+
+
This class allows users to specify the desired offsets when using KafkaAdminClient.listOffsets(Map, ListOffsetsOptions)
+
+
OffsetSpec() - Constructor for class org.apache.kafka.clients.admin.OffsetSpec
+
 
+
OffsetSpec.EarliestLocalSpec - Class in org.apache.kafka.clients.admin
+
 
+
OffsetSpec.EarliestSpec - Class in org.apache.kafka.clients.admin
+
 
+
OffsetSpec.LatestSpec - Class in org.apache.kafka.clients.admin
+
 
+
OffsetSpec.LatestTieredSpec - Class in org.apache.kafka.clients.admin
+
 
+
OffsetSpec.MaxTimestampSpec - Class in org.apache.kafka.clients.admin
+
 
+
OffsetSpec.TimestampSpec - Class in org.apache.kafka.clients.admin
+
 
+
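A minimal sketch tying `OffsetSpec` to `Admin.listOffsets` as referenced above; the topic and partition are assumed for illustration.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;

public class ListOffsetsSketch {
    // Look up the current end offset of one partition (hypothetical topic/partition).
    static long endOffset(Admin admin) throws Exception {
        TopicPartition tp = new TopicPartition("orders", 0);
        ListOffsetsResult result = admin.listOffsets(Map.of(tp, OffsetSpec.latest()));
        return result.partitionResult(tp).get().offset();
    }
}
```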
offsetStorageReader() - Method in interface org.apache.kafka.connect.source.SourceConnectorContext
+
+
Returns the OffsetStorageReader for this SourceConnectorContext.
+
+
offsetStorageReader() - Method in interface org.apache.kafka.connect.source.SourceTaskContext
+
+
Get the OffsetStorageReader for this SourceTask.
+
+
OffsetStorageReader - Interface in org.apache.kafka.connect.storage
+
+
+ OffsetStorageReader provides access to the offset storage used by sources.
+
+
offsetSyncsTopic(String) - Method in class org.apache.kafka.connect.mirror.DefaultReplicationPolicy
+
 
+
offsetSyncsTopic(String) - Method in interface org.apache.kafka.connect.mirror.ReplicationPolicy
+
+
Returns the name of the offset-syncs topic for given cluster alias.
+
+
ofInactivityGapAndGrace(Duration, Duration) - Static method in class org.apache.kafka.streams.kstream.SessionWindows
+
+
Creates a new window specification with the specified inactivity gap.
+
+
ofInactivityGapWithNoGrace(Duration) - Static method in class org.apache.kafka.streams.kstream.SessionWindows
+
+
Creates a new window specification with the specified inactivity gap.
+
+
ofSizeAndGrace(Duration, Duration) - Static method in class org.apache.kafka.streams.kstream.TimeWindows
+
+
Return a window definition with the given window size, and with the advance interval being equal to the window + size.
+
+
ofSizeWithNoGrace(Duration) - Static method in class org.apache.kafka.streams.kstream.TimeWindows
+
+
Return a window definition with the given window size, and with the advance interval being equal to the window + size.
+
+
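To ground the `ofSizeAndGrace` / `ofSizeWithNoGrace` entries above, a hedged Streams sketch that counts clicks per key in 5-minute tumbling windows with a 1-minute grace period; the topic name and serdes are assumptions.

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;

import java.time.Duration;

public class WindowedCountSketch {
    static void build(StreamsBuilder builder) {
        KStream<String, String> clicks =
                builder.stream("clicks", Consumed.with(Serdes.String(), Serdes.String())); // hypothetical topic

        // 5-minute tumbling windows that accept records arriving up to 1 minute late.
        KTable<Windowed<String>, Long> counts = clicks
                .groupByKey()
                .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofMinutes(5), Duration.ofMinutes(1)))
                .count();

        counts.toStream().foreach((windowedKey, count) ->
                System.out.println(windowedKey + " -> " + count));
    }
}
```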
ofTimeDifferenceAndGrace(Duration, Duration) - Static method in class org.apache.kafka.streams.kstream.JoinWindows
+
+
Specifies that records of the same key are joinable if their timestamps are within timeDifference, + i.e., the timestamp of a record from the secondary stream is max timeDifference before or after + the timestamp of the record from the primary stream.
+
+
ofTimeDifferenceAndGrace(Duration, Duration) - Static method in class org.apache.kafka.streams.kstream.SlidingWindows
+
+
Return a window definition with the window size based on the given maximum time difference (inclusive) between + records in the same window and given window grace period.
+
+
ofTimeDifferenceWithNoGrace(Duration) - Static method in class org.apache.kafka.streams.kstream.JoinWindows
+
+
Specifies that records of the same key are joinable if their timestamps are within timeDifference, + i.e., the timestamp of a record from the secondary stream is max timeDifference before or after + the timestamp of the record from the primary stream.
+
+
ofTimeDifferenceWithNoGrace(Duration) - Static method in class org.apache.kafka.streams.kstream.SlidingWindows
+
+
Return a window definition with the window size based on the given maximum time difference (inclusive) between + records in the same window and given window grace period.
+
+
ofTopicIds(Collection<Uuid>) - Static method in class org.apache.kafka.common.TopicCollection
+
 
+
ofTopicNames(Collection<String>) - Static method in class org.apache.kafka.common.TopicCollection
+
 
+
oldest(long) - Method in class org.apache.kafka.common.metrics.stats.SampledStat
+
 
+
ON_WINDOW_CLOSE - Enum constant in enum class org.apache.kafka.streams.kstream.EmitStrategy.StrategyType
+
 
+
ON_WINDOW_UPDATE - Enum constant in enum class org.apache.kafka.streams.kstream.EmitStrategy.StrategyType
+
 
+
onAcknowledgement(RecordMetadata, Exception) - Method in interface org.apache.kafka.clients.producer.ProducerInterceptor
+
+
This method is called when the record sent to the server has been acknowledged, or when sending the record fails before + it gets sent to the server.
+
+
onAcknowledgement(RecordMetadata, Exception, Headers) - Method in interface org.apache.kafka.clients.producer.ProducerInterceptor
+
+
This method is called when the record sent to the server has been acknowledged, or when sending the record fails before + it gets sent to the server.
+
+
onAssignment(ConsumerPartitionAssignor.Assignment, ConsumerGroupMetadata) - Method in interface org.apache.kafka.clients.consumer.ConsumerPartitionAssignor
+
+
Callback which is invoked when a group member receives its assignment from the leader.
+
+
onAssignment(ConsumerPartitionAssignor.Assignment, ConsumerGroupMetadata) - Method in class org.apache.kafka.clients.consumer.CooperativeStickyAssignor
+
 
+
onAssignment(ConsumerPartitionAssignor.Assignment, ConsumerGroupMetadata) - Method in class org.apache.kafka.clients.consumer.StickyAssignor
+
 
+
onAssignmentComputed(ConsumerPartitionAssignor.GroupAssignment, ConsumerPartitionAssignor.GroupSubscription, TaskAssignor.AssignmentError) - Method in interface org.apache.kafka.streams.processor.assignment.TaskAssignor
+
+
This callback can be used to observe the final assignment returned to the brokers and check for any errors that + were detected while processing the returned assignment.
+
+
onBatchLoaded(TopicPartition, String, TaskId, long, long, long) - Method in interface org.apache.kafka.streams.processor.StandbyUpdateListener
+
+
Method called after loading a batch of records.
+
+
onBatchRestored(TopicPartition, String, long, long) - Method in interface org.apache.kafka.streams.processor.StateRestoreListener
+
+
Method called after restoring a batch of records.
+
+
onChange(String, ConfigData) - Method in interface org.apache.kafka.common.config.ConfigChangeCallback
+
+
Performs an action when configuration data changes.
+
+
onChange(KafkaStreams.State, KafkaStreams.State) - Method in interface org.apache.kafka.streams.KafkaStreams.StateListener
+
+
Called when state changes.
+
+
onCommit() - Method in interface org.apache.kafka.streams.processor.CommitCallback
+
 
+
onCommit(Map<TopicPartition, OffsetAndMetadata>) - Method in interface org.apache.kafka.clients.consumer.ConsumerInterceptor
+
+
This is called when offsets get committed.
+
+
onComplete(Map<TopicIdPartition, Set<Long>>, Exception) - Method in interface org.apache.kafka.clients.consumer.AcknowledgementCommitCallback
+
+
A callback method the user can implement to provide asynchronous handling of acknowledgement completion.
+
+
onComplete(Map<TopicPartition, OffsetAndMetadata>, Exception) - Method in interface org.apache.kafka.clients.consumer.OffsetCommitCallback
+
+
A callback method the user can implement to provide asynchronous handling of commit request completion.
+
+
onCompletion(RecordMetadata, Exception) - Method in interface org.apache.kafka.clients.producer.Callback
+
+
A callback method the user can implement to provide asynchronous handling of request completion.
+
+
onConsume(ConsumerRecords<K, V>) - Method in interface org.apache.kafka.clients.consumer.ConsumerInterceptor
+
+
This is called just before the records are returned by + KafkaConsumer.poll(java.time.Duration)
+
+
ONE_UUID - Static variable in class org.apache.kafka.common.Uuid
+
+
A reserved UUID.
+
+
ONGOING - Enum constant in enum class org.apache.kafka.clients.admin.TransactionState
+
 
+
onInvalidTimestamp(ConsumerRecord<Object, Object>, long, long) - Method in class org.apache.kafka.streams.processor.FailOnInvalidTimestamp
+
+
Raises an exception on every call.
+
+
onInvalidTimestamp(ConsumerRecord<Object, Object>, long, long) - Method in class org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp
+
+
Writes a log WARN message when the extracted timestamp is invalid (negative) but returns the invalid timestamp as-is, + which ultimately causes the record to be skipped and not to be processed.
+
+
onInvalidTimestamp(ConsumerRecord<Object, Object>, long, long) - Method in class org.apache.kafka.streams.processor.UsePartitionTimeOnInvalidTimestamp
+
+
Returns the current stream-time as new timestamp for the record.
+
+
onPartitionLeadershipChanges(Set<TopicIdPartition>, Set<TopicIdPartition>) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager
+
+
This method is invoked only when there are changes in leadership of the topic partitions that this broker is + responsible for.
+
+
onPartitionsAssigned(Collection<TopicPartition>) - Method in interface org.apache.kafka.clients.consumer.ConsumerRebalanceListener
+
+
A callback method the user can implement to provide handling of customized offsets on completion of a successful + partition re-assignment.
+
+
onPartitionsLost(Collection<TopicPartition>) - Method in interface org.apache.kafka.clients.consumer.ConsumerRebalanceListener
+
+
A callback method you can implement to provide handling of cleaning up resources for partitions that have already + been reassigned to other consumers.
+
+
onPartitionsRevoked(Collection<TopicPartition>) - Method in interface org.apache.kafka.clients.consumer.ConsumerRebalanceListener
+
+
A callback method the user can implement to provide handling of offset commits to a customized store.
+
+
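A brief, illustrative sketch of the `ConsumerRebalanceListener` callbacks described above: commit on revocation, optionally seek on assignment. The topic name is hypothetical.

```java
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.Collection;
import java.util.List;

public class RebalanceListenerSketch {
    static void subscribeWithListener(KafkaConsumer<String, String> consumer) {
        consumer.subscribe(List.of("orders"), new ConsumerRebalanceListener() { // hypothetical topic
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                // Commit whatever has been processed before ownership moves away.
                consumer.commitSync();
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                // Optionally seek to offsets kept in an external store here.
            }
        });
    }
}
```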
onRestoreEnd(TopicPartition, String, long) - Method in interface org.apache.kafka.streams.processor.StateRestoreListener
+
+
Method called when restoring the StateStore is complete.
+
+
onRestoreStart(TopicPartition, String, long, long) - Method in interface org.apache.kafka.streams.processor.StateRestoreListener
+
+
Method called at the very beginning of StateStore restoration.
+
+
onRestoreSuspended(TopicPartition, String, long) - Method in interface org.apache.kafka.streams.processor.StateRestoreListener
+
+
Method called when restoring the StateStore is suspended due to the task being migrated out of the host.
+
+
onSend(ProducerRecord<K, V>) - Method in interface org.apache.kafka.clients.producer.ProducerInterceptor
+
+
This is called from KafkaProducer.send(ProducerRecord) and + KafkaProducer.send(ProducerRecord, Callback) methods, before key and value + get serialized and partition is assigned (if partition is not specified in ProducerRecord).
+
+
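As an illustration of the `onSend` / `onAcknowledgement` contract above, a hypothetical interceptor that stamps a header on every outgoing record; it would be registered through the producer's `interceptor.classes` setting.

```java
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.nio.charset.StandardCharsets;
import java.util.Map;

// Hypothetical interceptor that stamps every outgoing record with a header.
public class StampingInterceptor implements ProducerInterceptor<String, String> {

    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        record.headers().add("sent-at",
                Long.toString(System.currentTimeMillis()).getBytes(StandardCharsets.UTF_8));
        return record; // whatever is returned here is what actually gets serialized and sent
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            System.err.println("send failed: " + exception);
        }
    }

    @Override
    public void close() {}

    @Override
    public void configure(Map<String, ?> configs) {}
}
```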
onStopPartitions(Set<TopicIdPartition>) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager
+
+
This method is invoked only when the topic partitions are stopped on this broker.
+
+
onUpdate(ClusterResource) - Method in interface org.apache.kafka.common.ClusterResourceListener
+
+
A callback method that a user can implement to get updates for ClusterResource.
+
+
onUpdateStart(TopicPartition, String, long) - Method in interface org.apache.kafka.streams.processor.StandbyUpdateListener
+
+
A callback that will be invoked after registering the changelogs for each state store in a standby + task.
+
+
onUpdateSuspended(TopicPartition, String, long, long, StandbyUpdateListener.SuspendReason) - Method in interface org.apache.kafka.streams.processor.StandbyUpdateListener
+
+
This method is called when the corresponding standby task stops updating, for the provided reason.
+
+
onWindowClose() - Static method in interface org.apache.kafka.streams.kstream.EmitStrategy
+
+
This strategy indicates that the aggregated result for a window will only be emitted when the + window closes instead of when there's an update to the window.
+
+
onWindowUpdate() - Static method in interface org.apache.kafka.streams.kstream.EmitStrategy
+
+
This strategy indicates that the aggregated result for a window will be emitted every time + when there's an update to the window instead of when the window closes.
+
+
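A hedged sketch of the emit strategies above, assuming the `emitStrategy(...)` step is available on the windowed stream (as in recent Kafka Streams releases); `clicks` is any keyed stream, and one result per key and window is emitted when the window closes.

```java
import org.apache.kafka.streams.kstream.EmitStrategy;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;

import java.time.Duration;

public class EmitOnCloseSketch {
    static KTable<Windowed<String>, Long> finalCountsPerWindow(KStream<String, String> clicks) {
        return clicks
                .groupByKey()
                .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(5)))
                .emitStrategy(EmitStrategy.onWindowClose())
                .count();
    }
}
```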
Op(String, Double) - Constructor for class org.apache.kafka.common.quota.ClientQuotaAlteration.Op
+
 
+
open(Collection<TopicPartition>) - Method in class org.apache.kafka.connect.sink.SinkTask
+
+
The SinkTask uses this method to create writers for newly assigned partitions in case of partition + rebalance.
+
+
operation() - Method in class org.apache.kafka.common.acl.AccessControlEntry
+
+
Return the AclOperation.
+
+
operation() - Method in class org.apache.kafka.common.acl.AccessControlEntryFilter
+
+
Return the AclOperation.
+
+
operation() - Method in class org.apache.kafka.server.authorizer.Action
+
 
+
OperationNotAttemptedException - Exception in org.apache.kafka.common.errors
+
+
Indicates that the broker did not attempt to execute this operation.
+
+
OperationNotAttemptedException(String) - Constructor for exception org.apache.kafka.common.errors.OperationNotAttemptedException
+
 
+
ops() - Method in class org.apache.kafka.common.quota.ClientQuotaAlteration
+
 
+
OPTIMIZE - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "topology.optimization" for enabling topology optimization
+
+
optimizeRackAwareActiveTasks(TaskAssignmentUtils.RackAwareOptimizationParams, Map<ProcessId, KafkaStreamsAssignment>) - Static method in class org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils
+
+
Optimize active task assignment for rack awareness.
+
+
optimizeRackAwareStandbyTasks(TaskAssignmentUtils.RackAwareOptimizationParams, Map<ProcessId, KafkaStreamsAssignment>) - Static method in class org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils
+
+
Optimize standby task assignment for rack awareness.
+
+
optional() - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
+
Set this schema as optional.
+
+
OPTIONAL_BOOLEAN_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
OPTIONAL_BYTES_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
OPTIONAL_FLOAT32_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
OPTIONAL_FLOAT64_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
OPTIONAL_INT16_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
OPTIONAL_INT32_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
OPTIONAL_INT64_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
OPTIONAL_INT8_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
OPTIONAL_STRING_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
opType() - Method in class org.apache.kafka.clients.admin.AlterConfigOp
+
 
+
orderInGroup - Variable in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
org.apache.kafka.clients.admin - package org.apache.kafka.clients.admin
+
+
Provides a Kafka client for performing administrative operations (such as creating topics and configuring brokers) on a Kafka cluster.
+
+
org.apache.kafka.clients.consumer - package org.apache.kafka.clients.consumer
+
+
Provides a Kafka client for consuming records from topics and/or partitions in a Kafka cluster.
+
+
org.apache.kafka.clients.producer - package org.apache.kafka.clients.producer
+
+
Provides a Kafka client for producing records to topics and/or partitions in a Kafka cluster.
+
+
org.apache.kafka.common - package org.apache.kafka.common
+
+
Provides shared functionality for Kafka clients and servers.
+
+
org.apache.kafka.common.acl - package org.apache.kafka.common.acl
+
+
Provides classes representing Access Control Lists for authorization of clients
+
+
org.apache.kafka.common.annotation - package org.apache.kafka.common.annotation
+
+
Provides annotations used on Kafka APIs.
+
+
org.apache.kafka.common.config - package org.apache.kafka.common.config
+
+
Provides common mechanisms for defining, parsing, validating, and documenting user-configurable parameters.
+
+
org.apache.kafka.common.config.provider - package org.apache.kafka.common.config.provider
+
+
Provides a pluggable interface and some implementations for late-binding in configuration values.
+
+
org.apache.kafka.common.errors - package org.apache.kafka.common.errors
+
+
Provides common exception classes.
+
+
org.apache.kafka.common.header - package org.apache.kafka.common.header
+
+
Provides API for application-defined metadata attached to Kafka records.
+
+
org.apache.kafka.common.metrics - package org.apache.kafka.common.metrics
+
+
Provides the API used by Kafka clients to emit metrics which are then exposed using the MetricsReporter interface.
+
+
org.apache.kafka.common.metrics.stats - package org.apache.kafka.common.metrics.stats
+
+
Provides methods of statistically aggregating metrics upon emission.
+
+
org.apache.kafka.common.quota - package org.apache.kafka.common.quota
+
+
Provides mechanisms for enforcing resource quotas.
+
+
org.apache.kafka.common.resource - package org.apache.kafka.common.resource
+
+
Provides client handles representing logical resources in a Kafka cluster.
+
+
org.apache.kafka.common.security.auth - package org.apache.kafka.common.security.auth
+
+
Provides pluggable interfaces for implementing Kafka authentication mechanisms.
+
+
org.apache.kafka.common.security.oauthbearer - package org.apache.kafka.common.security.oauthbearer
+
+
Provides a LoginModule for using OAuth Bearer Token authentication with Kafka clusters.
+
+
org.apache.kafka.common.security.plain - package org.apache.kafka.common.security.plain
+
+
Provides implementation to use plaintext credentials authentication for securing Kafka clusters.
+
+
org.apache.kafka.common.security.scram - package org.apache.kafka.common.security.scram
+
+
Provides adaptor to use the Salted Challenge Response Authentication Mechanism for securing Kafka clusters.
+
+
org.apache.kafka.common.security.token.delegation - package org.apache.kafka.common.security.token.delegation
+
+
Provides mechanism for delegating authorization to a distinct Principal for securing Kafka clusters.
+
+
org.apache.kafka.common.serialization - package org.apache.kafka.common.serialization
+
+
Provides interface and some implementations of serialization/deserialization routines for various objects.
+
+
org.apache.kafka.connect.components - package org.apache.kafka.connect.components
+
+
Provides common interfaces used to describe pluggable components.
+
+
org.apache.kafka.connect.connector - package org.apache.kafka.connect.connector
+
+
Provides interfaces for Connector and Task implementations.
+
+
org.apache.kafka.connect.connector.policy - package org.apache.kafka.connect.connector.policy
+
+
Provides pluggable interfaces for policies controlling how users can configure connectors.
+
+
org.apache.kafka.connect.data - package org.apache.kafka.connect.data
+
+
Provides classes for representing data and schemas handled by Connect.
+
+
org.apache.kafka.connect.errors - package org.apache.kafka.connect.errors
+
+
Provides common exception classes for Connect, used by the framework and plugins to communicate failures.
+
+
org.apache.kafka.connect.header - package org.apache.kafka.connect.header
+
+
Provides an API for application-defined metadata attached to Connect records.
+
+
org.apache.kafka.connect.health - package org.apache.kafka.connect.health
+
+
Provides an API for describing the state of a running Connect cluster to ConnectRestExtension instances.
+
+
org.apache.kafka.connect.mirror - package org.apache.kafka.connect.mirror
+
+
Provides APIs for the MirrorMaker connectors and utilities to manage MirrorMaker resources.
+
+
org.apache.kafka.connect.rest - package org.apache.kafka.connect.rest
+
+
Provides a pluggable interface for altering the behavior of the Connect REST API.
+
+
org.apache.kafka.connect.sink - package org.apache.kafka.connect.sink
+
+
Provides an API for implementing sink connectors which write Kafka records to external applications.
+
+
org.apache.kafka.connect.source - package org.apache.kafka.connect.source
+
+
Provides an API for implementing source connectors which read data from external applications into Kafka.
+
+
org.apache.kafka.connect.storage - package org.apache.kafka.connect.storage
+
+
Provides pluggable interfaces and some implementations for (de)serializing data to and from Kafka
+
+
org.apache.kafka.connect.tools - package org.apache.kafka.connect.tools
+
+
Provides source and sink connector implementations used for testing
+
+
org.apache.kafka.connect.transforms - package org.apache.kafka.connect.transforms
+
+
Provides a pluggable interface for altering data which is being moved by Connect.
+
+
org.apache.kafka.connect.transforms.predicates - package org.apache.kafka.connect.transforms.predicates
+
+
Provides a pluggable interface for describing when a Transformation should be applied to a record.
+
+
org.apache.kafka.connect.util - package org.apache.kafka.connect.util
+
+
Provides common utilities that can be used in component implementations.
+
+
org.apache.kafka.coordinator.group.api.assignor - package org.apache.kafka.coordinator.group.api.assignor
+
+
Provides the core functionality and metadata management for consumer group partition assignment.
+
+
org.apache.kafka.server.authorizer - package org.apache.kafka.server.authorizer
+
+
Provides pluggable interface for performing authorization on a Kafka server.
+
+
org.apache.kafka.server.log.remote.storage - package org.apache.kafka.server.log.remote.storage
+
+
Provides a pluggable API for defining remote storage and retrieval of Kafka log segments.
+
+
org.apache.kafka.server.policy - package org.apache.kafka.server.policy
+
+
Provides pluggable interfaces for expressing policies on topics and configs.
+
+
org.apache.kafka.server.quota - package org.apache.kafka.server.quota
+
+
Provides pluggable interface for enforcing client quotas from a Kafka server.
+
+
org.apache.kafka.server.telemetry - package org.apache.kafka.server.telemetry
+
+
Provides pluggable interface for capturing client telemetry metrics.
+
+
org.apache.kafka.streams - package org.apache.kafka.streams
+
+
Provides the Kafka Streams library for building streaming data applications.
+
+
org.apache.kafka.streams.errors - package org.apache.kafka.streams.errors
+
+
Provides common exception classes for Streams applications.
+
+
org.apache.kafka.streams.kstream - package org.apache.kafka.streams.kstream
+
+
Provides a high-level programming model (DSL) to express a (stateful) data flow computation over input streams and tables.
+
+
org.apache.kafka.streams.processor - package org.apache.kafka.streams.processor
+
+
Provides a low-level programming model (Processor API, aka, PAPI) to express a (stateful) data flow computation over input topics.
+
+
org.apache.kafka.streams.processor.api - package org.apache.kafka.streams.processor.api
+
+
Provides a low-level programming model (Processor API, aka, PAPI) to express a (stateful) data flow computation over input topics.
+
+
org.apache.kafka.streams.processor.assignment - package org.apache.kafka.streams.processor.assignment
+
+
Provides classes and interfaces used to manage and assign tasks within Kafka Streams applications.
+
+
org.apache.kafka.streams.processor.assignment.assignors - package org.apache.kafka.streams.processor.assignment.assignors
+
+
Provides classes for assigning tasks to stream threads.
+
+
org.apache.kafka.streams.query - package org.apache.kafka.streams.query
+
+
Provides a query API (aka Interactive Queries) over state stores, for extracting data from a stateful Kafka Streams application.
+
+
org.apache.kafka.streams.state - package org.apache.kafka.streams.state
+
+
Provides interfaces for managing the intermediate state of a stateful streams application.
+
+
org.apache.kafka.streams.test - package org.apache.kafka.streams.test
+
+
Provides classes for testing Kafka Streams applications with mocked inputs.
+
+
org.apache.kafka.tools.api - package org.apache.kafka.tools.api
+
+
Provides interfaces for writing plugins of Kafka tools.
+
+
origin() - Method in exception org.apache.kafka.common.errors.RecordDeserializationException
+
 
+
originalKafkaOffset() - Method in class org.apache.kafka.connect.sink.SinkRecord
+
+
Get the original offset for this sink record, before any transformations were applied.
+
+
originalKafkaPartition() - Method in class org.apache.kafka.connect.sink.SinkRecord
+
+
Get the original topic partition for this sink record, before any transformations were applied.
+
+
originals() - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
originals(Map<String, Object>) - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
originalsStrings() - Method in class org.apache.kafka.common.config.AbstractConfig
+
+
Get all the original settings, ensuring that all values are of type String.
+
+
originalsWithPrefix(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
+
Gets all original settings with the given prefix, stripping the prefix before adding it to the output.
+
+
originalsWithPrefix(String, boolean) - Method in class org.apache.kafka.common.config.AbstractConfig
+
+
Gets all original settings with the given prefix.
+
+
originalTopic() - Method in class org.apache.kafka.connect.sink.SinkRecord
+
+
Get the original topic for this sink record, before any transformations were applied.
+
+
originalTopic(String) - Method in interface org.apache.kafka.connect.mirror.ReplicationPolicy
+
+
Returns the name of the original topic, which may have been replicated multiple hops.
+
+
otherValueSerde() - Method in class org.apache.kafka.streams.kstream.Joined
+
+
Deprecated. Since 4.0 and should not be used any longer.
+
+
+
otherValueSerde(Serde<VRight>) - Static method in class org.apache.kafka.streams.kstream.Joined
+
+
Create an instance of Joined with another value Serde.
+
+
outerJoin(KStream<K, VRight>, ValueJoiner<? super V, ? super VRight, ? extends VOut>, JoinWindows) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Join records of this (left) stream with another (right) KStream's records using a windowed outer equi-join.
+
+
outerJoin(KStream<K, VRight>, ValueJoiner<? super V, ? super VRight, ? extends VOut>, JoinWindows, StreamJoined<K, V, VRight>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
outerJoin(KStream<K, VRight>, ValueJoinerWithKey<? super K, ? super V, ? super VRight, ? extends VOut>, JoinWindows) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
outerJoin(KStream<K, VRight>, ValueJoinerWithKey<? super K, ? super V, ? super VRight, ? extends VOut>, JoinWindows, StreamJoined<K, V, VRight>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
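To make the windowed KStream outerJoin variants above concrete, here is a rough sketch of a stream-stream outer equi-join; the topic names, value format, and window size are assumptions, and default serdes are assumed to be configured:

```java
import java.time.Duration;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.JoinWindows;
import org.apache.kafka.streams.kstream.KStream;

StreamsBuilder builder = new StreamsBuilder();
KStream<String, String> left = builder.stream("left-topic");    // assumed topic name
KStream<String, String> right = builder.stream("right-topic");  // assumed topic name

// Outer join: either argument of the ValueJoiner may be null when no
// matching record arrives on the other side within the join window.
KStream<String, String> joined = left.outerJoin(
        right,
        (leftValue, rightValue) -> leftValue + "/" + rightValue,
        JoinWindows.ofTimeDifferenceWithNoGrace(Duration.ofMinutes(5)));
```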
outerJoin(KTable<K, VO>, ValueJoiner<? super V, ? super VO, ? extends VR>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable (left input) with another KTable's (right input) records using non-windowed outer equi join, with default serializers, deserializers, and state store.
+
+
outerJoin(KTable<K, VO>, ValueJoiner<? super V, ? super VO, ? extends VR>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable (left input) with another KTable's (right input) records using non-windowed outer equi join, with the Materialized instance for configuration of the key serde, the result table's value serde, and state store.
+
+
outerJoin(KTable<K, VO>, ValueJoiner<? super V, ? super VO, ? extends VR>, Named) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable (left input) with another KTable's (right input) records using non-windowed outer equi join, with default serializers, deserializers, and state store.
+
+
outerJoin(KTable<K, VO>, ValueJoiner<? super V, ? super VO, ? extends VR>, Named, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Join records of this KTable (left input) with another KTable's (right input) records using non-windowed outer equi join, with the Materialized instance for configuration of the key serde, the result table's value serde, and state store.
+
+
OutOfOrderSequenceException - Exception in org.apache.kafka.common.errors
+
+
This exception indicates that the broker received an unexpected sequence number from the producer, which means that data may have been lost.
+
+
OutOfOrderSequenceException(String) - Constructor for exception org.apache.kafka.common.errors.OutOfOrderSequenceException
+
 
+
overlap(Window) - Method in class org.apache.kafka.streams.kstream.Window
+
+
Check if the given window overlaps with this window.
+
+
ownedPartitions() - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
+
 
+
owner() - Method in class org.apache.kafka.clients.admin.CreateDelegationTokenOptions
+
 
+
owner() - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
owner(KafkaPrincipal) - Method in class org.apache.kafka.clients.admin.CreateDelegationTokenOptions
+
 
+
ownerAsString() - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
ownerOrRenewer(KafkaPrincipal) - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
owners() - Method in class org.apache.kafka.clients.admin.DescribeDelegationTokenOptions
+
 
+
owners(List<KafkaPrincipal>) - Method in class org.apache.kafka.clients.admin.DescribeDelegationTokenOptions
+
+
If owners is null, all the user-owned tokens and tokens where the user has Describe permission will be returned.
+
+
+

P

+
+
pair(K, V) - Static method in class org.apache.kafka.streams.KeyValue
+
+
Create a new key-value pair.
+
+
parameter(String, String) - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
+
Set a schema parameter.
+
+
parameters() - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
parameters() - Method in interface org.apache.kafka.connect.data.Schema
+
+
Get a map of schema parameters.
+
+
parameters() - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
parameters(Map<String, String>) - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
+
Set schema parameters.
+
+
parse(String) - Static method in enum class org.apache.kafka.clients.admin.TransactionState
+
 
+
parse(String) - Static method in enum class org.apache.kafka.common.ClassicGroupState
+
+
Case-insensitive classic group state lookup by string name.
+
+
parse(String) - Static method in enum class org.apache.kafka.common.ConsumerGroupState
+
+
Deprecated.
+
Case-insensitive consumer group state lookup by string name.
+
+
parse(String) - Static method in enum class org.apache.kafka.common.GroupState
+
+
Case-insensitive group state lookup by string name.
+
+
parse(String) - Static method in enum class org.apache.kafka.common.GroupType
+
+
Parse a string into a consumer group type, in a case-insensitive manner.
+
+
parse(String) - Static method in class org.apache.kafka.streams.processor.TaskId
+
 
+
parse(Map<?, ?>) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Parse and validate configs against this configuration definition.
+
+
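The parse(Map) entry above is the core of ConfigDef: configs are defined with a type, default, and documentation, then raw values are parsed and validated against the definition. A small sketch; the key names here are illustrative, not part of any real component:

```java
import java.util.Map;
import org.apache.kafka.common.config.ConfigDef;

ConfigDef def = new ConfigDef()
        .define("batch.size", ConfigDef.Type.INT, 100,
                ConfigDef.Importance.MEDIUM, "Number of records per batch")
        .define("endpoint", ConfigDef.Type.STRING,
                ConfigDef.Importance.HIGH, "Target endpoint URL");

// Missing keys fall back to their defaults; values are coerced to the declared type.
Map<String, Object> parsed = def.parse(Map.of("endpoint", "http://localhost:8080"));
// parsed.get("batch.size") == 100, parsed.get("endpoint") is a validated String
```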
parseStoreType() - Method in class org.apache.kafka.streams.TopologyConfig
+
+
Deprecated.
+
+
parseString(String) - Static method in class org.apache.kafka.connect.data.Values
+
+
Parse the specified string representation of a value into its schema and value.
+
+
parseType(String, Object, ConfigDef.Type) - Static method in class org.apache.kafka.common.config.ConfigDef
+
+
Parse a value according to its expected type.
+
+
partition() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription.TaskOffset
+
+
The partition of the task.
+
+
partition() - Method in class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
The partition from which this record is received
+
+
partition() - Method in class org.apache.kafka.clients.producer.ProducerRecord
+
 
+
partition() - Method in class org.apache.kafka.clients.producer.RecordMetadata
+
+
The partition the record was sent to
+
+
partition() - Method in class org.apache.kafka.common.PartitionInfo
+
+
The partition id
+
+
partition() - Method in class org.apache.kafka.common.TopicIdPartition
+
 
+
partition() - Method in class org.apache.kafka.common.TopicPartition
+
 
+
partition() - Method in class org.apache.kafka.common.TopicPartitionInfo
+
+
Return the partition id.
+
+
partition() - Method in class org.apache.kafka.common.TopicPartitionReplica
+
 
+
partition() - Method in interface org.apache.kafka.streams.errors.ErrorHandlerContext
+
+
Return the partition ID of the current input record; could be -1 if it is not available.
+
+
partition() - Method in class org.apache.kafka.streams.KeyQueryMetadata
+
+
Get the store partition corresponding to the key.
+
+
partition() - Method in interface org.apache.kafka.streams.processor.api.RecordMetadata
+
+
Return the partition id of the current input record; could be -1 if it is not available.
+
+
partition() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
partition() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return the partition id of the current input record; could be -1 if it is not available.
+
+
partition() - Method in interface org.apache.kafka.streams.processor.RecordContext
+
+
Return the partition id of the current input record; could be -1 if it is not available.
+
+
partition() - Method in class org.apache.kafka.streams.processor.TaskId
+
 
+
partition() - Method in class org.apache.kafka.streams.StoreQueryParameters
+
+
Get the store partition that will be queried.
+
+
partition(String, Object, byte[], Object, byte[], Cluster) - Method in interface org.apache.kafka.clients.producer.Partitioner
+
+
Compute the partition for the given record.
+
+
partition(String, Object, byte[], Object, byte[], Cluster) - Method in class org.apache.kafka.clients.producer.RoundRobinPartitioner
+
+
Compute the partition for the given record.
+
+
partition(TopicPartition) - Method in class org.apache.kafka.common.Cluster
+
+
Get the metadata for the specified partition
+
+
PARTITION_ASSIGNMENT_STRATEGY_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
partition.assignment.strategy
+
+
PARTITION_COUNT_CONFIG - Static variable in class org.apache.kafka.connect.tools.SchemaSourceTask
+
 
+
PARTITION_KEY - Static variable in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
partitionAssignor() - Method in class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+
The consumer group partition assignor.
+
+
PartitionAssignor - Interface in org.apache.kafka.coordinator.group.api.assignor
+
+
Server-side partition assignor used by the GroupCoordinator.
+
+
PartitionAssignorException - Exception in org.apache.kafka.coordinator.group.api.assignor
+
+ +
+
PartitionAssignorException(String) - Constructor for exception org.apache.kafka.coordinator.group.api.assignor.PartitionAssignorException
+
 
+
PartitionAssignorException(String, Throwable) - Constructor for exception org.apache.kafka.coordinator.group.api.assignor.PartitionAssignorException
+
 
+
partitionCountForTopic(String) - Method in class org.apache.kafka.common.Cluster
+
+
Get the number of partitions for the given topic.
+
+
Partitioner - Interface in org.apache.kafka.clients.producer
+
+
Partitioner Interface. Implement Monitorable to enable the partitioner to register metrics.
+
+
PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
partitioner.adaptive.partitioning.enable
+
+
PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
partitioner.availability.timeout.ms
+
+
PARTITIONER_CLASS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
partitioner.class
+
+
PARTITIONER_IGNORE_KEYS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
partitioner.ignore.keys
+
+
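The partitioner.* ProducerConfig constants listed above are plain producer properties; for example, a custom Partitioner implementation is plugged in via partitioner.class. A sketch using the bundled RoundRobinPartitioner; the broker address is a placeholder:

```java
import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.RoundRobinPartitioner;

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // assumed broker address
// Use a Partitioner implementation instead of the default partitioning logic.
props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, RoundRobinPartitioner.class.getName());
```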
PartitionInfo - Class in org.apache.kafka.common
+
+
This is used to describe per-partition state in the MetadataResponse.
+
+
PartitionInfo(String, int, Node, Node[], Node[]) - Constructor for class org.apache.kafka.common.PartitionInfo
+
 
+
PartitionInfo(String, int, Node, Node[], Node[], Node[]) - Constructor for class org.apache.kafka.common.PartitionInfo
+
 
+
PartitionProducerState(List<ProducerState>) - Constructor for class org.apache.kafka.clients.admin.DescribeProducersResult.PartitionProducerState
+
 
+
PartitionReassignment - Class in org.apache.kafka.clients.admin
+
+
A partition reassignment, which has been listed via Admin.listPartitionReassignments().
+
+
PartitionReassignment(List<Integer>, List<Integer>, List<Integer>) - Constructor for class org.apache.kafka.clients.admin.PartitionReassignment
+
 
+
partitionResult(TopicPartition) - Method in class org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsResult
+
+
Return a future which can be used to check the result for a given partition.
+
+
partitionResult(TopicPartition) - Method in class org.apache.kafka.clients.admin.AlterShareGroupOffsetsResult
+
+
Return a future which can be used to check the result for a given partition.
+
+
partitionResult(TopicPartition) - Method in class org.apache.kafka.clients.admin.AlterStreamsGroupOffsetsResult
+
+
Return a future which can be used to check the result for a given partition.
+
+
partitionResult(TopicPartition) - Method in class org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsResult
+
+
Return a future which can be used to check the result for a given partition.
+
+
partitionResult(TopicPartition) - Method in class org.apache.kafka.clients.admin.DeleteStreamsGroupOffsetsResult
+
+
Return a future which can be used to check the result for a given topic.
+
+
partitionResult(TopicPartition) - Method in class org.apache.kafka.clients.admin.DescribeProducersResult
+
 
+
partitionResult(TopicPartition) - Method in class org.apache.kafka.clients.admin.ListOffsetsResult
+
+
Return a future which can be used to check the result for a given partition.
+
+
partitions() - Method in class org.apache.kafka.clients.admin.ElectLeadersResult
+
+
Get a future for the topic partitions for which a leader election was attempted.
+
+
partitions() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberAssignment.TaskIds
+
+
The partitions of the subtopology processed by this member.
+
+
partitions() - Method in class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription.TopicInfo
+
+
The number of partitions in the topic.
+
+
partitions() - Method in class org.apache.kafka.clients.admin.TopicDescription
+
+
A list of partitions where the index represents the partition id and the element contains leadership and replica information for that partition.
+
+
partitions() - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment
+
 
+
partitions() - Method in class org.apache.kafka.clients.consumer.ConsumerRecords
+
+
Get the partitions which have records contained in this record set.
+
+
partitions() - Method in exception org.apache.kafka.clients.consumer.InvalidOffsetException
+
 
+
partitions() - Method in exception org.apache.kafka.clients.consumer.NoOffsetForPartitionException
+
+
Returns all partitions for which no offsets are defined.
+
+
partitions() - Method in exception org.apache.kafka.clients.consumer.OffsetOutOfRangeException
+
 
+
partitions() - Method in interface org.apache.kafka.coordinator.group.api.assignor.MemberAssignment
+
 
+
partitions(String, K, V, int) - Method in interface org.apache.kafka.streams.processor.StreamPartitioner
+
+
Determine the number(s) of the partition(s) to which a record with the given key and value should be sent, for the given topic and current partition count
+
+
partitionsFor(String) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
partitionsFor(String) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get metadata about the partitions for a given topic.
+
+
partitionsFor(String) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
partitionsFor(String) - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
Get the partition metadata for the given topic.
+
+
partitionsFor(String) - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
partitionsFor(String) - Method in interface org.apache.kafka.clients.producer.Producer
+
+ +
+
partitionsFor(String, Duration) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
partitionsFor(String, Duration) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get metadata about the partitions for a given topic.
+
+
partitionsFor(String, Duration) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
partitionsForException - Variable in class org.apache.kafka.clients.producer.MockProducer
+
 
+
partitionsForNode(int) - Method in class org.apache.kafka.common.Cluster
+
+
Get the list of partitions whose leader is this node
+
+
partitionsForTopic(String) - Method in class org.apache.kafka.common.Cluster
+
+
Get the list of partitions for this topic
+
+
partitionSizeLimitPerResponse() - Method in class org.apache.kafka.clients.admin.DescribeTopicsOptions
+
 
+
partitionSizeLimitPerResponse(int) - Method in class org.apache.kafka.clients.admin.DescribeTopicsOptions
+
+
Sets the maximum number of partitions to be returned in a single response.
+
+
partitionsToOffsetAndMetadata() - Method in class org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult
+
+
Return a future which yields a map of topic partitions to OffsetAndMetadata objects.
+
+
partitionsToOffsetAndMetadata(String) - Method in class org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult
+
+
Return a future which yields a map of topic partitions to OffsetAndMetadata objects for the specified group.
+
+
partitionsToOffsetAndMetadata(String) - Method in class org.apache.kafka.clients.admin.ListShareGroupOffsetsResult
+
+
Return a future which yields a map of topic partitions to offsets for the specified group.
+
+
partitionsToOffsetAndMetadata(String) - Method in class org.apache.kafka.clients.admin.ListStreamsGroupOffsetsResult
+
+
Return a future which yields a map of topic partitions to offsets for the specified group.
+
+
password() - Method in class org.apache.kafka.clients.admin.UserScramCredentialUpsertion
+
 
+
password() - Method in class org.apache.kafka.common.security.plain.PlainAuthenticateCallback
+
+
Returns the password provided by the client during SASL/PLAIN authentication
+
+
PASSWORD - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigType
+
 
+
PASSWORD - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Type
+
+
Used for string values containing sensitive data such as a password or key.
+
+
pattern() - Method in class org.apache.kafka.clients.consumer.SubscriptionPattern
+
 
+
pattern() - Method in class org.apache.kafka.common.acl.AclBinding
+
 
+
patternFilter() - Method in class org.apache.kafka.common.acl.AclBindingFilter
+
 
+
patternType() - Method in class org.apache.kafka.common.resource.ResourcePattern
+
 
+
patternType() - Method in class org.apache.kafka.common.resource.ResourcePatternFilter
+
 
+
PatternType - Enum Class in org.apache.kafka.common.resource
+
+
Resource pattern type.
+
+
pause() - Method in class org.apache.kafka.streams.KafkaStreams
+
+
This method pauses processing for the KafkaStreams instance.
+
+
pause(Collection<TopicPartition>) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
pause(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Suspend fetching from the requested partitions.
+
+
pause(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
pause(TopicPartition...) - Method in interface org.apache.kafka.connect.sink.SinkTaskContext
+
+
Pause consumption of messages from the specified TopicPartitions.
+
+
paused() - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
paused() - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get the set of partitions that were previously paused by a call to KafkaConsumer.pause(Collection).
+
+
paused() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
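The pause()/paused() consumer entries above work together: pausing keeps the partition assignment but makes poll() return no records for it until it is resumed. A minimal sketch, assuming 'consumer' is an already-running KafkaConsumer<String, String> and that the "orders" topic and partition 0 exist:

```java
import java.time.Duration;
import java.util.Set;
import org.apache.kafka.common.TopicPartition;

// "orders"/0 is a placeholder; 'consumer' is assumed to be created elsewhere.
TopicPartition tp = new TopicPartition("orders", 0);

consumer.pause(Set.of(tp));                              // stop fetching, keep the assignment
consumer.poll(Duration.ofMillis(100));                   // returns no records for the paused partition
Set<TopicPartition> currentlyPaused = consumer.paused(); // contains tp until it is resumed
```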
peek(ForeachAction<? super K, ? super V>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Perform an action on each record of this KStream.
+
+
peek(ForeachAction<? super K, ? super V>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
peekNextKey() - Method in interface org.apache.kafka.streams.state.KeyValueIterator
+
+
Peek at the next key without advancing the iterator
+
+
PENDING_ERROR - Enum constant in enum class org.apache.kafka.streams.KafkaStreams.State
+
 
+
PENDING_SHUTDOWN - Enum constant in enum class org.apache.kafka.streams.KafkaStreams.State
+
 
+
percentile() - Method in class org.apache.kafka.common.metrics.stats.Percentile
+
 
+
Percentile - Class in org.apache.kafka.common.metrics.stats
+
 
+
Percentile(MetricName, double) - Constructor for class org.apache.kafka.common.metrics.stats.Percentile
+
 
+
Percentiles - Class in org.apache.kafka.common.metrics.stats
+
+
A compound stat that reports one or more percentiles
+
+
Percentiles(int, double, double, Percentiles.BucketSizing, Percentile...) - Constructor for class org.apache.kafka.common.metrics.stats.Percentiles
+
 
+
Percentiles(int, double, Percentiles.BucketSizing, Percentile...) - Constructor for class org.apache.kafka.common.metrics.stats.Percentiles
+
 
+
Percentiles.BucketSizing - Enum Class in org.apache.kafka.common.metrics.stats
+
 
+
permissionType() - Method in class org.apache.kafka.common.acl.AccessControlEntry
+
+
Return the AclPermissionType.
+
+
permissionType() - Method in class org.apache.kafka.common.acl.AccessControlEntryFilter
+
+
Return the AclPermissionType.
+
+
persistent() - Method in interface org.apache.kafka.streams.processor.StateStore
+
+
Return if the storage is persistent or not.
+
+
persistentKeyValueStore(String) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Create a persistent KeyValueBytesStoreSupplier.
+
+
persistentSessionStore(String, Duration) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Create a persistent SessionBytesStoreSupplier.
+
+
persistentTimestampedKeyValueStore(String) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Create a persistent KeyValueBytesStoreSupplier.
+
+
persistentTimestampedWindowStore(String, Duration, Duration, boolean) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Create a persistent WindowBytesStoreSupplier.
+
+
persistentVersionedKeyValueStore(String, Duration) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Create a persistent versioned key-value store VersionedBytesStoreSupplier.
+
+
persistentVersionedKeyValueStore(String, Duration, Duration) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Create a persistent versioned key-value store VersionedBytesStoreSupplier.
+
+
persistentWindowStore(String, Duration, Duration, boolean) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Create a persistent WindowBytesStoreSupplier.
+
+
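The Stores.persistent*Store factory methods above return store suppliers that are usually wrapped in a StoreBuilder (or handed to Materialized) before being attached to a topology. A sketch for a persistent key-value store; the store name and serdes are illustrative:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

// Build a persistent key-value store from a supplier; the resulting builder
// can be added to a Topology or referenced by processors that need the store.
StoreBuilder<KeyValueStore<String, Long>> countsStore =
        Stores.keyValueStoreBuilder(
                Stores.persistentKeyValueStore("counts-store"),
                Serdes.String(),
                Serdes.Long());
```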
pipeInput(K, V) - Method in class org.apache.kafka.streams.TestInputTopic
+
+
Send an input record with the given key and value on the topic and then commit the records.
+
+
pipeInput(K, V, long) - Method in class org.apache.kafka.streams.TestInputTopic
+
+
Send an input record with the given key, value and timestamp on the topic and then commit the records.
+
+
pipeInput(K, V, Instant) - Method in class org.apache.kafka.streams.TestInputTopic
+
+
Send an input record with the given key, value and timestamp on the topic and then commit the records.
+
+
pipeInput(TestRecord<K, V>) - Method in class org.apache.kafka.streams.TestInputTopic
+
+
Send an input record with the given record on the topic and then commit the records.
+
+
pipeInput(V) - Method in class org.apache.kafka.streams.TestInputTopic
+
+
Send an input record with the given value on the topic and then commit the records.
+
+
pipeInput(V, Instant) - Method in class org.apache.kafka.streams.TestInputTopic
+
+
Send an input record with the given value and timestamp on the topic and then commit the records.
+
+
pipeKeyValueList(List<KeyValue<K, V>>) - Method in class org.apache.kafka.streams.TestInputTopic
+
+
Send input records with the given KeyValue list on the topic then commit each record individually.
+
+
pipeKeyValueList(List<KeyValue<K, V>>, Instant, Duration) - Method in class org.apache.kafka.streams.TestInputTopic
+
+
Send input records with the given KeyValue list on the topic then commit each record individually.
+
+
pipeRecordList(List<? extends TestRecord<K, V>>) - Method in class org.apache.kafka.streams.TestInputTopic
+
+
Send input records with the given KeyValue list on the topic then commit each record individually.
+
+
pipeValueList(List<V>) - Method in class org.apache.kafka.streams.TestInputTopic
+
+
Send input records with the given value list on the topic then commit each record individually.
+
+
pipeValueList(List<V>, Instant, Duration) - Method in class org.apache.kafka.streams.TestInputTopic
+
+
Send input records with the given value list on the topic then commit each record individually.
+
+
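The pipeInput/pipe*List methods above are used with TopologyTestDriver to feed records into a topology under test. A sketch, assuming 'topology' is an already-built Topology and that the topic names match its sources and sinks:

```java
import java.time.Instant;
import java.util.Properties;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TestOutputTopic;
import org.apache.kafka.streams.TopologyTestDriver;

Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "test-app");      // dummy values are fine for the driver
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");

try (TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
    TestInputTopic<String, String> input =
            driver.createInputTopic("input-topic", new StringSerializer(), new StringSerializer());
    TestOutputTopic<String, String> output =
            driver.createOutputTopic("output-topic", new StringDeserializer(), new StringDeserializer());

    input.pipeInput("key-1", "value-1");                                         // key and value
    input.pipeInput("key-2", "value-2", Instant.parse("2024-01-01T00:00:00Z"));  // with explicit timestamp

    System.out.println(output.readKeyValuesToList());
}
```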
PlainAuthenticateCallback - Class in org.apache.kafka.common.security.plain
+
 
+
PlainAuthenticateCallback(char[]) - Constructor for class org.apache.kafka.common.security.plain.PlainAuthenticateCallback
+
+
Creates a callback with the password provided by the client
+
+
PlainLoginModule - Class in org.apache.kafka.common.security.plain
+
 
+
PlainLoginModule() - Constructor for class org.apache.kafka.common.security.plain.PlainLoginModule
+
 
+
PLAINTEXT - Enum constant in enum class org.apache.kafka.common.security.auth.SecurityProtocol
+
+
Un-authenticated, non-encrypted channel
+
+
PlaintextAuthenticationContext - Class in org.apache.kafka.common.security.auth
+
 
+
PlaintextAuthenticationContext(InetAddress, String) - Constructor for class org.apache.kafka.common.security.auth.PlaintextAuthenticationContext
+
 
+
pluginMetrics() - Method in interface org.apache.kafka.connect.connector.ConnectorContext
+
+
Get a PluginMetrics that can be used to define metrics
+
+
pluginMetrics() - Method in interface org.apache.kafka.connect.sink.SinkTaskContext
+
+
Get a PluginMetrics that can be used to define metrics
+
+
pluginMetrics() - Method in interface org.apache.kafka.connect.source.SourceTaskContext
+
+
Get a PluginMetrics that can be used to define metrics
+
+
PluginMetrics - Interface in org.apache.kafka.common.metrics
+
+
This allows plugins to register metrics and sensors.
+
+
PolicyViolationException - Exception in org.apache.kafka.common.errors
+
+
Exception thrown if a create topics request does not satisfy the configured policy for a topic.
+
+
PolicyViolationException(String) - Constructor for exception org.apache.kafka.common.errors.PolicyViolationException
+
 
+
PolicyViolationException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.PolicyViolationException
+
 
+
poll() - Method in class org.apache.kafka.connect.source.SourceTask
+
+
Poll this source task for new records.
+
+
poll() - Method in class org.apache.kafka.connect.tools.MockSourceTask
+
 
+
poll() - Method in class org.apache.kafka.connect.tools.SchemaSourceTask
+
 
+
poll() - Method in class org.apache.kafka.connect.tools.VerifiableSourceTask
+
 
+
poll(Duration) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
poll(Duration) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Fetch data for the topics or partitions specified using one of the subscribe/assign APIs.
+
+
poll(Duration) - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Deliver records for the topics specified using KafkaShareConsumer.subscribe(Collection).
+
+
poll(Duration) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
poll(Duration) - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
poll(Duration) - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
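The poll(Duration) consumer entries above are the heart of the consume loop: subscribe, then repeatedly poll and process the returned batch. A minimal sketch; the broker address, group id, and topic are placeholders:

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // assumed broker
props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");            // assumed group id
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
    consumer.subscribe(List.of("example-topic"));                      // assumed topic
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("partition=%d offset=%d value=%s%n",
                    record.partition(), record.offset(), record.value());
        }
    }
}
```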
POLL - Enum constant in enum class org.apache.kafka.connect.source.SourceTask.TransactionBoundary
+
+
A new transaction will be started and committed for every batch of records returned by SourceTask.poll().
+
+
POLL_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
poll.ms
+
+
port() - Method in class org.apache.kafka.clients.admin.RaftVoterEndpoint
+
 
+
port() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription.Endpoint
+
 
+
port() - Method in class org.apache.kafka.common.Endpoint
+
+
Returns the port to which the listener is bound.
+
+
port() - Method in class org.apache.kafka.common.Node
+
+
The port for this node
+
+
port() - Method in class org.apache.kafka.streams.state.HostInfo
+
 
+
port() - Method in interface org.apache.kafka.streams.StreamsMetadata
+
+
Port on which the Streams client listens.
+
+
position() - Method in class org.apache.kafka.streams.query.PositionBound
+
+
Returns the specific position of this bound.
+
+
position(TopicPartition) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
position(TopicPartition) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get the offset of the next record that will be fetched (if a record with that offset exists).
+
+
position(TopicPartition) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
position(TopicPartition, Duration) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
position(TopicPartition, Duration) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get the offset of the next record that will be fetched (if a record with that offset exists).
+
+
position(TopicPartition, Duration) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
Position - Class in org.apache.kafka.streams.query
+
+
A representation of a position vector with respect to a set of topic partitions.
+
+
PositionBound - Class in org.apache.kafka.streams.query
+
+
A class bounding the processing state Position during queries.
+
+
PositionOutOfRangeException - Exception in org.apache.kafka.common.errors
+
 
+
PositionOutOfRangeException(String) - Constructor for exception org.apache.kafka.common.errors.PositionOutOfRangeException
+
 
+
PositionOutOfRangeException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.PositionOutOfRangeException
+
 
+
PREALLOCATE_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
PREALLOCATE_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
preCommit(Map<TopicPartition, OffsetAndMetadata>) - Method in class org.apache.kafka.connect.sink.SinkTask
+
+
Pre-commit hook invoked prior to an offset commit.
+
+
predecessors() - Method in interface org.apache.kafka.streams.TopologyDescription.Node
+
+
The predecessors of this node within a sub-topology.
+
+
Predicate<R extends ConnectRecord<R>> - Interface in org.apache.kafka.connect.transforms.predicates
+
+
A predicate on records.
+
+
Predicate<K,V> - Interface in org.apache.kafka.streams.kstream
+
+
The Predicate interface represents a predicate (boolean-valued function) of a KeyValue pair.
+
+
PREFERRED - Enum constant in enum class org.apache.kafka.common.ElectionType
+
 
+
PreferredLeaderNotAvailableException - Exception in org.apache.kafka.common.errors
+
 
+
PreferredLeaderNotAvailableException(String) - Constructor for exception org.apache.kafka.common.errors.PreferredLeaderNotAvailableException
+
 
+
PreferredLeaderNotAvailableException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.PreferredLeaderNotAvailableException
+
 
+
PREFIXED - Enum constant in enum class org.apache.kafka.common.resource.PatternType
+
+
A prefixed resource name.
+
+
prefixScan(P, PS) - Method in interface org.apache.kafka.streams.state.ReadOnlyKeyValueStore
+
+
Return an iterator over all keys with the specified prefix.
+
+
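As a usage note for prefixScan above: the prefix is serialized with the supplied serializer and the iterator must be closed. A sketch, assuming 'store' is a ReadOnlyKeyValueStore<String, Long> obtained from a running application and that keys share a "user-" prefix:

```java
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.state.KeyValueIterator;

// 'store' and the "user-" key prefix are assumptions for this sketch.
try (KeyValueIterator<String, Long> iter = store.prefixScan("user-", new StringSerializer())) {
    while (iter.hasNext()) {
        KeyValue<String, Long> entry = iter.next();
        System.out.println(entry.key + " -> " + entry.value);
    }
}
```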
PREPARE_ABORT - Enum constant in enum class org.apache.kafka.clients.admin.TransactionState
+
 
+
PREPARE_COMMIT - Enum constant in enum class org.apache.kafka.clients.admin.TransactionState
+
 
+
PREPARE_EPOCH_FENCE - Enum constant in enum class org.apache.kafka.clients.admin.TransactionState
+
 
+
PreparedTxnState - Class in org.apache.kafka.clients.producer
+
+
Class containing the state of a transaction after it has been prepared for a two-phase commit.
+
+
PreparedTxnState() - Constructor for class org.apache.kafka.clients.producer.PreparedTxnState
+
+
Creates a new empty PreparedTxnState
+
+
PreparedTxnState(String) - Constructor for class org.apache.kafka.clients.producer.PreparedTxnState
+
+
Creates a new PreparedTxnState from a serialized string representation
+
+
PREPARING_REBALANCE - Enum constant in enum class org.apache.kafka.common.ClassicGroupState
+
 
+
PREPARING_REBALANCE - Enum constant in enum class org.apache.kafka.common.ConsumerGroupState
+
+
Deprecated.
+
PREPARING_REBALANCE - Enum constant in enum class org.apache.kafka.common.GroupState
+
 
+
previousActiveTasks() - Method in interface org.apache.kafka.streams.processor.assignment.KafkaStreamsState
+
 
+
previousStandbyTasks() - Method in interface org.apache.kafka.streams.processor.assignment.KafkaStreamsState
+
 
+
prevTasksByLag(String) - Method in interface org.apache.kafka.streams.processor.assignment.KafkaStreamsState
+
 
+
principal() - Method in class org.apache.kafka.common.acl.AccessControlEntry
+
+
Return the principal for this entry.
+
+
principal() - Method in class org.apache.kafka.common.acl.AccessControlEntryFilter
+
+
Return the principal or null.
+
+
principal() - Method in interface org.apache.kafka.server.authorizer.AuthorizableRequestContext
+
+
Returns authenticated principal for the connection on which request was received.
+
+
PrincipalDeserializationException - Exception in org.apache.kafka.common.errors
+
+
Exception used to indicate a kafka principal deserialization failure during request forwarding.
+
+
PrincipalDeserializationException(String) - Constructor for exception org.apache.kafka.common.errors.PrincipalDeserializationException
+
 
+
PrincipalDeserializationException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.PrincipalDeserializationException
+
 
+
principalName() - Method in interface org.apache.kafka.common.security.oauthbearer.OAuthBearerToken
+
+
The name of the principal to which this credential applies
+
+
print(Printed<K, V>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Print the records of this KStream using the options provided by Printed.
+
+
Printed<K,V> - Class in org.apache.kafka.streams.kstream
+
+
An object to define the options used when printing a KStream.
+
+
PROBING_REBALANCE_INTERVAL_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
probing.rebalance.interval.ms
+
+
probingRebalanceIntervalMs() - Method in class org.apache.kafka.streams.processor.assignment.AssignmentConfigs
+
+
The probing rebalance interval in milliseconds as configured via StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG
+
+
process(FixedKeyRecord<KIn, VIn>) - Method in interface org.apache.kafka.streams.processor.api.FixedKeyProcessor
+
+
Process the record.
+
+
process(ProcessorSupplier<? super K, ? super V, ? extends KOut, ? extends VOut>, String...) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Process all records in this stream, one record at a time, by applying a Processor (provided by the given ProcessorSupplier) to each input record.
+
+
process(ProcessorSupplier<? super K, ? super V, ? extends KOut, ? extends VOut>, Named, String...) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
process(Record<K, V>) - Method in class org.apache.kafka.streams.kstream.ForeachProcessor
+
+
Deprecated.
+
process(Record<KIn, VIn>) - Method in interface org.apache.kafka.streams.processor.api.Processor
+
+
Process the record.
+
+
processId() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
Identity of the streams instance that may have multiple clients.
+
+
processId() - Method in class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment
+
 
+
processId() - Method in interface org.apache.kafka.streams.processor.assignment.KafkaStreamsState
+
 
+
ProcessId - Class in org.apache.kafka.streams.processor.assignment
+
+
A simple wrapper around UUID that abstracts a Process ID
+
+
ProcessId(UUID) - Constructor for class org.apache.kafka.streams.processor.assignment.ProcessId
+
 
+
PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
processing.exception.handler
+
+
PROCESSING_EXCEPTION_HANDLER_CLASS_DOC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
PROCESSING_GUARANTEE_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
processing.guarantee
+
+
PROCESSING_THREADS_ENABLED - Static variable in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
ProcessingContext - Interface in org.apache.kafka.streams.processor.api
+
+
Processor context interface.
+
+
processingExceptionHandler - Variable in class org.apache.kafka.streams.TopologyConfig.TaskConfig
+
 
+
processingExceptionHandler() - Method in class org.apache.kafka.streams.StreamsConfig
+
 
+
ProcessingExceptionHandler - Interface in org.apache.kafka.streams.errors
+
+
An interface that allows user code to inspect a record that has failed processing
+
+
ProcessingExceptionHandler.ProcessingHandlerResponse - Enum Class in org.apache.kafka.streams.errors
+
 
+
processingExceptionHandlerSupplier - Variable in class org.apache.kafka.streams.TopologyConfig
+
 
+
processingThreadsEnabled(Map<String, Object>) - Static method in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
processor() - Method in interface org.apache.kafka.streams.TopologyDescription.GlobalStore
+
+
The processor node maintaining the global store.
+
+
Processor<KIn,VIn,KOut,VOut> - Interface in org.apache.kafka.streams.processor.api
+
+
A processor of key-value pair records.
+
+
PROCESSOR_WRAPPER_CLASS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
processor.wrapper.class
+
+
ProcessorContext<KForward,VForward> - Interface in org.apache.kafka.streams.processor.api
+
+
Processor context interface for Record.
+
+
ProcessorContext - Interface in org.apache.kafka.streams.processor
+
+
Processor context interface.
+
+
processorNodeId() - Method in interface org.apache.kafka.streams.errors.ErrorHandlerContext
+
+
Return the current processor node ID.
+
+
ProcessorStateException - Exception in org.apache.kafka.streams.errors
+
+
Indicates a processor state operation (e.g.
+
+
ProcessorStateException(String) - Constructor for exception org.apache.kafka.streams.errors.ProcessorStateException
+
 
+
ProcessorStateException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.ProcessorStateException
+
 
+
ProcessorStateException(Throwable) - Constructor for exception org.apache.kafka.streams.errors.ProcessorStateException
+
 
+
ProcessorSupplier<KIn,VIn,KOut,VOut> - Interface in org.apache.kafka.streams.processor.api
+
+
A processor supplier that can create one or more Processor instances.
+
+
ProcessorWrapper - Interface in org.apache.kafka.streams.processor.api
+
+
Wrapper class that can be used to inject custom wrappers around the processors of their application topology.
+
+
processValues(FixedKeyProcessorSupplier<? super K, ? super V, ? extends VOut>, String...) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Process all records in this stream, one record at a time, by applying a FixedKeyProcessor (provided by the given FixedKeyProcessorSupplier) to each input record.
+
+
processValues(FixedKeyProcessorSupplier<? super K, ? super V, ? extends VOut>, Named, String...) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
PRODUCE - Enum constant in enum class org.apache.kafka.server.quota.ClientQuotaType
+
 
+
Produced<K,V> - Class in org.apache.kafka.streams.kstream
+
+
This class is used to provide the optional parameters when producing to new topics using KStream.to(String, Produced).
+
+
producedTopicNames() - Method in class org.apache.kafka.streams.TopologyTestDriver
+
+
Get all the names of all the topics to which records have been produced during the test run.
+
+
Producer<K,V> - Interface in org.apache.kafka.clients.producer
+
+
The interface for the KafkaProducer
+
+
PRODUCER - Enum constant in enum class org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest.ClientType
+
 
+
PRODUCER_CLIENT_PREFIX - Static variable in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
 
+
PRODUCER_METRIC_GROUP_NAME - Static variable in class org.apache.kafka.clients.producer.KafkaProducer
+
 
+
PRODUCER_PREFIX - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix used to isolate producer configs from other client configs.
+
+
PRODUCER_SNAPSHOT - Enum constant in enum class org.apache.kafka.server.log.remote.storage.RemoteStorageManager.IndexType
+
+
Represents producer snapshot index.
+
+
producerClientIds() - Method in interface org.apache.kafka.streams.ThreadMetadata
+
+
Client IDs of the Kafka producers used by the stream thread.
+
+
producerConfig() - Method in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
+
Sub-config for Producer clients.
+
+
ProducerConfig - Class in org.apache.kafka.clients.producer
+
+
Configuration for the Kafka Producer.
+
+
ProducerConfig(Map<String, Object>) - Constructor for class org.apache.kafka.clients.producer.ProducerConfig
+
 
+
ProducerConfig(Properties) - Constructor for class org.apache.kafka.clients.producer.ProducerConfig
+
 
+
producerEpoch() - Method in class org.apache.kafka.clients.admin.AbortTransactionSpec
+
 
+
producerEpoch() - Method in class org.apache.kafka.clients.admin.ProducerState
+
 
+
producerEpoch() - Method in class org.apache.kafka.clients.admin.TransactionDescription
+
 
+
ProducerFencedException - Exception in org.apache.kafka.common.errors
+
+
This fatal exception indicates that another producer with the same transactional.id has been started.
+
+
ProducerFencedException(String) - Constructor for exception org.apache.kafka.common.errors.ProducerFencedException
+
 
+
producerId() - Method in class org.apache.kafka.clients.admin.AbortTransactionSpec
+
 
+
producerId() - Method in class org.apache.kafka.clients.admin.ProducerState
+
 
+
producerId() - Method in class org.apache.kafka.clients.admin.TransactionDescription
+
 
+
producerId() - Method in class org.apache.kafka.clients.admin.TransactionListing
+
 
+
producerId() - Method in class org.apache.kafka.clients.producer.PreparedTxnState
+
 
+
producerId(String) - Method in class org.apache.kafka.clients.admin.FenceProducersResult
+
+
Returns a future that provides the producer ID generated while initializing the given transaction when the request completes.
+
+
producerInstanceIds() - Method in interface org.apache.kafka.streams.ClientInstanceIds
+
+
Returns the client instance id of the producers.
+
+
ProducerInterceptor<K,V> - Interface in org.apache.kafka.clients.producer
+
+
A plugin interface that allows you to intercept (and possibly mutate) the records received by the producer before they are published to the Kafka cluster.
+
+
producerPrefix(String) - Static method in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix a property with StreamsConfig.PRODUCER_PREFIX.
+
+
ProducerRecord<K,V> - Class in org.apache.kafka.clients.producer
+
+
A key/value pair to be sent to Kafka.
+
+
ProducerRecord(String, Integer, Long, K, V) - Constructor for class org.apache.kafka.clients.producer.ProducerRecord
+
+
Creates a record with a specified timestamp to be sent to a specified topic and partition
+
+
ProducerRecord(String, Integer, Long, K, V, Iterable<Header>) - Constructor for class org.apache.kafka.clients.producer.ProducerRecord
+
+
Creates a record with a specified timestamp to be sent to a specified topic and partition
+
+
ProducerRecord(String, Integer, K, V) - Constructor for class org.apache.kafka.clients.producer.ProducerRecord
+
+
Creates a record to be sent to a specified topic and partition
+
+
ProducerRecord(String, Integer, K, V, Iterable<Header>) - Constructor for class org.apache.kafka.clients.producer.ProducerRecord
+
+
Creates a record to be sent to a specified topic and partition
+
+
ProducerRecord(String, K, V) - Constructor for class org.apache.kafka.clients.producer.ProducerRecord
+
+
Create a record to be sent to Kafka
+
+
ProducerRecord(String, V) - Constructor for class org.apache.kafka.clients.producer.ProducerRecord
+
+
Create a record with no key
+
+
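The ProducerRecord constructors above differ only in how much the caller pins down (partition, timestamp, key). A sketch; the topic name, key, and value are placeholders:

```java
import org.apache.kafka.clients.producer.ProducerRecord;

ProducerRecord<String, String> valueOnly =
        new ProducerRecord<>("events", "payload");                      // no key: partitioner decides
ProducerRecord<String, String> keyed =
        new ProducerRecord<>("events", "user-42", "payload");           // same key maps to the same partition
ProducerRecord<String, String> pinned =
        new ProducerRecord<>("events", 0, System.currentTimeMillis(),
                "user-42", "payload");                                   // explicit partition and timestamp
```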
producerSnapshotIndex() - Method in class org.apache.kafka.server.log.remote.storage.LogSegmentData
+
 
+
ProducerState - Class in org.apache.kafka.clients.admin
+
 
+
ProducerState(long, int, int, long, OptionalInt, OptionalLong) - Constructor for class org.apache.kafka.clients.admin.ProducerState
+
 
+
PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
production.exception.handler
+
+
productionExceptionHandler() - Method in class org.apache.kafka.streams.StreamsConfig
+
 
+
ProductionExceptionHandler - Interface in org.apache.kafka.streams.errors
+
+
Interface that specifies how an exception when attempting to produce a result to Kafka should be handled.
+
+
ProductionExceptionHandler.ProductionExceptionHandlerResponse - Enum Class in org.apache.kafka.streams.errors
+
 
+
ProductionExceptionHandler.SerializationExceptionOrigin - Enum Class in org.apache.kafka.streams.errors
+
 
+
project(Schema, Object, Schema) - Static method in class org.apache.kafka.connect.data.SchemaProjector
+
+
This method projects a value between compatible schemas and throws exceptions when non-compatible schemas are provided
+
+
PROMOTED - Enum constant in enum class org.apache.kafka.streams.processor.StandbyUpdateListener.SuspendReason
+
 
+
protocol() - Method in class org.apache.kafka.clients.admin.ClassicGroupDescription
+
+
The group protocol type.
+
+
protocol() - Method in class org.apache.kafka.clients.admin.GroupListing
+
+
The protocol of the group.
+
+
protocolData() - Method in class org.apache.kafka.clients.admin.ClassicGroupDescription
+
+
The group protocol data.
+
+
protocolTypes() - Method in class org.apache.kafka.clients.admin.ListGroupsOptions
+
+
Returns the list of protocol types that are requested or empty if no protocol types have been specified.
+
+
punctuate(long) - Method in interface org.apache.kafka.streams.processor.Punctuator
+
+
Perform the scheduled periodic operation.
+
+
PunctuationType - Enum Class in org.apache.kafka.streams.processor
+
+
Controls what notion of time is used for punctuation scheduled via schedule: STREAM_TIME - uses "stream time", which is advanced by the processing of messages in accordance with the timestamp as extracted by the TimestampExtractor in use.
+
+
Punctuator - Interface in org.apache.kafka.streams.processor
+
+
A functional interface used as an argument to ProcessingContext.schedule(Duration, PunctuationType, Punctuator).
+
+
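To show how PunctuationType and Punctuator fit together, here is a bare-bones Processor sketch that registers a wall-clock punctuation in init(); the processor only logs and forwards, and all names are illustrative:

```java
import java.time.Duration;
import org.apache.kafka.streams.processor.PunctuationType;
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;

public class LoggingProcessor implements Processor<String, String, String, String> {
    private ProcessorContext<String, String> context;

    @Override
    public void init(ProcessorContext<String, String> context) {
        this.context = context;
        // Punctuator is a functional interface with punctuate(long timestamp).
        context.schedule(Duration.ofSeconds(30), PunctuationType.WALL_CLOCK_TIME,
                timestamp -> System.out.println("punctuate at " + timestamp));
    }

    @Override
    public void process(Record<String, String> record) {
        context.forward(record);  // pass the record downstream unchanged
    }
}
```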
put(String, Object) - Method in class org.apache.kafka.connect.data.Struct
+
+
Set the value of a field.
+
+
put(Collection<SinkRecord>) - Method in class org.apache.kafka.connect.sink.SinkTask
+
+
Put the records in the sink.
+
+
put(Collection<SinkRecord>) - Method in class org.apache.kafka.connect.tools.MockSinkTask
+
 
+
put(Collection<SinkRecord>) - Method in class org.apache.kafka.connect.tools.VerifiableSinkTask
+
 
+
put(K, V) - Method in interface org.apache.kafka.streams.state.KeyValueStore
+
+
Update the value associated with this key.
+
+
put(K, V, long) - Method in interface org.apache.kafka.streams.state.VersionedKeyValueStore
+
+
Add a new record version associated with the specified key and timestamp.
+
+
put(K, V, long) - Method in interface org.apache.kafka.streams.state.WindowStore
+
+
Put a key-value pair into the window with given window start timestamp
+
+
put(Bytes, byte[], long) - Method in interface org.apache.kafka.streams.state.VersionedBytesStore
+
+ +
+
put(Field, Object) - Method in class org.apache.kafka.connect.data.Struct
+
+
Set the value of a field.
+
+
put(Windowed<K>, AGG) - Method in interface org.apache.kafka.streams.state.SessionStore
+
+
Write the aggregated value for the provided key to the store
+
+
PUT_RETURN_CODE_NOT_PUT - Static variable in interface org.apache.kafka.streams.state.VersionedKeyValueStore
+
 
+
PUT_RETURN_CODE_VALID_TO_UNDEFINED - Static variable in interface org.apache.kafka.streams.state.VersionedKeyValueStore
+
 
+
putAll(List<KeyValue<K, V>>) - Method in interface org.apache.kafka.streams.state.KeyValueStore
+
+
Update all the given key/value pairs.
+
+
putIfAbsent(K, V) - Method in interface org.apache.kafka.streams.state.KeyValueStore
+
+
Update the value associated with this key, unless a value is already associated with the key.
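
To put these state-store write methods in context, a sketch of a processor that maintains per-key counts in a KeyValueStore; the store name "counts" and the types are assumptions, and the store must be added to the topology separately:

```java
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;
import org.apache.kafka.streams.state.KeyValueStore;

public class CountingProcessor implements Processor<String, String, String, Long> {
    private KeyValueStore<String, Long> store;
    private ProcessorContext<String, Long> context;

    @Override
    public void init(ProcessorContext<String, Long> context) {
        this.context = context;
        // The store must have been attached to this processor under the name "counts".
        this.store = context.getStateStore("counts");
    }

    @Override
    public void process(Record<String, String> record) {
        store.putIfAbsent(record.key(), 0L);          // initialize the counter if absent
        Long updated = store.get(record.key()) + 1;
        store.put(record.key(), updated);             // update the value for this key
        context.forward(new Record<>(record.key(), updated, record.timestamp()));
    }
}
```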
+
+
putRemotePartitionDeleteMetadata(RemotePartitionDeleteMetadata) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager
+
+
This method is used to update the metadata about remote partition delete event asynchronously.
+
+
+

Q

+
+
query(Query<R>, PositionBound, QueryConfig) - Method in interface org.apache.kafka.streams.processor.StateStore
+
+
Execute a query.
+
+
query(StateQueryRequest<R>) - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Run an interactive query against a state store.
+
+
Query<R> - Interface in org.apache.kafka.streams.query
+
+
Marker interface that all interactive queries must implement (see KafkaStreams.query(StateQueryRequest)).
+
+
queryableStoreName() - Method in interface org.apache.kafka.streams.kstream.GlobalKTable
+
+
Get the name of the local state store that can be used to query this GlobalKTable.
+
+
queryableStoreName() - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Get the name of the local state store that can be used to query this KTable.
+
+
queryableStoreType() - Method in class org.apache.kafka.streams.StoreQueryParameters
+
+
Get the queryable store type for which key is queried by the user.
+
+
QueryableStoreType<T> - Interface in org.apache.kafka.streams.state
+
+
Used to enable querying of custom StateStore types via the KafkaStreams API.
+
+
QueryableStoreTypes - Class in org.apache.kafka.streams.state
+
+
Provides access to the QueryableStoreTypes provided with KafkaStreams.
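
A minimal sketch of wiring these interactive-query types together, assuming a started KafkaStreams instance whose topology materializes a key-value store named "counts":

```java
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public class CountLookup {
    // 'streams' must already be started and must materialize a store named "counts" (assumption).
    static Long lookup(KafkaStreams streams, String key) {
        ReadOnlyKeyValueStore<String, Long> counts = streams.store(
                StoreQueryParameters.fromNameAndType("counts", QueryableStoreTypes.keyValueStore()));
        return counts.get(key);
    }
}
```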
+
+
QueryableStoreTypes() - Constructor for class org.apache.kafka.streams.state.QueryableStoreTypes
+
 
+
QueryableStoreTypes.KeyValueStoreType<K,V> - Class in org.apache.kafka.streams.state
+
 
+
QueryableStoreTypes.SessionStoreType<K,V> - Class in org.apache.kafka.streams.state
+
 
+
QueryableStoreTypes.WindowStoreType<K,V> - Class in org.apache.kafka.streams.state
+
 
+
QueryConfig - Class in org.apache.kafka.streams.query
+
+
Runtime configuration parameters
+
+
QueryConfig(boolean) - Constructor for class org.apache.kafka.streams.query.QueryConfig
+
 
+
queryMetadataForKey(String, K, Serializer<K>) - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Finds the metadata containing the active hosts and standby hosts where the key being queried would reside.
+
+
queryMetadataForKey(String, K, StreamPartitioner<? super K, ?>) - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Finds the metadata containing the active hosts and standby hosts where the key being queried would reside.
+
+
QueryResult<R> - Interface in org.apache.kafka.streams.query
+
+
Container for a single partition's result when executing a StateQueryRequest.
+
+
quorumInfo() - Method in class org.apache.kafka.clients.admin.DescribeMetadataQuorumResult
+
+
Returns a future containing the QuorumInfo
+
+
QuorumInfo - Class in org.apache.kafka.clients.admin
+
+
This class is used to describe the state of the quorum received in DescribeQuorumResponse.
+
+
QuorumInfo.Node - Class in org.apache.kafka.clients.admin
+
 
+
QuorumInfo.ReplicaState - Class in org.apache.kafka.clients.admin
+
 
+
quota() - Method in class org.apache.kafka.common.metrics.MetricConfig
+
 
+
quota(Quota) - Method in class org.apache.kafka.common.metrics.MetricConfig
+
 
+
Quota - Class in org.apache.kafka.common.metrics
+
+
An upper or lower bound for metrics
+
+
Quota(double, boolean) - Constructor for class org.apache.kafka.common.metrics.Quota
+
 
+
quotaLimit(ClientQuotaType, Map<String, String>) - Method in interface org.apache.kafka.server.quota.ClientQuotaCallback
+
+
Returns the quota limit associated with the provided metric tags.
+
+
quotaMetricTags(ClientQuotaType, KafkaPrincipal, String) - Method in interface org.apache.kafka.server.quota.ClientQuotaCallback
+
+
Quota callback invoked to determine the quota metric tags to be applied for a request.
+
+
quotaResetRequired(ClientQuotaType) - Method in interface org.apache.kafka.server.quota.ClientQuotaCallback
+
+
Returns true if any of the existing quota configs may have been updated since the last call to this method for the provided quota type.
+
+
QuotaViolationException - Exception in org.apache.kafka.common.metrics
+
+
Thrown when a sensor records a value that causes a metric to go outside the bounds configured as its quota
+
+
QuotaViolationException(KafkaMetric, double, double) - Constructor for exception org.apache.kafka.common.metrics.QuotaViolationException
+
 
+
+

R

+
+
rack() - Method in class org.apache.kafka.common.Node
+
+
The rack for this node
+
+
RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
rack.aware.assignment.non_overlap_cost
+
+
RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_DOC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY - Static variable in class org.apache.kafka.streams.StreamsConfig
+
 
+
RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
rack.aware.assignment.strategy
+
+
RACK_AWARE_ASSIGNMENT_STRATEGY_DOC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
 
+
RACK_AWARE_ASSIGNMENT_STRATEGY_NONE - Static variable in class org.apache.kafka.streams.StreamsConfig
+
 
+
RACK_AWARE_ASSIGNMENT_TAGS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
rack.aware.assignment.tags
+
+
RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
rack.aware.assignment.traffic_cost
+
+
RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_DOC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
rackAwareAssignmentStrategy() - Method in class org.apache.kafka.streams.processor.assignment.AssignmentConfigs
+
+
The rack-aware assignment strategy as configured via StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG
+
+
rackAwareAssignmentTags() - Method in class org.apache.kafka.streams.processor.assignment.AssignmentConfigs
+
+
The rack-aware assignment tags as configured via StreamsConfig.RACK_AWARE_ASSIGNMENT_TAGS_CONFIG
+
+
rackAwareNonOverlapCost() - Method in class org.apache.kafka.streams.processor.assignment.AssignmentConfigs
+
+
The rack-aware assignment non-overlap cost as configured via StreamsConfig.RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG
+
+
rackAwareTrafficCost() - Method in class org.apache.kafka.streams.processor.assignment.AssignmentConfigs
+
+
The rack-aware assignment traffic cost as configured via StreamsConfig.RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG
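
A hedged configuration sketch combining the rack-aware assignment settings above; the cost values and tag names are illustrative:

```java
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class RackAwareConfig {
    static Properties rackAwareProps() {
        Properties props = new Properties();
        // Prefer assignments that minimize cross-rack traffic.
        props.put(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG,
                  StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC);
        // Relative weights used by the assignor (example values).
        props.put(StreamsConfig.RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG, 10);
        props.put(StreamsConfig.RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG, 1);
        // Distribute standby tasks across these client tag keys (the tags themselves
        // must also be set on each instance via client.tag.<key>).
        props.put(StreamsConfig.RACK_AWARE_ASSIGNMENT_TAGS_CONFIG, "zone,cluster");
        return props;
    }
}
```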
+
+
rackId() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
The rack ID of the group member.
+
+
rackId() - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
+
 
+
rackId() - Method in interface org.apache.kafka.coordinator.group.api.assignor.MemberSubscription
+
+
Gets the rack Id if present.
+
+
rackId() - Method in interface org.apache.kafka.streams.processor.assignment.KafkaStreamsState
+
 
+
rackIds() - Method in interface org.apache.kafka.streams.processor.assignment.TaskTopicPartition
+
 
+
racksForPartition(Uuid, int) - Method in interface org.apache.kafka.coordinator.group.api.assignor.SubscribedTopicDescriber
+
+
Returns all the available racks associated with the replicas of the given partition.
+
+
RaftVoterEndpoint - Class in org.apache.kafka.clients.admin
+
+
An endpoint for a raft quorum voter.
+
+
RaftVoterEndpoint(String, String, int) - Constructor for class org.apache.kafka.clients.admin.RaftVoterEndpoint
+
+
Create an endpoint for a metadata quorum voter.
+
+
raiseError(Exception) - Method in interface org.apache.kafka.connect.connector.ConnectorContext
+
+
Raise an unrecoverable exception to the Connect framework.
+
+
randomProcessId() - Static method in class org.apache.kafka.streams.processor.assignment.ProcessId
+
 
+
randomUuid() - Static method in class org.apache.kafka.common.Uuid
+
+
Static factory to retrieve a type 4 (pseudo randomly generated) UUID.
+
+
range(K, K) - Method in interface org.apache.kafka.streams.state.ReadOnlyKeyValueStore
+
+
Get an iterator over a given range of keys.
+
+
RANGE_ASSIGNOR_NAME - Static variable in class org.apache.kafka.clients.consumer.RangeAssignor
+
 
+
RangeAssignor - Class in org.apache.kafka.clients.consumer
+
+
The range assignor works on a per-topic basis.
+
+
RangeAssignor() - Constructor for class org.apache.kafka.clients.consumer.RangeAssignor
+
 
+
RangeQuery<K,V> - Class in org.apache.kafka.streams.query
+
+
Interactive query for issuing range queries and scans over KeyValue stores.
+
+
Rate - Class in org.apache.kafka.common.metrics.stats
+
+
The rate of the given quantity.
+
+
Rate() - Constructor for class org.apache.kafka.common.metrics.stats.Rate
+
 
+
Rate(TimeUnit) - Constructor for class org.apache.kafka.common.metrics.stats.Rate
+
 
+
Rate(TimeUnit, SampledStat) - Constructor for class org.apache.kafka.common.metrics.stats.Rate
+
 
+
Rate(SampledStat) - Constructor for class org.apache.kafka.common.metrics.stats.Rate
+
 
+
rawKey(K) - Method in class org.apache.kafka.streams.state.StateSerdes
+
+
Serialize the given key.
+
+
rawValue(V) - Method in class org.apache.kafka.streams.state.StateSerdes
+
+
Serialize the given value.
+
+
READ - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
READ operation.
+
+
READ_COMMITTED - Enum constant in enum class org.apache.kafka.common.IsolationLevel
+
 
+
READ_UNCOMMITTED - Enum constant in enum class org.apache.kafka.common.IsolationLevel
+
 
+
readKeyValue() - Method in class org.apache.kafka.streams.TestOutputTopic
+
+
Read one record from the output topic and return its key and value as a pair.
+
+
readKeyValuesToList() - Method in class org.apache.kafka.streams.TestOutputTopic
+
+
Read all KeyValues from topic to List.
+
+
readKeyValuesToMap() - Method in class org.apache.kafka.streams.TestOutputTopic
+
+
Read output to map.
+
+
ReadOnlyKeyValueStore<K,V> - Interface in org.apache.kafka.streams.state
+
+
A key-value store that only supports read operations.
+
+
ReadOnlySessionStore<K,AGG> - Interface in org.apache.kafka.streams.state
+
+
A session store that only supports read operations.
+
+
ReadOnlyWindowStore<K,V> - Interface in org.apache.kafka.streams.state
+
+
A window store that only supports read operations.
+
+
readRecord() - Method in class org.apache.kafka.streams.TestOutputTopic
+
+
Read one Record from output topic.
+
+
readRecords(InputStream) - Method in interface org.apache.kafka.tools.api.RecordReader
+
+
Read a byte array from the input stream and then generate an iterator of producer records.
+
+
readRecordsToList() - Method in class org.apache.kafka.streams.TestOutputTopic
+
+
Read output to List.
+
+
readValue() - Method in class org.apache.kafka.streams.TestOutputTopic
+
+
Read one record from the output topic and return the record's value.
+
+
readValuesToList() - Method in class org.apache.kafka.streams.TestOutputTopic
+
+
Read all values from topic to List.
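
These TestOutputTopic read methods are normally used together with TopologyTestDriver; a minimal sketch assuming a pass-through topology with String serdes:

```java
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TestOutputTopic;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

public class PassThroughTest {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input", Consumed.with(Serdes.String(), Serdes.String()))
               .to("output", Produced.with(Serdes.String(), Serdes.String()));
        Topology topology = builder.build();

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "test");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"); // never contacted by the test driver

        try (TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
            TestInputTopic<String, String> in =
                    driver.createInputTopic("input", new StringSerializer(), new StringSerializer());
            TestOutputTopic<String, String> out =
                    driver.createOutputTopic("output", new StringDeserializer(), new StringDeserializer());

            in.pipeInput("k", "v");
            KeyValue<String, String> kv = out.readKeyValue();   // one record as a key/value pair
            System.out.println(kv);
            System.out.println(out.readKeyValuesToList());      // any remaining records, here empty
        }
    }
}
```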
+
+
reason() - Method in class org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions
+
 
+
reason(String) - Method in class org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions
+
+
Sets an optional reason.
+
+
ReassignmentInProgressException - Exception in org.apache.kafka.common.errors
+
+
Thrown if a request cannot be completed because a partition reassignment is in progress.
+
+
ReassignmentInProgressException(String) - Constructor for exception org.apache.kafka.common.errors.ReassignmentInProgressException
+
 
+
ReassignmentInProgressException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.ReassignmentInProgressException
+
 
+
reassignments() - Method in class org.apache.kafka.clients.admin.ListPartitionReassignmentsResult
+
+
Return a future which yields a map containing each partition's reassignments
+
+
rebalance(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
+
Simulate a rebalance event.
+
+
RebalanceInProgressException - Exception in org.apache.kafka.common.errors
+
 
+
RebalanceInProgressException() - Constructor for exception org.apache.kafka.common.errors.RebalanceInProgressException
+
 
+
RebalanceInProgressException(String) - Constructor for exception org.apache.kafka.common.errors.RebalanceInProgressException
+
 
+
RebalanceInProgressException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.RebalanceInProgressException
+
 
+
RebalanceInProgressException(Throwable) - Constructor for exception org.apache.kafka.common.errors.RebalanceInProgressException
+
 
+
REBALANCING - Enum constant in enum class org.apache.kafka.streams.KafkaStreams.State
+
 
+
RebootstrapRequiredException - Exception in org.apache.kafka.common.errors
+
 
+
RebootstrapRequiredException(String) - Constructor for exception org.apache.kafka.common.errors.RebootstrapRequiredException
+
 
+
RebootstrapRequiredException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.RebootstrapRequiredException
+
 
+
RECEIVE_BUFFER_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
RECEIVE_BUFFER_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
receive.buffer.bytes
+
+
RECEIVE_BUFFER_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
receive.buffer.bytes
+
+
RECEIVE_BUFFER_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
receive.buffer.bytes
+
+
recommendedValues() - Method in class org.apache.kafka.common.config.ConfigValue
+
 
+
recommendedValues(List<Object>) - Method in class org.apache.kafka.common.config.ConfigValue
+
 
+
recommender - Variable in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
RECONCILING - Enum constant in enum class org.apache.kafka.common.ConsumerGroupState
+
+
Deprecated.
+
RECONCILING - Enum constant in enum class org.apache.kafka.common.GroupState
+
 
+
Reconfigurable - Interface in org.apache.kafka.common
+
+
Interface for reconfigurable classes that support dynamic configuration.
+
+
RECONFIGURABLE_CONFIGS - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
RECONFIGURABLE_CONFIGS - Static variable in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
reconfigurableConfigs() - Method in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
reconfigurableConfigs() - Method in interface org.apache.kafka.common.metrics.MetricsReporter
+
 
+
reconfigurableConfigs() - Method in interface org.apache.kafka.common.Reconfigurable
+
+
Returns the names of configs that may be reconfigured.
+
+
reconfigurableConfigs() - Method in interface org.apache.kafka.common.security.auth.SslEngineFactory
+
+
Returns the names of configs that may be reconfigured.
+
+
reconfigure(Map<String, ?>) - Method in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
reconfigure(Map<String, ?>) - Method in interface org.apache.kafka.common.metrics.MetricsReporter
+
 
+
reconfigure(Map<String, ?>) - Method in interface org.apache.kafka.common.Reconfigurable
+
+
Reconfigures this instance with the given key-value pairs.
+
+
reconfigure(Map<String, String>) - Method in class org.apache.kafka.connect.connector.Connector
+
+
Reconfigure this Connector.
+
+
reconfigure(Map<String, String>) - Method in class org.apache.kafka.connect.tools.MockSinkConnector
+
 
+
reconfigure(Map<String, String>) - Method in class org.apache.kafka.connect.tools.MockSourceConnector
+
 
+
RECONNECT_BACKOFF_MAX_MS_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
+
reconnect.backoff.max.ms
+
+
RECONNECT_BACKOFF_MAX_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
reconnect.backoff.max.ms
+
+
RECONNECT_BACKOFF_MAX_MS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
reconnect.backoff.max.ms
+
+
RECONNECT_BACKOFF_MAX_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
reconnect.backoff.max.ms
+
+
RECONNECT_BACKOFF_MS_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
+
reconnect.backoff.ms
+
+
RECONNECT_BACKOFF_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
reconnect.backoff.ms
+
+
RECONNECT_BACKOFF_MS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
reconnect.backoff.ms
+
+
RECONNECT_BACKOFF_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
reconnect.backoff.ms
+
+
record() - Method in class org.apache.kafka.common.metrics.Sensor
+
+
Record an occurrence; this is just shorthand for record(1.0).
+
+
record() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedForward
+
+
The record that was forwarded.
+
+
record(double) - Method in class org.apache.kafka.common.metrics.Sensor
+
+
Record a value with this sensor
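
A short sketch of recording values through a Sensor with a couple of stats from this index; the metric and group names are illustrative:

```java
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Rate;

public class SensorExample {
    public static void main(String[] args) {
        try (Metrics metrics = new Metrics()) {
            Sensor requestSizes = metrics.sensor("request-sizes");
            requestSizes.add(metrics.metricName("request-size-avg", "example-metrics"), new Avg());
            requestSizes.add(metrics.metricName("request-rate", "example-metrics"), new Rate());

            requestSizes.record();        // shorthand for record(1.0)
            requestSizes.record(512.0);   // record a value with this sensor
        }
    }
}
```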
+
+
record(double) - Method in class org.apache.kafka.common.metrics.stats.Histogram
+
 
+
record(double, long) - Method in class org.apache.kafka.common.metrics.Sensor
+
+
Record a value at a known time.
+
+
record(double, long, boolean) - Method in class org.apache.kafka.common.metrics.Sensor
+
+
Record a value at a known time.
+
+
record(MetricConfig, double, long) - Method in interface org.apache.kafka.common.metrics.Stat
+
+
Record the given value
+
+
record(MetricConfig, double, long) - Method in class org.apache.kafka.common.metrics.stats.CumulativeCount
+
 
+
record(MetricConfig, double, long) - Method in class org.apache.kafka.common.metrics.stats.CumulativeSum
+
 
+
record(MetricConfig, double, long) - Method in class org.apache.kafka.common.metrics.stats.Meter
+
 
+
record(MetricConfig, double, long) - Method in class org.apache.kafka.common.metrics.stats.Rate
+
 
+
record(MetricConfig, double, long) - Method in class org.apache.kafka.common.metrics.stats.SampledStat
+
 
+
record(MetricConfig, double, long) - Method in class org.apache.kafka.common.metrics.stats.TokenBucket
+
 
+
record(MetricConfig, double, long) - Method in class org.apache.kafka.common.metrics.stats.Value
+
 
+
Record<K,V> - Class in org.apache.kafka.streams.processor.api
+
+
A data class representing an incoming record for processing in a Processor or a record to forward to downstream processors via ProcessorContext.
+
+
Record(K, V, long) - Constructor for class org.apache.kafka.streams.processor.api.Record
+
+
Convenience constructor in case you do not wish to specify any headers.
+
+
Record(K, V, long, Headers) - Constructor for class org.apache.kafka.streams.processor.api.Record
+
+
The full constructor, specifying all the attributes of the record.
+
+
RecordBatchTooLargeException - Exception in org.apache.kafka.common.errors
+
+
This record batch is larger than the maximum allowable size
+
+
RecordBatchTooLargeException() - Constructor for exception org.apache.kafka.common.errors.RecordBatchTooLargeException
+
 
+
RecordBatchTooLargeException(String) - Constructor for exception org.apache.kafka.common.errors.RecordBatchTooLargeException
+
 
+
RecordBatchTooLargeException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.RecordBatchTooLargeException
+
 
+
RecordBatchTooLargeException(Throwable) - Constructor for exception org.apache.kafka.common.errors.RecordBatchTooLargeException
+
 
+
recordCollector() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
recordCollector() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
RecordContext - Interface in org.apache.kafka.streams.processor
+
+
The context associated with the current record being processed by a Processor
+
+
RecordDeserializationException - Exception in org.apache.kafka.common.errors
+
+
This exception is raised for any error that occurs while deserializing records received by the consumer using the configured Deserializer.
+
+
RecordDeserializationException(RecordDeserializationException.DeserializationExceptionOrigin, TopicPartition, long, long, TimestampType, ByteBuffer, ByteBuffer, Headers, String, Throwable) - Constructor for exception org.apache.kafka.common.errors.RecordDeserializationException
+
 
+
RecordDeserializationException(TopicPartition, long, String, Throwable) - Constructor for exception org.apache.kafka.common.errors.RecordDeserializationException
+
+ +
+
RecordDeserializationException.DeserializationExceptionOrigin - Enum Class in org.apache.kafka.common.errors
+
 
+
recordLevel() - Method in class org.apache.kafka.common.metrics.MetricConfig
+
 
+
recordLevel(Sensor.RecordingLevel) - Method in class org.apache.kafka.common.metrics.MetricConfig
+
 
+
recordMetadata() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
recordMetadata() - Method in interface org.apache.kafka.streams.processor.api.ProcessingContext
+
+
Return the metadata of the current record if available.
+
+
recordMetadata() - Method in interface org.apache.kafka.streams.processor.StateStoreContext
+
+
Return the metadata of the current topic/partition/offset if available.
+
+
RecordMetadata - Class in org.apache.kafka.clients.producer
+
+
The metadata for a record that has been acknowledged by the server
+
+
RecordMetadata - Interface in org.apache.kafka.streams.processor.api
+
 
+
RecordMetadata(TopicPartition, long, int, long, int, int) - Constructor for class org.apache.kafka.clients.producer.RecordMetadata
+
+
Creates a new instance with the provided parameters.
+
+
RecordReader - Interface in org.apache.kafka.tools.api
+
+
Typical implementations of this interface convert data from an `InputStream` received via `readRecords` into an iterator of `ProducerRecord` instances.
+
+
records(String) - Method in class org.apache.kafka.clients.consumer.ConsumerRecords
+
+
Get just the records for the given topic
+
+
records(TopicPartition) - Method in class org.apache.kafka.clients.consumer.ConsumerRecords
+
+
Get just the records for the given partition
+
+
RecordsToDelete - Class in org.apache.kafka.clients.admin
+
+
Describe records to delete in a call to Admin.deleteRecords(Map)
+
+
RecordTooLargeException - Exception in org.apache.kafka.common.errors
+
+
This record is larger than the maximum allowable size
+
+
RecordTooLargeException() - Constructor for exception org.apache.kafka.common.errors.RecordTooLargeException
+
 
+
RecordTooLargeException(String) - Constructor for exception org.apache.kafka.common.errors.RecordTooLargeException
+
 
+
RecordTooLargeException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.RecordTooLargeException
+
 
+
RecordTooLargeException(String, Map<TopicPartition, Long>) - Constructor for exception org.apache.kafka.common.errors.RecordTooLargeException
+
 
+
RecordTooLargeException(Throwable) - Constructor for exception org.apache.kafka.common.errors.RecordTooLargeException
+
 
+
recordTooLargePartitions() - Method in exception org.apache.kafka.common.errors.RecordTooLargeException
+
 
+
reduce(Reducer<V>) - Method in interface org.apache.kafka.streams.kstream.KGroupedStream
+
+
Combine the values of records in this stream by the grouped key.
+
+
reduce(Reducer<V>) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedKStream
+
+
Combine the values of records in this stream by the grouped key and defined sessions.
+
+
reduce(Reducer<V>) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedKStream
+
+
Combine the values of records in this stream by the grouped key and defined windows.
+
+
reduce(Reducer<V>, Materialized<K, V, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KGroupedStream
+
+
Combine the value of records in this stream by the grouped key.
+
+
reduce(Reducer<V>, Materialized<K, V, SessionStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedKStream
+
+
Combine the values of records in this stream by the grouped key and defined sessions.
+
+
reduce(Reducer<V>, Materialized<K, V, WindowStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedKStream
+
+
Combine the values of records in this stream by the grouped key and defined windows.
+
+
reduce(Reducer<V>, Named) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedKStream
+
+
Combine the values of records in this stream by the grouped key and defined sessions.
+
+
reduce(Reducer<V>, Named) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedKStream
+
+
Combine the values of records in this stream by the grouped key and defined windows.
+
+
reduce(Reducer<V>, Named, Materialized<K, V, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KGroupedStream
+
+
Combine the value of records in this stream by the grouped key.
+
+
reduce(Reducer<V>, Named, Materialized<K, V, SessionStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.SessionWindowedKStream
+
+
Combine the values of records in this stream by the grouped key and defined sessions.
+
+
reduce(Reducer<V>, Named, Materialized<K, V, WindowStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.TimeWindowedKStream
+
+
Combine the values of records in this stream by the grouped key and defined windows.
+
+
reduce(Reducer<V>, Reducer<V>) - Method in interface org.apache.kafka.streams.kstream.KGroupedTable
+
+
Combine the value of records of the original KTable that got mapped to the same key into a new instance of KTable.
+
+
reduce(Reducer<V>, Reducer<V>, Materialized<K, V, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KGroupedTable
+
+
Combine the value of records of the original KTable that got mapped to the same key into a new instance of KTable.
+
+
reduce(Reducer<V>, Reducer<V>, Named, Materialized<K, V, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KGroupedTable
+
+
Combine the value of records of the original KTable that got mapped to the same key into a new instance of KTable.
+
+
Reducer<V> - Interface in org.apache.kafka.streams.kstream
+
+
The Reducer interface for combining two values of the same type into a new value.
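
To show the reduce overloads in use, a sketch that concatenates values per key and materializes the result; the topic, store, and serde choices are assumptions:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.state.KeyValueStore;

public class ReduceExample {
    static void buildTopology(StreamsBuilder builder) {
        KTable<String, String> reduced = builder
                .stream("input", Consumed.with(Serdes.String(), Serdes.String()))
                .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
                // Reducer<V>: combine two values of the same type into a new value.
                .reduce((oldValue, newValue) -> oldValue + "," + newValue,
                        Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("concat-store")
                                .withKeySerde(Serdes.String())
                                .withValueSerde(Serdes.String()));
        reduced.toStream().to("output");
    }
}
```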
+
+
REFERENCE_CONTAINER_PARTITION_ASSIGNOR - Static variable in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
RefreshRetriableException - Exception in org.apache.kafka.common.errors
+
+
Indicates that an operation failed due to outdated or invalid metadata, requiring a refresh (e.g., refreshing producer metadata) before retrying the request.
+
+
RefreshRetriableException() - Constructor for exception org.apache.kafka.common.errors.RefreshRetriableException
+
 
+
RefreshRetriableException(String) - Constructor for exception org.apache.kafka.common.errors.RefreshRetriableException
+
 
+
RefreshRetriableException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.RefreshRetriableException
+
 
+
RefreshRetriableException(Throwable) - Constructor for exception org.apache.kafka.common.errors.RefreshRetriableException
+
 
+
register(ConnectRestExtensionContext) - Method in interface org.apache.kafka.connect.rest.ConnectRestExtension
+
+
ConnectRestExtension implementations can register custom JAX-RS resources via this method.
+
+
register(StateStore, StateRestoreCallback) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
register(StateStore, StateRestoreCallback) - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Register and possibly restores the specified storage engine.
+
+
register(StateStore, StateRestoreCallback) - Method in interface org.apache.kafka.streams.processor.StateStoreContext
+
+
Registers and possibly restores the specified storage engine.
+
+
register(StateStore, StateRestoreCallback, CommitCallback) - Method in interface org.apache.kafka.streams.processor.StateStoreContext
+
+
Registers and possibly restores the specified storage engine.
+
+
registerMetricForSubscription(KafkaMetric) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Add the provided application metric for subscription.
+
+
registerMetricForSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
registerMetricForSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
registerMetricForSubscription(KafkaMetric) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
registerMetricForSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Add the provided application metric for subscription.
+
+
registerMetricForSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Add the provided application metric for subscription.
+
+
registerMetricForSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
registerMetricForSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
registerMetricForSubscription(KafkaMetric) - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
registerMetricForSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
Add the provided application metric for subscription.
+
+
registerMetricForSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
registerMetricForSubscription(KafkaMetric) - Method in interface org.apache.kafka.clients.producer.Producer
+
 
+
REJECT - Enum constant in enum class org.apache.kafka.clients.consumer.AcknowledgeType
+
+
The record was not consumed successfully.
+
+
RELEASE - Enum constant in enum class org.apache.kafka.clients.consumer.AcknowledgeType
+
+
The record was not consumed successfully.
+
+
REMAIN_IN_GROUP - Enum constant in enum class org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation
+
 
+
REMOTE_COPY_BYTES_PER_SEC_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_COPY_LAG_BYTES_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_COPY_LAG_SEGMENTS_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_COPY_REQUESTS_PER_SEC_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_DELETE_LAG_BYTES_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_DELETE_LAG_SEGMENTS_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_DELETE_REQUESTS_PER_SEC_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_FETCH_BYTES_PER_SEC_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_FETCH_REQUESTS_PER_SEC_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_LOG_COPY_DISABLE_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
REMOTE_LOG_COPY_DISABLE_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
REMOTE_LOG_DELETE_ON_DISABLE_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
REMOTE_LOG_DELETE_ON_DISABLE_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
REMOTE_LOG_MANAGER_TASKS_AVG_IDLE_PERCENT_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_LOG_METADATA_COUNT_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_LOG_READER_AVG_IDLE_PERCENT_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_LOG_READER_FETCH_RATE_AND_TIME_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_LOG_READER_TASK_QUEUE_SIZE_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_LOG_SIZE_BYTES_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_LOG_SIZE_COMPUTATION_TIME_METRIC - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
REMOTE_LOG_STORAGE_ENABLE_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
REMOTE_LOG_STORAGE_ENABLE_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
REMOTE_STORAGE_THREAD_POOL_METRICS - Static variable in class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
RemoteClusterUtils - Class in org.apache.kafka.connect.mirror
+
+
Convenience tool for multi-cluster environments.
+
+
remoteConsumerOffsets(String, String, Duration) - Method in class org.apache.kafka.connect.mirror.MirrorClient
+
+
Translates a remote consumer group's offsets into corresponding local offsets.
+
+
RemoteLogMetadata - Class in org.apache.kafka.server.log.remote.storage
+
+
Base class for remote log metadata objects like RemoteLogSegmentMetadata, RemoteLogSegmentMetadataUpdate, and RemotePartitionDeleteMetadata.
+
+
RemoteLogMetadataManager - Interface in org.apache.kafka.server.log.remote.storage
+
+
This interface provides storing and fetching remote log segment metadata with strongly consistent semantics.
+
+
remoteLogSegmentId() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
 
+
remoteLogSegmentId() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate
+
 
+
RemoteLogSegmentId - Class in org.apache.kafka.server.log.remote.storage
+
+
This class represents a universally unique identifier associated to a topic partition's log segment.
+
+
RemoteLogSegmentId(TopicIdPartition, Uuid) - Constructor for class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId
+
 
+
remoteLogSegmentMetadata(TopicIdPartition, int, long) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager
+
+
Returns RemoteLogSegmentMetadata if it exists for the given topic partition containing the offset with the given leader-epoch for the offset, else returns Optional.empty().
+
+
RemoteLogSegmentMetadata - Class in org.apache.kafka.server.log.remote.storage
+
+
It describes the metadata about a topic partition's remote log segment in the remote storage.
+
+
RemoteLogSegmentMetadata(RemoteLogSegmentId, long, long, long, int, long, int, Map<Integer, Long>) - Constructor for class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
+
Creates an instance with the given metadata of remote log segment and its state as RemoteLogSegmentState.COPY_SEGMENT_STARTED.
+
+
RemoteLogSegmentMetadata(RemoteLogSegmentId, long, long, long, int, long, int, Map<Integer, Long>, boolean) - Constructor for class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
+
Creates an instance with the given metadata of remote log segment and its state as RemoteLogSegmentState.COPY_SEGMENT_STARTED.
+
+
RemoteLogSegmentMetadata(RemoteLogSegmentId, long, long, long, int, long, int, Optional<RemoteLogSegmentMetadata.CustomMetadata>, RemoteLogSegmentState, Map<Integer, Long>) - Constructor for class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
+
Creates an instance with the given metadata of remote log segment.
+
+
RemoteLogSegmentMetadata(RemoteLogSegmentId, long, long, long, int, long, int, Optional<RemoteLogSegmentMetadata.CustomMetadata>, RemoteLogSegmentState, Map<Integer, Long>, boolean) - Constructor for class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
+
Creates an instance with the given metadata of remote log segment.
+
+
RemoteLogSegmentMetadata.CustomMetadata - Class in org.apache.kafka.server.log.remote.storage
+
+
Custom metadata from a RemoteStorageManager plugin.
+
+
RemoteLogSegmentMetadataUpdate - Class in org.apache.kafka.server.log.remote.storage
+
+
It describes the metadata update about the log segment in the remote storage.
+
+
RemoteLogSegmentMetadataUpdate(RemoteLogSegmentId, long, Optional<RemoteLogSegmentMetadata.CustomMetadata>, RemoteLogSegmentState, int) - Constructor for class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate
+
 
+
RemoteLogSegmentState - Enum Class in org.apache.kafka.server.log.remote.storage
+
+
This enum indicates the state of the remote log segment.
+
+
remoteLogSize(TopicIdPartition, int) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager
+
+
Returns total size of the log for the given leader epoch in remote storage.
+
+
RemotePartitionDeleteMetadata - Class in org.apache.kafka.server.log.remote.storage
+
+
This class represents the metadata about the remote partition.
+
+
RemotePartitionDeleteMetadata(TopicIdPartition, RemotePartitionDeleteState, long, int) - Constructor for class org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteMetadata
+
+
Creates an instance of this class with the given metadata.
+
+
RemotePartitionDeleteState - Enum Class in org.apache.kafka.server.log.remote.storage
+
+
This enum indicates the deletion state of the remote topic partition.
+
+
RemoteResourceNotFoundException - Exception in org.apache.kafka.server.log.remote.storage
+
+
Exception thrown when a resource is not found on the remote storage.
+
+
RemoteResourceNotFoundException(String) - Constructor for exception org.apache.kafka.server.log.remote.storage.RemoteResourceNotFoundException
+
 
+
RemoteResourceNotFoundException(String, Throwable) - Constructor for exception org.apache.kafka.server.log.remote.storage.RemoteResourceNotFoundException
+
 
+
RemoteResourceNotFoundException(Throwable) - Constructor for exception org.apache.kafka.server.log.remote.storage.RemoteResourceNotFoundException
+
 
+
RemoteStorageException - Exception in org.apache.kafka.server.log.remote.storage
+
+
Exception thrown when there is a remote storage error.
+
+
RemoteStorageException(String) - Constructor for exception org.apache.kafka.server.log.remote.storage.RemoteStorageException
+
 
+
RemoteStorageException(String, Throwable) - Constructor for exception org.apache.kafka.server.log.remote.storage.RemoteStorageException
+
 
+
RemoteStorageException(Throwable) - Constructor for exception org.apache.kafka.server.log.remote.storage.RemoteStorageException
+
 
+
RemoteStorageManager - Interface in org.apache.kafka.server.log.remote.storage
+
+
This interface provides the lifecycle of remote log segments that includes copy, fetch, and delete from remote storage.
+
+
RemoteStorageManager.IndexType - Enum Class in org.apache.kafka.server.log.remote.storage
+
+
Type of the index file.
+
+
RemoteStorageMetrics - Class in org.apache.kafka.server.log.remote.storage
+
+
This class contains the metrics related to the tiered storage feature, kept in a centralized place so that all of them can be verified easily.
+
+
RemoteStorageMetrics() - Constructor for class org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics
+
 
+
remoteTopics() - Method in class org.apache.kafka.connect.mirror.MirrorClient
+
+
Finds all remote topics on this cluster.
+
+
remoteTopics(String) - Method in class org.apache.kafka.connect.mirror.MirrorClient
+
+
Finds all remote topics that have been replicated directly from the given source cluster.
+
+
remove(String) - Method in interface org.apache.kafka.common.header.Headers
+
+
Removes all headers for the given key, returning whether the operation succeeded.
+
+
remove(String) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
remove(String) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Removes all Header objects whose key matches the specified key.
+
+
remove(Windowed<K>) - Method in interface org.apache.kafka.streams.state.SessionStore
+
+
Remove the session aggregated with provided Windowed key from the store
+
+
removeAll() - Method in class org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions
+
 
+
removeMembersFromConsumerGroup(String, RemoveMembersFromConsumerGroupOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Remove members from the consumer group by given member identities.
+
+
removeMembersFromConsumerGroup(String, RemoveMembersFromConsumerGroupOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
removeMembersFromConsumerGroup(String, RemoveMembersFromConsumerGroupOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
RemoveMembersFromConsumerGroupOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
RemoveMembersFromConsumerGroupOptions() - Constructor for class org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions
+
 
+
RemoveMembersFromConsumerGroupOptions(Collection<MemberToRemove>) - Constructor for class org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions
+
 
+
RemoveMembersFromConsumerGroupResult - Class in org.apache.kafka.clients.admin
+
+ +
+
removeMetric(MetricName) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Remove a metric if it exists and return it.
+
+
removeMetric(MetricName) - Method in interface org.apache.kafka.common.metrics.PluginMetrics
+
+
Remove a metric if it exists.
+
+
removeQuota(ClientQuotaType, ClientQuotaEntity) - Method in interface org.apache.kafka.server.quota.ClientQuotaCallback
+
+
Quota configuration removal callback that is invoked when quota configuration for an entity is removed in the quorum.
+
+
removeRaftVoter(int, Uuid) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Remove a voter node from the KRaft metadata quorum.
+
+
removeRaftVoter(int, Uuid, RemoveRaftVoterOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Remove a voter node from the KRaft metadata quorum.
+
+
removeRaftVoter(int, Uuid, RemoveRaftVoterOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
removeRaftVoter(int, Uuid, RemoveRaftVoterOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
RemoveRaftVoterOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
RemoveRaftVoterOptions() - Constructor for class org.apache.kafka.clients.admin.RemoveRaftVoterOptions
+
 
+
RemoveRaftVoterResult - Class in org.apache.kafka.clients.admin
+
+ +
+
removeReporter(MetricsReporter) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Remove a MetricReporter
+
+
removeSensor(String) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Remove a sensor (if it exists), associated metrics and its children.
+
+
removeSensor(String) - Method in interface org.apache.kafka.common.metrics.PluginMetrics
+
+
Remove a Sensor and its associated metrics.
+
+
removeSensor(Sensor) - Method in interface org.apache.kafka.streams.StreamsMetrics
+
+
Remove a sensor.
+
+
removeStreamThread() - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Removes one stream thread out of the running stream threads from this Kafka Streams client.
+
+
removeStreamThread(Duration) - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Removes one stream thread out of the running stream threads from this Kafka Streams client.
+
+
removeTask(KafkaStreamsAssignment.AssignedTask) - Method in class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment
+
 
+
removingReplicas() - Method in class org.apache.kafka.clients.admin.PartitionReassignment
+
+
The brokers that we are removing this partition from as part of a reassignment.
+
+
rename(String) - Method in interface org.apache.kafka.connect.header.Header
+
+
Return a new Header object that has the same schema and value but with the supplied key.
+
+
renewDelegationToken(byte[]) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Renew a Delegation Token.
+
+
renewDelegationToken(byte[], RenewDelegationTokenOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Renew a Delegation Token.
+
+
renewDelegationToken(byte[], RenewDelegationTokenOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
renewDelegationToken(byte[], RenewDelegationTokenOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
RenewDelegationTokenOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
RenewDelegationTokenOptions() - Constructor for class org.apache.kafka.clients.admin.RenewDelegationTokenOptions
+
 
+
RenewDelegationTokenResult - Class in org.apache.kafka.clients.admin
+
+ +
+
renewers() - Method in class org.apache.kafka.clients.admin.CreateDelegationTokenOptions
+
 
+
renewers() - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
renewers(List<KafkaPrincipal>) - Method in class org.apache.kafka.clients.admin.CreateDelegationTokenOptions
+
 
+
renewersAsString() - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
renewTimePeriodMs() - Method in class org.apache.kafka.clients.admin.RenewDelegationTokenOptions
+
 
+
renewTimePeriodMs(long) - Method in class org.apache.kafka.clients.admin.RenewDelegationTokenOptions
+
 
+
repartition() - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Materialize this stream to an auto-generated repartition topic and create a new KStream from the auto-generated topic.
+
+
repartition(Repartitioned<K, V>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
REPARTITION_PURGE_INTERVAL_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
repartition.purge.interval.ms
+
+
Repartitioned<K,V> - Class in org.apache.kafka.streams.kstream
+
+
This class is used to provide the optional parameters for internal repartition topics.
+
+
repartitionSinkTopics() - Method in class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription
+
+
The repartition topics the topology writes to.
+
+
repartitionSourceTopics() - Method in class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription
+
+
The set of source topics that are internally created repartition topics.
+
+
REPLACE_THREAD - Enum constant in enum class org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse
+
+
Replace the failed thread with a new one.
+
+
replicaDirectoryId() - Method in class org.apache.kafka.clients.admin.QuorumInfo.ReplicaState
+
+
Return the directory id of the replica if configured, or Uuid.ZERO_UUID if not.
+
+
replicaId() - Method in class org.apache.kafka.clients.admin.QuorumInfo.ReplicaState
+
+
Return the ID for this replica.
+
+
ReplicaInfo - Class in org.apache.kafka.clients.admin
+
+
A description of a replica on a particular broker.
+
+
ReplicaInfo(long, long, boolean) - Constructor for class org.apache.kafka.clients.admin.ReplicaInfo
+
 
+
replicaInfos() - Method in class org.apache.kafka.clients.admin.LogDirDescription
+
+
A map from topic partition to replica information for that partition in this log directory.
+
+
ReplicaNotAvailableException - Exception in org.apache.kafka.common.errors
+
+
The replica is not available for the requested topic partition.
+
+
ReplicaNotAvailableException(String) - Constructor for exception org.apache.kafka.common.errors.ReplicaNotAvailableException
+
 
+
ReplicaNotAvailableException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.ReplicaNotAvailableException
+
 
+
ReplicaNotAvailableException(Throwable) - Constructor for exception org.apache.kafka.common.errors.ReplicaNotAvailableException
+
 
+
replicas() - Method in class org.apache.kafka.clients.admin.PartitionReassignment
+
+
The brokers which this partition currently resides on.
+
+
replicas() - Method in class org.apache.kafka.common.PartitionInfo
+
+
The complete set of replicas for this partition regardless of whether they are alive or up-to-date
+
+
replicas() - Method in class org.apache.kafka.common.TopicPartitionInfo
+
+
Return the replicas of the partition in the same order as the replica assignment.
+
+
replicasAssignments() - Method in class org.apache.kafka.clients.admin.NewTopic
+
+
A map from partition id to replica ids (i.e.
+
+
replicasAssignments() - Method in class org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata
+
+
Return a map from partition id to replica (broker) ids or null if numPartitions and replicationFactor are set instead.
+
+
REPLICATION_FACTOR_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
replication.factor
+
+
REPLICATION_POLICY_CLASS - Static variable in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
 
+
REPLICATION_POLICY_CLASS_DEFAULT - Static variable in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
 
+
REPLICATION_POLICY_SEPARATOR - Static variable in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
 
+
REPLICATION_POLICY_SEPARATOR_DEFAULT - Static variable in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
 
+
replicationFactor() - Method in class org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig
+
 
+
replicationFactor() - Method in class org.apache.kafka.clients.admin.NewTopic
+
+
The replication factor for the new topic or -1 if a replica assignment has been specified.
+
+
replicationFactor() - Method in class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription.TopicInfo
+
+
The replication factor of the topic.
+
+
replicationFactor() - Method in class org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata
+
+
Return the number of replicas to create or null if replicaAssignments is not null.
+
+
replicationFactor(String) - Method in class org.apache.kafka.clients.admin.CreateTopicsResult
+
+
Returns a future that provides replication factor for the topic when the request completes.
+
+
replicationHops(String) - Method in class org.apache.kafka.connect.mirror.MirrorClient
+
+
Computes the shortest number of hops from an upstream source cluster.
+
+
replicationHops(Map<String, Object>, String) - Static method in class org.apache.kafka.connect.mirror.RemoteClusterUtils
+
+
Finds the shortest number of hops from an upstream cluster.
+
+
replicationPolicy() - Method in class org.apache.kafka.connect.mirror.MirrorClient
+
+
Gets the ReplicationPolicy instance used to interpret remote topics.
+
+
replicationPolicy() - Method in class org.apache.kafka.connect.mirror.MirrorClientConfig
+
 
+
ReplicationPolicy - Interface in org.apache.kafka.connect.mirror
+
+
An interface used by the MirrorMaker connectors to manage topics names between source and target clusters.
+
+
report(SinkRecord, Throwable) - Method in interface org.apache.kafka.connect.sink.ErrantRecordReporter
+
+
Report a problematic record and the corresponding error to be written to the sink connector's dead letter queue (DLQ).
+
+
reporters() - Method in class org.apache.kafka.common.metrics.Metrics
+
 
+
REQUEST - Enum constant in enum class org.apache.kafka.server.quota.ClientQuotaType
+
 
+
REQUEST_TIMEOUT_MS_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
+
request.timeout.ms
+
+
REQUEST_TIMEOUT_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
request.timeout.ms
+
+
REQUEST_TIMEOUT_MS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
request.timeout.ms
+
+
REQUEST_TIMEOUT_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
request.timeout.ms
+
+
requestCommit() - Method in interface org.apache.kafka.connect.sink.SinkTaskContext
+
+
Request an offset commit.
+
+
REQUESTED - Enum constant in enum class org.apache.kafka.common.config.SslClientAuth
+
 
+
RequestMetadata(String, Integer, Short, Map<Integer, List<Integer>>, Map<String, String>) - Constructor for class org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata
+
+
Create an instance of this class with the provided parameters.
+
+
RequestMetadata(ConfigResource, Map<String, String>) - Constructor for class org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata
+
+
Create an instance of this class with the provided parameters.
+
+
requestTaskReconfiguration() - Method in interface org.apache.kafka.connect.connector.ConnectorContext
+
+
Requests that the runtime reconfigure the Tasks for this source.
+
+
requestType() - Method in interface org.apache.kafka.server.authorizer.AuthorizableRequestContext
+
+
16-bit API key of the request from the request header.
+
+
requestVersion() - Method in interface org.apache.kafka.server.authorizer.AuthorizableRequestContext
+
+
Returns the request version from the request header.
+
+
requireActive() - Method in class org.apache.kafka.streams.query.StateQueryRequest
+
+
Specifies that this query should only run on partitions for which this instance is the leader (aka "active").
+
+
required() - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
+
Set this schema as required.
+
+
REQUIRED - Enum constant in enum class org.apache.kafka.common.config.SslClientAuth
+
 
+
requireStable() - Method in class org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions
+
 
+
requireStable() - Method in class org.apache.kafka.clients.admin.ListStreamsGroupOffsetsOptions
+
 
+
requireStable(boolean) - Method in class org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions
+
+
Sets an optional requireStable flag.
+
+
requireStable(boolean) - Method in class org.apache.kafka.clients.admin.ListStreamsGroupOffsetsOptions
+
+
Sets an optional requireStable flag.
+
+
RESERVED - Static variable in class org.apache.kafka.common.Uuid
+
+
The set of reserved UUIDs that will never be returned by the randomUuid method.
+
+
resetCommit() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
+
Reset the commit capture to false (whether or not it was previously true).
+
+
resetCommit() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
Reset the commit capture to false (whether or not it was previously true).
+
+
resetForwards() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
+
Clear the captured forwarded data.
+
+
resetForwards() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
Clear the captured forwarded data.
+
+
resetShouldRebalance() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
resolveDslStoreSuppliers() - Method in class org.apache.kafka.streams.TopologyConfig
+
 
+
resource() - Method in exception org.apache.kafka.common.errors.DuplicateResourceException
+
 
+
resource() - Method in exception org.apache.kafka.common.errors.ResourceNotFoundException
+
 
+
resource() - Method in class org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata
+
 
+
Resource - Class in org.apache.kafka.common.resource
+
+
Represents a cluster resource with a tuple of (type, name).
+
+
Resource(ResourceType, String) - Constructor for class org.apache.kafka.common.resource.Resource
+
+
Create an instance of this class with the provided parameters.
+
+
ResourceNotFoundException - Exception in org.apache.kafka.common.errors
+
+
Exception thrown due to a request for a resource that does not exist.
+
+
ResourceNotFoundException(String) - Constructor for exception org.apache.kafka.common.errors.ResourceNotFoundException
+
+
Constructor
+
+
ResourceNotFoundException(String, String) - Constructor for exception org.apache.kafka.common.errors.ResourceNotFoundException
+
+
Constructor
+
+
ResourceNotFoundException(String, String, Throwable) - Constructor for exception org.apache.kafka.common.errors.ResourceNotFoundException
+
+
Constructor
+
+
ResourceNotFoundException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.ResourceNotFoundException
+
 
+
resourcePattern() - Method in class org.apache.kafka.server.authorizer.Action
+
 
+
ResourcePattern - Class in org.apache.kafka.common.resource
+
+
Represents a pattern that is used by ACLs to match zero or more Resources.
+
+
ResourcePattern(ResourceType, String, PatternType) - Constructor for class org.apache.kafka.common.resource.ResourcePattern
+
+
Create a pattern using the supplied parameters.
+
+
ResourcePatternFilter - Class in org.apache.kafka.common.resource
+
+
Represents a filter that can match ResourcePattern.
+
+
ResourcePatternFilter(ResourceType, String, PatternType) - Constructor for class org.apache.kafka.common.resource.ResourcePatternFilter
+
+
Create a filter using the supplied parameters.
+
+
resourceReferenceCount() - Method in class org.apache.kafka.server.authorizer.Action
+
+
Number of times the resource being authorized is referenced within the request.
+
+
resourceType() - Method in class org.apache.kafka.common.resource.Resource
+
+
Return the resource type.
+
+
resourceType() - Method in class org.apache.kafka.common.resource.ResourcePattern
+
 
+
resourceType() - Method in class org.apache.kafka.common.resource.ResourcePatternFilter
+
 
+
ResourceType - Enum Class in org.apache.kafka.common.resource
+
+
Represents a type of resource which an ACL can be applied to.
+
+
restore(byte[], byte[]) - Method in interface org.apache.kafka.streams.processor.BatchingStateRestoreCallback
+
 
+
restore(byte[], byte[]) - Method in interface org.apache.kafka.streams.processor.StateRestoreCallback
+
 
+
RESTORE_CONSUMER_PREFIX - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix used to override consumer configs for the restore consumer client from the general consumer client configs.
+
+
restoreAll(Collection<KeyValue<byte[], byte[]>>) - Method in interface org.apache.kafka.streams.processor.BatchingStateRestoreCallback
+
+
Called to restore a number of records.
+
+
restoreConsumerClientId() - Method in interface org.apache.kafka.streams.ThreadMetadata
+
+
Client ID of the restore Kafka consumer used by the stream thread
+
+
restoreConsumerPrefix(String) - Static method in class org.apache.kafka.streams.StreamsConfig
+
+ +
+
result() - Method in class org.apache.kafka.clients.admin.TerminateTransactionResult
+
+
Return a future which indicates whether the transaction was successfully terminated.
+
+
resultOrder() - Method in class org.apache.kafka.streams.query.MultiVersionedKeyQuery
+
+
The order of the returned records by timestamp.
+
+
resultOrder() - Method in class org.apache.kafka.streams.query.RangeQuery
+
+
Determines whether the serialized byte[] of the keys are returned in ascending, descending, or unordered order.
+
+
resultOrder() - Method in class org.apache.kafka.streams.query.TimestampedRangeQuery
+
+
Determines whether the serialized byte[] of the keys are returned in ascending, descending, or unordered order.
+
+
ResultOrder - Enum Class in org.apache.kafka.streams.query
+
 
+
resume() - Method in class org.apache.kafka.streams.KafkaStreams
+
+
This method resumes processing for the KafkaStreams instance.
+
+
resume(Collection<TopicPartition>) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
resume(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Resume specified partitions which have been paused with KafkaConsumer.pause(Collection).
+
+
resume(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
resume(TopicPartition...) - Method in interface org.apache.kafka.connect.sink.SinkTaskContext
+
+
Resume consumption of messages from previously paused TopicPartitions.
+
+
retainDuplicates() - Method in class org.apache.kafka.streams.state.DslWindowParams
+
 
+
retainDuplicates() - Method in interface org.apache.kafka.streams.state.WindowBytesStoreSupplier
+
+
Whether or not this store is retaining duplicate keys.
+
+
retainLatest() - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
retainLatest() - Method in interface org.apache.kafka.connect.header.Headers
+
+
Removes all but the last Header object with each key.
+
+
retainLatest(String) - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
retainLatest(String) - Method in interface org.apache.kafka.connect.header.Headers
+
+
Removes all but the latest Header objects whose key matches the specified key.
+
+
RETENTION_BYTES_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
RETENTION_BYTES_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
RETENTION_MS_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
RETENTION_MS_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
retentionPeriod() - Method in class org.apache.kafka.streams.state.DslSessionParams
+
 
+
retentionPeriod() - Method in class org.apache.kafka.streams.state.DslWindowParams
+
 
+
retentionPeriod() - Method in interface org.apache.kafka.streams.state.SessionBytesStoreSupplier
+
+
The time period for which the SessionStore will retain historic data.
+
+
retentionPeriod() - Method in interface org.apache.kafka.streams.state.WindowBytesStoreSupplier
+
+
The time period for which the WindowStore will retain historic data.
+
+
RetriableCommitFailedException - Exception in org.apache.kafka.clients.consumer
+
 
+
RetriableCommitFailedException(String) - Constructor for exception org.apache.kafka.clients.consumer.RetriableCommitFailedException
+
 
+
RetriableCommitFailedException(String, Throwable) - Constructor for exception org.apache.kafka.clients.consumer.RetriableCommitFailedException
+
 
+
RetriableCommitFailedException(Throwable) - Constructor for exception org.apache.kafka.clients.consumer.RetriableCommitFailedException
+
 
+
RetriableException - Exception in org.apache.kafka.common.errors
+
+
A retriable exception is a transient exception that if retried may succeed.
+
+
RetriableException - Exception in org.apache.kafka.connect.errors
+
+
An exception that indicates the operation can be reattempted.
+
+
RetriableException() - Constructor for exception org.apache.kafka.common.errors.RetriableException
+
 
+
RetriableException(String) - Constructor for exception org.apache.kafka.common.errors.RetriableException
+
 
+
RetriableException(String) - Constructor for exception org.apache.kafka.connect.errors.RetriableException
+
 
+
RetriableException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.RetriableException
+
 
+
RetriableException(String, Throwable) - Constructor for exception org.apache.kafka.connect.errors.RetriableException
+
 
+
RetriableException(Throwable) - Constructor for exception org.apache.kafka.common.errors.RetriableException
+
 
+
RetriableException(Throwable) - Constructor for exception org.apache.kafka.connect.errors.RetriableException
+
 
+
RETRIES_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
RETRIES_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
retries
+
+
retrieve() - Method in class org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
+
 
+
retrieve() - Method in class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
+
 
+
retrieve() - Method in class org.apache.kafka.common.security.oauthbearer.FileJwtRetriever
+
 
+
retrieve() - Method in class org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever
+
 
+
retrieve() - Method in interface org.apache.kafka.common.security.oauthbearer.JwtRetriever
+
+
Retrieves a JWT access token in its serialized three-part form.
+
+
RETRY - Enum constant in enum class org.apache.kafka.streams.errors.ProductionExceptionHandler.ProductionExceptionHandlerResponse
+
+
Retry the failed operation.
+
+
RETRY_BACKOFF_MAX_MS_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
+
retry.backoff.max.ms
+
+
RETRY_BACKOFF_MAX_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
retry.backoff.max.ms
+
+
RETRY_BACKOFF_MAX_MS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
retry.backoff.max.ms
+
+
RETRY_BACKOFF_MS_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
+
retry.backoff.ms
+
+
RETRY_BACKOFF_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
retry.backoff.ms
+
+
RETRY_BACKOFF_MS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
retry.backoff.ms
+
+
RETRY_BACKOFF_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
retry.backoff.ms
+
+
retryOnQuotaViolation(boolean) - Method in class org.apache.kafka.clients.admin.CreatePartitionsOptions
+
+
Set to true if quota violation should be automatically retried.
+
+
retryOnQuotaViolation(boolean) - Method in class org.apache.kafka.clients.admin.CreateTopicsOptions
+
+
Set to true if quota violation should be automatically retried.
+
+
retryOnQuotaViolation(boolean) - Method in class org.apache.kafka.clients.admin.DeleteTopicsOptions
+
+
Set to true if quota violation should be automatically retried.
+
+
REUSE_KTABLE_SOURCE_TOPICS - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "topology.optimization" for enabling the specific optimization that reuses source topic as changelog topic for KTables.
+
+
reverseAll() - Method in interface org.apache.kafka.streams.state.ReadOnlyKeyValueStore
+
+
Return a reverse iterator over all keys in this store.
+
+
reverseRange(K, K) - Method in interface org.apache.kafka.streams.state.ReadOnlyKeyValueStore
+
+
Get a reverse iterator over a given range of keys.
+
+
ROCKS_DB - Enum constant in enum class org.apache.kafka.streams.kstream.Materialized.StoreType
+
 
+
ROCKS_DB - Static variable in class org.apache.kafka.streams.state.BuiltInDslStoreSuppliers
+
 
+
ROCKS_DB - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
ROCKSDB_CONFIG_SETTER_CLASS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
rocksdb.config.setter
+
+
RocksDBConfigSetter - Interface in org.apache.kafka.streams.state
+
+
An interface that allows developers to customize the RocksDB settings for a given Store.
+
+
RocksDBDslStoreSuppliers() - Constructor for class org.apache.kafka.streams.state.BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers
+
 
+
ROUNDROBIN_ASSIGNOR_NAME - Static variable in class org.apache.kafka.clients.consumer.RoundRobinAssignor
+
 
+
RoundRobinAssignor - Class in org.apache.kafka.clients.consumer
+
+
The round robin assignor lays out all the available partitions and all the available consumers.
+
+
RoundRobinAssignor() - Constructor for class org.apache.kafka.clients.consumer.RoundRobinAssignor
+
 
+
RoundRobinPartitioner - Class in org.apache.kafka.clients.producer
+
+
The "Round-Robin" partitioner + + This partitioning strategy can be used when user wants + to distribute the writes to all partitions equally.
+
+
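A minimal sketch of how this partitioner is typically selected, via the producer's `partitioner.class` setting; the class name, broker address, and topic below are placeholders, not part of the API.

```java
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RoundRobinPartitioner;
import org.apache.kafka.common.serialization.StringSerializer;

public class RoundRobinProducerExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Spread writes evenly across partitions regardless of the record key
        props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, RoundRobinPartitioner.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "key", "value")); // hypothetical topic
        }
    }
}
```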
RoundRobinPartitioner() - Constructor for class org.apache.kafka.clients.producer.RoundRobinPartitioner
+
 
+
RUNNING - Enum constant in enum class org.apache.kafka.streams.KafkaStreams.State
+
 
+
+

S

+
+
SAFE_DOWNGRADE - Enum constant in enum class org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType
+
 
+
salt() - Method in class org.apache.kafka.clients.admin.UserScramCredentialUpsertion
+
 
+
salt() - Method in class org.apache.kafka.common.security.scram.ScramCredential
+
+
Returns the salt used to process this credential using the SCRAM algorithm.
+
+
SampledStat - Class in org.apache.kafka.common.metrics.stats
+
+
A SampledStat records a single scalar value measured over one or more samples.
+
+
SampledStat(double) - Constructor for class org.apache.kafka.common.metrics.stats.SampledStat
+
 
+
samples() - Method in class org.apache.kafka.common.metrics.MetricConfig
+
 
+
samples(int) - Method in class org.apache.kafka.common.metrics.MetricConfig
+
 
+
SASL_CLIENT_CALLBACK_HANDLER_CLASS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_CLIENT_CALLBACK_HANDLER_CLASS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_JAAS_CONFIG - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_JAAS_CONFIG_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_KERBEROS_KINIT_CMD - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_KERBEROS_KINIT_CMD_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_KERBEROS_SERVICE_NAME - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_KERBEROS_SERVICE_NAME_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_KERBEROS_TICKET_RENEW_JITTER - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_KERBEROS_TICKET_RENEW_JITTER_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_CALLBACK_HANDLER_CLASS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_CALLBACK_HANDLER_CLASS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_CLASS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_CLASS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_CONNECT_TIMEOUT_MS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_CONNECT_TIMEOUT_MS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_READ_TIMEOUT_MS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_READ_TIMEOUT_MS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_REFRESH_BUFFER_SECONDS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_REFRESH_BUFFER_SECONDS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_REFRESH_WINDOW_FACTOR - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_REFRESH_WINDOW_FACTOR_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_REFRESH_WINDOW_JITTER - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_REFRESH_WINDOW_JITTER_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_RETRY_BACKOFF_MAX_MS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_RETRY_BACKOFF_MAX_MS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_RETRY_BACKOFF_MS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_LOGIN_RETRY_BACKOFF_MS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_MECHANISM - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
+
SASL mechanism configuration - standard mechanism names are listed here.
+
+
SASL_MECHANISM_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_ALGORITHM - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_ALGORITHM_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_FILE - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_FILE_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_EXPECTED_AUDIENCE - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_EXPECTED_AUDIENCE_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_EXPECTED_ISSUER - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_EXPECTED_ISSUER_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_HEADER_URLENCODE - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_HEADER_URLENCODE_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_JWKS_ENDPOINT_URL - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_JWKS_ENDPOINT_URL_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_SCOPE - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_SCOPE_CLAIM_NAME - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_SCOPE_CLAIM_NAME_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_SCOPE_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_SUB_CLAIM_NAME - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_SUB_CLAIM_NAME_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL_DOC - Static variable in class org.apache.kafka.common.config.SaslConfigs
+
 
+
SASL_PLAINTEXT - Enum constant in enum class org.apache.kafka.common.security.auth.SecurityProtocol
+
+
SASL authenticated, non-encrypted channel
+
+
SASL_SSL - Enum constant in enum class org.apache.kafka.common.security.auth.SecurityProtocol
+
+
SASL authenticated, SSL channel
+
+
SaslAuthenticationContext - Class in org.apache.kafka.common.security.auth
+
 
+
SaslAuthenticationContext(SaslServer, SecurityProtocol, InetAddress, String) - Constructor for class org.apache.kafka.common.security.auth.SaslAuthenticationContext
+
 
+
SaslAuthenticationContext(SaslServer, SecurityProtocol, InetAddress, String, Optional<SSLSession>) - Constructor for class org.apache.kafka.common.security.auth.SaslAuthenticationContext
+
 
+
SaslAuthenticationException - Exception in org.apache.kafka.common.errors
+
+
This exception indicates that SASL authentication has failed.
+
+
SaslAuthenticationException(String) - Constructor for exception org.apache.kafka.common.errors.SaslAuthenticationException
+
 
+
SaslAuthenticationException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.SaslAuthenticationException
+
 
+
SaslConfigs - Class in org.apache.kafka.common.config
+
 
+
SaslConfigs() - Constructor for class org.apache.kafka.common.config.SaslConfigs
+
 
+
SaslExtensions - Class in org.apache.kafka.common.security.auth
+
+
A simple immutable value object class holding customizable SASL extensions.
+
+
SaslExtensions(Map<String, String>) - Constructor for class org.apache.kafka.common.security.auth.SaslExtensions
+
 
+
SaslExtensionsCallback - Class in org.apache.kafka.common.security.auth
+
+
Optional callback used for SASL mechanisms if any extensions need to be set in the SASL exchange.
+
+
SaslExtensionsCallback() - Constructor for class org.apache.kafka.common.security.auth.SaslExtensionsCallback
+
 
+
SCALE_FIELD - Static variable in class org.apache.kafka.connect.data.Decimal
+
 
+
schedule(Duration, PunctuationType, Punctuator) - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
schedule(Duration, PunctuationType, Punctuator) - Method in interface org.apache.kafka.streams.processor.api.ProcessingContext
+
+
Schedule a periodic operation for processors.
+
+
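A brief sketch of scheduling a punctuation from a Processor API processor; the processor name, the "count" output key, and the one-minute interval are illustrative choices, not prescribed by the API.

```java
import java.time.Duration;

import org.apache.kafka.streams.processor.PunctuationType;
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;

// Counts incoming records and emits the running total once per minute of stream time.
public class CountingProcessor implements Processor<String, String, String, Long> {
    private ProcessorContext<String, Long> context;
    private long count = 0L;

    @Override
    public void init(final ProcessorContext<String, Long> context) {
        this.context = context;
        // STREAM_TIME advances with record timestamps; WALL_CLOCK_TIME would use system time instead
        this.context.schedule(Duration.ofMinutes(1), PunctuationType.STREAM_TIME,
                timestamp -> this.context.forward(new Record<>("count", count, timestamp)));
    }

    @Override
    public void process(final Record<String, String> record) {
        count++;
    }
}
```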
schedule(Duration, PunctuationType, Punctuator) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
schedule(Duration, PunctuationType, Punctuator) - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Schedule a periodic operation for processors.
+
+
scheduledPunctuators() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
+
Get the punctuators scheduled so far.
+
+
scheduledPunctuators() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
Get the punctuators scheduled so far.
+
+
scheduleNopPollTask() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
schedulePollTask(Runnable) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
+
Schedule a task to be executed during a poll().
+
+
schema() - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
schema() - Method in class org.apache.kafka.connect.data.Field
+
+
Get the schema of this field
+
+
schema() - Method in interface org.apache.kafka.connect.data.Schema
+
+
Return a concrete instance of the Schema
+
+
schema() - Method in class org.apache.kafka.connect.data.SchemaAndValue
+
 
+
schema() - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
+
Return a concrete instance of the Schema specified by this builder
+
+
schema() - Method in class org.apache.kafka.connect.data.Struct
+
+
Get the schema for this Struct.
+
+
schema() - Method in interface org.apache.kafka.connect.header.Header
+
+
Return the Schema associated with this header, if there is one.
+
+
schema(int) - Static method in class org.apache.kafka.connect.data.Decimal
+
 
+
Schema - Interface in org.apache.kafka.connect.data
+
+
Definition of an abstract data type.
+
+
SCHEMA - Static variable in class org.apache.kafka.connect.data.Date
+
 
+
SCHEMA - Static variable in class org.apache.kafka.connect.data.Time
+
 
+
SCHEMA - Static variable in class org.apache.kafka.connect.data.Timestamp
+
 
+
Schema.Type - Enum Class in org.apache.kafka.connect.data
+
+
The type of a schema.
+
+
SchemaAndValue - Class in org.apache.kafka.connect.data
+
+
A composite containing a Schema and associated value
+
+
SchemaAndValue(Schema, Object) - Constructor for class org.apache.kafka.connect.data.SchemaAndValue
+
 
+
SchemaBuilder - Class in org.apache.kafka.connect.data
+
+
SchemaBuilder provides a fluent API for constructing Schema objects.
+
+
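A small, self-contained sketch of the fluent builder; the schema name and field names are made up for illustration.

```java
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class ConnectSchemaExample {
    public static void main(String[] args) {
        // A struct schema with one required string field and one optional int32 field
        Schema pageViewSchema = SchemaBuilder.struct()
                .name("com.example.PageView") // hypothetical schema name
                .field("url", Schema.STRING_SCHEMA)
                .field("durationMs", SchemaBuilder.int32().optional().build())
                .build();

        Struct pageView = new Struct(pageViewSchema)
                .put("url", "https://kafka.apache.org")
                .put("durationMs", 1200);

        pageView.validate(); // throws DataException if the values do not match the schema
        System.out.println(pageView);
    }
}
```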
SchemaBuilder(Schema.Type) - Constructor for class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
SchemaBuilderException - Exception in org.apache.kafka.connect.errors
+
+
Indicates an error while building a schema via SchemaBuilder
+
+
SchemaBuilderException(String) - Constructor for exception org.apache.kafka.connect.errors.SchemaBuilderException
+
 
+
SchemaBuilderException(String, Throwable) - Constructor for exception org.apache.kafka.connect.errors.SchemaBuilderException
+
 
+
SchemaBuilderException(Throwable) - Constructor for exception org.apache.kafka.connect.errors.SchemaBuilderException
+
 
+
SchemaProjector - Class in org.apache.kafka.connect.data
+
+
SchemaProjector is a utility to project a value between compatible schemas and throw exceptions when non-compatible schemas are provided.
+
+
SchemaProjector() - Constructor for class org.apache.kafka.connect.data.SchemaProjector
+
 
+
SchemaProjectorException - Exception in org.apache.kafka.connect.errors
+
+
Indicates an error while projecting a schema via SchemaProjector
+
+
SchemaProjectorException(String) - Constructor for exception org.apache.kafka.connect.errors.SchemaProjectorException
+
 
+
SchemaProjectorException(String, Throwable) - Constructor for exception org.apache.kafka.connect.errors.SchemaProjectorException
+
 
+
SchemaProjectorException(Throwable) - Constructor for exception org.apache.kafka.connect.errors.SchemaProjectorException
+
 
+
SchemaSourceConnector - Class in org.apache.kafka.connect.tools
+
+
A simple source connector that is capable of producing static data with Struct schemas.
+
+
SchemaSourceConnector() - Constructor for class org.apache.kafka.connect.tools.SchemaSourceConnector
+
 
+
SchemaSourceTask - Class in org.apache.kafka.connect.tools
+
+
Task implementation for SchemaSourceConnector.
+
+
SchemaSourceTask() - Constructor for class org.apache.kafka.connect.tools.SchemaSourceTask
+
 
+
schemaType(Class<?>) - Static method in class org.apache.kafka.connect.data.ConnectSchema
+
+
Get the Schema.Type associated with the given class.
+
+
scope() - Method in interface org.apache.kafka.common.security.oauthbearer.OAuthBearerToken
+
+
The token's scope of access, as per RFC 6749 Section 1.4.
+
+
SCOPE_CONFIG - Static variable in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler
+
 
+
SCOPE_DOC - Static variable in class org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler
+
 
+
SCRAM_SHA_256 - Enum constant in enum class org.apache.kafka.clients.admin.ScramMechanism
+
 
+
SCRAM_SHA_512 - Enum constant in enum class org.apache.kafka.clients.admin.ScramMechanism
+
 
+
scramCredential() - Method in class org.apache.kafka.common.security.scram.ScramCredentialCallback
+
+
Returns the SCRAM credential if set on this instance.
+
+
scramCredential(ScramCredential) - Method in class org.apache.kafka.common.security.scram.ScramCredentialCallback
+
+
Sets the SCRAM credential for this instance.
+
+
ScramCredential - Class in org.apache.kafka.common.security.scram
+
+
SCRAM credential class that encapsulates the credential data persisted for each user that is accessible to the server.
+
+
ScramCredential(byte[], byte[], byte[], int) - Constructor for class org.apache.kafka.common.security.scram.ScramCredential
+
+
Constructs a new credential.
+
+
ScramCredentialCallback - Class in org.apache.kafka.common.security.scram
+
+
Callback used for SCRAM mechanisms.
+
+
ScramCredentialCallback() - Constructor for class org.apache.kafka.common.security.scram.ScramCredentialCallback
+
 
+
ScramCredentialInfo - Class in org.apache.kafka.clients.admin
+
+
Mechanism and iterations for a SASL/SCRAM credential associated with a user.
+
+
ScramCredentialInfo(ScramMechanism, int) - Constructor for class org.apache.kafka.clients.admin.ScramCredentialInfo
+
 
+
ScramExtensionsCallback - Class in org.apache.kafka.common.security.scram
+
+
Optional callback used for SCRAM mechanisms if any extensions need to be set in the SASL/SCRAM exchange.
+
+
ScramExtensionsCallback() - Constructor for class org.apache.kafka.common.security.scram.ScramExtensionsCallback
+
 
+
ScramLoginModule - Class in org.apache.kafka.common.security.scram
+
 
+
ScramLoginModule() - Constructor for class org.apache.kafka.common.security.scram.ScramLoginModule
+
 
+
ScramMechanism - Enum Class in org.apache.kafka.clients.admin
+
+
Representation of a SASL/SCRAM Mechanism.
+
+
SECURITY_PROTOCOL_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
SECURITY_PROTOCOL_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
security.protocol
+
+
SECURITY_PROVIDERS_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
+
security.providers
+
+
SECURITY_PROVIDERS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
security.providers
+
+
SECURITY_PROVIDERS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
security.providers
+
+
SECURITY_PROVIDERS_CONFIG - Static variable in class org.apache.kafka.common.config.SecurityConfig
+
 
+
SECURITY_PROVIDERS_DOC - Static variable in class org.apache.kafka.common.config.SecurityConfig
+
 
+
SecurityConfig - Class in org.apache.kafka.common.config
+
+
Contains the common security config for SSL and SASL
+
+
SecurityConfig() - Constructor for class org.apache.kafka.common.config.SecurityConfig
+
 
+
SecurityDisabledException - Exception in org.apache.kafka.common.errors
+
+
An error indicating that security is disabled on the broker.
+
+
SecurityDisabledException(String) - Constructor for exception org.apache.kafka.common.errors.SecurityDisabledException
+
 
+
SecurityDisabledException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.SecurityDisabledException
+
 
+
securityProtocol() - Method in class org.apache.kafka.common.Endpoint
+
+
Returns the security protocol of this endpoint.
+
+
securityProtocol() - Method in interface org.apache.kafka.common.security.auth.AuthenticationContext
+
+
Underlying security protocol of the authentication session.
+
+
securityProtocol() - Method in class org.apache.kafka.common.security.auth.PlaintextAuthenticationContext
+
 
+
securityProtocol() - Method in class org.apache.kafka.common.security.auth.SaslAuthenticationContext
+
 
+
securityProtocol() - Method in class org.apache.kafka.common.security.auth.SslAuthenticationContext
+
 
+
securityProtocol() - Method in interface org.apache.kafka.server.authorizer.AuthorizableRequestContext
+
+
Returns the security protocol for the listener on which request was received.
+
+
SecurityProtocol - Enum Class in org.apache.kafka.common.security.auth
+
 
+
SecurityProviderCreator - Interface in org.apache.kafka.common.security.auth
+
+
An interface for generating security providers.
+
+
seek(TopicPartition, long) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
seek(TopicPartition, long) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Overrides the fetch offsets that the consumer will use on the next poll(timeout).
+
+
seek(TopicPartition, long) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
seek(TopicPartition, OffsetAndMetadata) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
seek(TopicPartition, OffsetAndMetadata) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Overrides the fetch offsets that the consumer will use on the next poll(timeout).
+
+
seek(TopicPartition, OffsetAndMetadata) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
seekToBeginning(Collection<TopicPartition>) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
seekToBeginning(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Seek to the first offset for each of the given partitions.
+
+
seekToBeginning(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
seekToEnd(Collection<TopicPartition>) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
seekToEnd(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Seek to the last offset for each of the given partitions.
+
+
seekToEnd(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
SEGMENT_BYTES_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
SEGMENT_BYTES_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
SEGMENT_INDEX_BYTES_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
SEGMENT_INDEX_BYTES_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
SEGMENT_JITTER_MS_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
SEGMENT_JITTER_MS_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
SEGMENT_MS_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
SEGMENT_MS_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
segmentIntervalMs() - Method in interface org.apache.kafka.streams.state.SessionBytesStoreSupplier
+
+
The size of a segment, in milliseconds.
+
+
segmentIntervalMs() - Method in interface org.apache.kafka.streams.state.WindowBytesStoreSupplier
+
+
The size of the segments (in milliseconds) the store has.
+
+
segmentLeaderEpochs() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
 
+
segmentSizeInBytes() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
 
+
selectKey(KeyValueMapper<? super K, ? super V, ? extends KOut>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Create a new KStream that consists of all records of this stream but with a modified key.
+
+
selectKey(KeyValueMapper<? super K, ? super V, ? extends KOut>, Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
send(ProducerRecord<K, V>) - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
Asynchronously send a record to a topic.
+
+
send(ProducerRecord<K, V>) - Method in class org.apache.kafka.clients.producer.MockProducer
+
+
Adds the record to the list of sent records.
+
+
send(ProducerRecord<K, V>) - Method in interface org.apache.kafka.clients.producer.Producer
+
+ +
+
send(ProducerRecord<K, V>, Callback) - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
Asynchronously send a record to a topic and invoke the provided callback when the send has been acknowledged.
+
+
send(ProducerRecord<K, V>, Callback) - Method in class org.apache.kafka.clients.producer.MockProducer
+
+
Adds the record to the list of sent records.
+
+
send(ProducerRecord<K, V>, Callback) - Method in interface org.apache.kafka.clients.producer.Producer
+
+ +
+
SEND_BUFFER_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
 
+
SEND_BUFFER_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
send.buffer.bytes
+
+
SEND_BUFFER_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
send.buffer.bytes
+
+
SEND_BUFFER_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
send.buffer.bytes
+
+
sendException - Variable in class org.apache.kafka.clients.producer.MockProducer
+
 
+
sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata>, ConsumerGroupMetadata) - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
Sends a list of specified offsets to the consumer group coordinator, and also marks those offsets as part of the current transaction.
+
+
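A condensed consume-transform-produce sketch showing where this call fits. It assumes a producer created with a transactional.id (and initTransactions() already called), a consumer with auto-commit disabled, and a caller-supplied output topic; handling of fatal producer exceptions such as ProducerFencedException is intentionally omitted.

```java
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;

public class TransactionalForwarder {
    static void forwardOnce(KafkaConsumer<String, String> consumer,
                            KafkaProducer<String, String> producer,
                            String outputTopic) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
        if (records.isEmpty()) {
            return;
        }
        producer.beginTransaction();
        try {
            Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
            for (ConsumerRecord<String, String> record : records) {
                producer.send(new ProducerRecord<>(outputTopic, record.key(), record.value()));
                offsets.put(new TopicPartition(record.topic(), record.partition()),
                        new OffsetAndMetadata(record.offset() + 1));
            }
            // Commit the consumed offsets atomically with the produced records
            producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
            producer.commitTransaction();
        } catch (RuntimeException e) {
            producer.abortTransaction();
            throw e;
        }
    }
}
```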
sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata>, ConsumerGroupMetadata) - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata>, ConsumerGroupMetadata) - Method in interface org.apache.kafka.clients.producer.Producer
+
+ +
+
sendOffsetsToTransactionException - Variable in class org.apache.kafka.clients.producer.MockProducer
+
 
+
sensor(String) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Get or create a sensor with the given unique name and no parent sensors.
+
+
sensor(String, MetricConfig, long, Sensor...) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Get or create a sensor with the given unique name and zero or more parent sensors.
+
+
sensor(String, MetricConfig, long, Sensor.RecordingLevel, Sensor...) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Get or create a sensor with the given unique name and zero or more parent sensors.
+
+
sensor(String, MetricConfig, Sensor...) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Get or create a sensor with the given unique name and zero or more parent sensors.
+
+
sensor(String, MetricConfig, Sensor.RecordingLevel, Sensor...) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Get or create a sensor with the given unique name and zero or more parent sensors.
+
+
sensor(String, Sensor...) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Get or create a sensor with the given unique name and zero or more parent sensors.
+
+
sensor(String, Sensor.RecordingLevel) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Get or create a sensor with the given unique name and no parent sensors and with a given recording level.
+
+
sensor(String, Sensor.RecordingLevel, Sensor...) - Method in class org.apache.kafka.common.metrics.Metrics
+
+
Get or create a sensor with the given unique name and zero or more parent sensors.
+
+
Sensor - Class in org.apache.kafka.common.metrics
+
+
A sensor applies a continuous sequence of numerical values to a set of associated metrics.
+
+
Sensor.RecordingLevel - Enum Class in org.apache.kafka.common.metrics
+
 
+
sentOffsets() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
SEPARATOR_CONFIG - Static variable in class org.apache.kafka.connect.mirror.DefaultReplicationPolicy
+
 
+
SEPARATOR_DEFAULT - Static variable in class org.apache.kafka.connect.mirror.DefaultReplicationPolicy
+
 
+
Serde<T> - Interface in org.apache.kafka.common.serialization
+
+
The interface for wrapping a serializer and deserializer for the given data type.
+
+
serdeFrom(Class<T>) - Static method in class org.apache.kafka.common.serialization.Serdes
+
 
+
serdeFrom(Serializer<T>, Deserializer<T>) - Static method in class org.apache.kafka.common.serialization.Serdes
+
+
Construct a serde object from separate serializer and deserializer
+
+
Serdes - Class in org.apache.kafka.common.serialization
+
+
Factory for creating serializers / deserializers.
+
+
Serdes() - Constructor for class org.apache.kafka.common.serialization.Serdes
+
 
+
Serdes.BooleanSerde - Class in org.apache.kafka.common.serialization
+
 
+
Serdes.ByteArraySerde - Class in org.apache.kafka.common.serialization
+
 
+
Serdes.ByteBufferSerde - Class in org.apache.kafka.common.serialization
+
 
+
Serdes.BytesSerde - Class in org.apache.kafka.common.serialization
+
 
+
Serdes.DoubleSerde - Class in org.apache.kafka.common.serialization
+
 
+
Serdes.FloatSerde - Class in org.apache.kafka.common.serialization
+
 
+
Serdes.IntegerSerde - Class in org.apache.kafka.common.serialization
+
 
+
Serdes.ListSerde<Inner> - Class in org.apache.kafka.common.serialization
+
 
+
Serdes.LongSerde - Class in org.apache.kafka.common.serialization
+
 
+
Serdes.ShortSerde - Class in org.apache.kafka.common.serialization
+
 
+
Serdes.StringSerde - Class in org.apache.kafka.common.serialization
+
 
+
Serdes.UUIDSerde - Class in org.apache.kafka.common.serialization
+
 
+
Serdes.VoidSerde - Class in org.apache.kafka.common.serialization
+
 
+
Serdes.WrapperSerde<T> - Class in org.apache.kafka.common.serialization
+
 
+
SerializationException - Exception in org.apache.kafka.common.errors
+
+
Any exception during serialization in the producer
+
+
SerializationException() - Constructor for exception org.apache.kafka.common.errors.SerializationException
+
 
+
SerializationException(String) - Constructor for exception org.apache.kafka.common.errors.SerializationException
+
 
+
SerializationException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.SerializationException
+
 
+
SerializationException(Throwable) - Constructor for exception org.apache.kafka.common.errors.SerializationException
+
 
+
serialize(String, byte[]) - Method in class org.apache.kafka.common.serialization.ByteArraySerializer
+
 
+
serialize(String, Boolean) - Method in class org.apache.kafka.common.serialization.BooleanSerializer
+
 
+
serialize(String, Double) - Method in class org.apache.kafka.common.serialization.DoubleSerializer
+
 
+
serialize(String, Float) - Method in class org.apache.kafka.common.serialization.FloatSerializer
+
 
+
serialize(String, Integer) - Method in class org.apache.kafka.common.serialization.IntegerSerializer
+
 
+
serialize(String, Long) - Method in class org.apache.kafka.common.serialization.LongSerializer
+
 
+
serialize(String, Short) - Method in class org.apache.kafka.common.serialization.ShortSerializer
+
 
+
serialize(String, String) - Method in class org.apache.kafka.common.serialization.StringSerializer
+
 
+
serialize(String, Void) - Method in class org.apache.kafka.common.serialization.VoidSerializer
+
 
+
serialize(String, ByteBuffer) - Method in class org.apache.kafka.common.serialization.ByteBufferSerializer
+
 
+
serialize(String, List<Inner>) - Method in class org.apache.kafka.common.serialization.ListSerializer
+
 
+
serialize(String, UUID) - Method in class org.apache.kafka.common.serialization.UUIDSerializer
+
 
+
serialize(String, Headers, T) - Method in interface org.apache.kafka.common.serialization.Serializer
+
+
Convert data into a byte array.
+
+
serialize(String, Bytes) - Method in class org.apache.kafka.common.serialization.BytesSerializer
+
 
+
serialize(String, Windowed<T>) - Method in class org.apache.kafka.streams.kstream.SessionWindowedSerializer
+
 
+
serialize(String, Windowed<T>) - Method in class org.apache.kafka.streams.kstream.TimeWindowedSerializer
+
 
+
serialize(String, T) - Method in interface org.apache.kafka.common.serialization.Serializer
+
+
Convert data into a byte array.
+
+
serialize(KafkaPrincipal) - Method in interface org.apache.kafka.common.security.auth.KafkaPrincipalSerde
+
+
Serialize a KafkaPrincipal into byte array.
+
+
serializeBaseKey(String, Windowed<T>) - Method in class org.apache.kafka.streams.kstream.SessionWindowedSerializer
+
 
+
serializeBaseKey(String, Windowed<T>) - Method in class org.apache.kafka.streams.kstream.TimeWindowedSerializer
+
 
+
serializedKeySize() - Method in class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
The size of the serialized, uncompressed key in bytes.
+
+
serializedKeySize() - Method in class org.apache.kafka.clients.producer.RecordMetadata
+
+
The size of the serialized, uncompressed key in bytes.
+
+
serializedValueSize() - Method in class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
The size of the serialized, uncompressed value in bytes.
+
+
serializedValueSize() - Method in class org.apache.kafka.clients.producer.RecordMetadata
+
+
The size of the serialized, uncompressed value in bytes.
+
+
serializer() - Method in interface org.apache.kafka.common.serialization.Serde
+
 
+
serializer() - Method in class org.apache.kafka.common.serialization.Serdes.WrapperSerde
+
 
+
Serializer<T> - Interface in org.apache.kafka.common.serialization
+
+
An interface for converting objects to bytes.
+
+
server() - Method in class org.apache.kafka.common.security.auth.SaslAuthenticationContext
+
 
+
serverKey() - Method in class org.apache.kafka.common.security.scram.ScramCredential
+
+
Server key computed from the client password using the SCRAM algorithm.
+
+
serviceName() - Method in interface org.apache.kafka.common.security.auth.Login
+
+
Returns the service name to be used for SASL.
+
+
session() - Method in class org.apache.kafka.common.security.auth.SslAuthenticationContext
+
 
+
SESSION_TIMEOUT_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
session.timeout.ms
+
+
SessionBytesStoreSupplier - Interface in org.apache.kafka.streams.state
+
+
A store supplier that can be used to create one or more SessionStore<Byte, byte[]> instances.
+
+
sessionStore() - Static method in class org.apache.kafka.streams.state.QueryableStoreTypes
+
+ +
+
sessionStore(DslSessionParams) - Method in enum class org.apache.kafka.streams.kstream.Materialized.StoreType
+
 
+
sessionStore(DslSessionParams) - Method in class org.apache.kafka.streams.state.BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers
+
 
+
sessionStore(DslSessionParams) - Method in class org.apache.kafka.streams.state.BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers
+
 
+
sessionStore(DslSessionParams) - Method in interface org.apache.kafka.streams.state.DslStoreSuppliers
+
 
+
SessionStore<K,AGG> - Interface in org.apache.kafka.streams.state
+
+
Interface for storing the aggregated values of sessions.
+
+
sessionStoreBuilder(SessionBytesStoreSupplier, Serde<K>, Serde<V>) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Creates a StoreBuilder that can be used to build a SessionStore.
+
+
SessionWindowedCogroupedKStream<K,V> - Interface in org.apache.kafka.streams.kstream
+
+
Same as a SessionWindowedKStream, however, for multiple co-grouped KStreams.
+
+
SessionWindowedDeserializer<T> - Class in org.apache.kafka.streams.kstream
+
 
+
SessionWindowedDeserializer() - Constructor for class org.apache.kafka.streams.kstream.SessionWindowedDeserializer
+
 
+
SessionWindowedDeserializer(Deserializer<T>) - Constructor for class org.apache.kafka.streams.kstream.SessionWindowedDeserializer
+
 
+
SessionWindowedKStream<K,V> - Interface in org.apache.kafka.streams.kstream
+
+
SessionWindowedKStream is an abstraction of a windowed record stream of key-value pairs.
+
+
SessionWindowedSerde() - Constructor for class org.apache.kafka.streams.kstream.WindowedSerdes.SessionWindowedSerde
+
 
+
SessionWindowedSerde(Serde<T>) - Constructor for class org.apache.kafka.streams.kstream.WindowedSerdes.SessionWindowedSerde
+
 
+
sessionWindowedSerdeFrom(Class<T>) - Static method in class org.apache.kafka.streams.kstream.WindowedSerdes
+
+
Construct a SessionWindowedSerde object for the specified inner class type.
+
+
SessionWindowedSerializer<T> - Class in org.apache.kafka.streams.kstream
+
 
+
SessionWindowedSerializer() - Constructor for class org.apache.kafka.streams.kstream.SessionWindowedSerializer
+
 
+
SessionWindowedSerializer(Serializer<T>) - Constructor for class org.apache.kafka.streams.kstream.SessionWindowedSerializer
+
 
+
SessionWindows - Class in org.apache.kafka.streams.kstream
+
+
A session based window specification used for aggregating events into sessions.
+
+
SET - Enum constant in enum class org.apache.kafka.clients.admin.AlterConfigOp.OpType
+
+
Set the value of the configuration entry.
+
+
setAcknowledgementCommitCallback(AcknowledgementCommitCallback) - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Sets the acknowledgement commit callback which can be used to handle acknowledgement completion.
+
+
setAcknowledgementCommitCallback(AcknowledgementCommitCallback) - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
setAcknowledgementCommitCallback(AcknowledgementCommitCallback) - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
setClientInstanceId(Uuid) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
setClientInstanceId(Uuid) - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
setClientInstanceId(Uuid) - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
setClusterId(Optional<String>) - Method in class org.apache.kafka.clients.admin.AddRaftVoterOptions
+
 
+
setClusterId(Optional<String>) - Method in class org.apache.kafka.clients.admin.RemoveRaftVoterOptions
+
 
+
setConfig(String, Options, Map<String, Object>) - Method in interface org.apache.kafka.streams.state.RocksDBConfigSetter
+
+
Set the RocksDB options for the provided storeName.
+
+
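An illustrative implementation only: the class name and buffer sizes below are arbitrary examples, and the setter is registered through the rocksdb.config.setter Streams config (StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG).

```java
import java.util.Map;

import org.apache.kafka.streams.state.RocksDBConfigSetter;
import org.rocksdb.Options;

// Registered via StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG ("rocksdb.config.setter")
public class CustomRocksDBConfig implements RocksDBConfigSetter {

    @Override
    public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
        // Example tuning only: these values are arbitrary, not recommendations
        options.setWriteBufferSize(16 * 1024 * 1024L);
        options.setMaxWriteBufferNumber(2);
    }

    @Override
    public void close(final String storeName, final Options options) {
        // Close any RocksDB objects created in setConfig(); nothing to release in this sketch
    }
}
```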
setCurrentStreamTimeMs(long) - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
setCurrentStreamTimeMs(long) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
setCurrentSystemTimeMs(long) - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
setCurrentSystemTimeMs(long) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
setExpiryTimestamp(long) - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
setGlobalResult(QueryResult<R>) - Method in class org.apache.kafka.streams.query.StateQueryResult
+
+
Set the result for a global store query.
+
+
setGlobalStateRestoreListener(StateRestoreListener) - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Set the listener which is triggered whenever a StateStore is being restored in order to resume processing.
+
+
setGroupInstanceId(Optional<String>) - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
+
 
+
setHeaders(Headers) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
The context exposes this metadata for use in the processor.
+
+
setIsChangelogTopic(boolean) - Method in class org.apache.kafka.streams.kstream.TimeWindowedDeserializer
+
 
+
setMaxPollRecords(long) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
+
Sets the maximum number of records returned in a single call to MockConsumer.poll(Duration).
+
+
setMockMetrics(MetricName, Metric) - Method in class org.apache.kafka.clients.producer.MockProducer
+
+
Set a mock metric for testing purposes.
+
+
setOffset(long) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
The context exposes this metadata for use in the processor.
+
+
setOffsetsException(KafkaException) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
setPartition(int) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
The context exposes this metadata for use in the processor.
+
+
setPollException(KafkaException) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
setPosition(Position) - Method in interface org.apache.kafka.streams.query.QueryResult
+
+
Used by stores to report what exact position in the store's history it was at when it executed the query.
+
+
setRecordMetadata(String, int, long) - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
+
The context exposes these metadata for use in the processor.
+
+
setRecordMetadata(String, int, long, Headers, long) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
The context exposes these metadata for use in the processor.
+
+
setRecordTimestamp(long) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
The context exposes this metadata for use in the processor.
+
+
setStandbyUpdateListener(StandbyUpdateListener) - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Set the listener which is triggered whenever a standby task is updated
+
+
setStateListener(KafkaStreams.StateListener) - Method in class org.apache.kafka.streams.KafkaStreams
+
+
An app can set a single KafkaStreams.StateListener so that the app is notified when state changes.
+
+
setTaskId(TaskId) - Method in exception org.apache.kafka.streams.errors.StreamsException
+
 
+
setTopic(String) - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
The context exposes this metadata for use in the processor.
+
+
setUncaughtExceptionHandler(StreamsUncaughtExceptionHandler) - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Set the handler invoked when an internal stream thread throws an unexpected exception.
+
+
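A minimal sketch, assuming a running KafkaStreams instance is passed in; REPLACE_THREAD is just one of the available responses (the others are SHUTDOWN_CLIENT and SHUTDOWN_APPLICATION).

```java
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse;

public class ExceptionHandlerExample {
    static void installHandler(KafkaStreams streams) {
        // Replace the failed stream thread instead of shutting down the whole client
        streams.setUncaughtExceptionHandler(exception -> {
            System.err.println("Stream thread failed: " + exception.getMessage());
            return StreamThreadExceptionResponse.REPLACE_THREAD;
        });
    }
}
```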
SHARE - Enum constant in enum class org.apache.kafka.common.GroupType
+
 
+
SHARE_ACKNOWLEDGEMENT_MODE_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
share.acknowledgement.mode
+
+
ShareConsumer<K,V> - Interface in org.apache.kafka.clients.consumer
+
+
A client that consumes records from a Kafka cluster using a share group.
+
+
ShareGroupDescription - Class in org.apache.kafka.clients.admin
+
+
A detailed description of a single share group in the cluster.
+
+
ShareGroupDescription(String, Collection<ShareMemberDescription>, GroupState, Node, int, int) - Constructor for class org.apache.kafka.clients.admin.ShareGroupDescription
+
 
+
ShareGroupDescription(String, Collection<ShareMemberDescription>, GroupState, Node, int, int, Set<AclOperation>) - Constructor for class org.apache.kafka.clients.admin.ShareGroupDescription
+
 
+
ShareGroupPartitionAssignor - Interface in org.apache.kafka.coordinator.group.api.assignor
+
+
Server-side partition assignor for share groups used by the GroupCoordinator.
+
+
ShareMemberAssignment - Class in org.apache.kafka.clients.admin
+
+
A description of the assignments of a specific share group member.
+
+
ShareMemberAssignment(Set<TopicPartition>) - Constructor for class org.apache.kafka.clients.admin.ShareMemberAssignment
+
+
Creates an instance with the specified parameters.
+
+
ShareMemberDescription - Class in org.apache.kafka.clients.admin
+
+
A detailed description of a single share group member in the cluster.
+
+
ShareMemberDescription(String, String, String, ShareMemberAssignment, int) - Constructor for class org.apache.kafka.clients.admin.ShareMemberDescription
+
 
+
ShareSessionLimitReachedException - Exception in org.apache.kafka.common.errors
+
+
Indicates that a new share session could not be opened because the limit of share sessions has been reached.
+
+
ShareSessionLimitReachedException(String) - Constructor for exception org.apache.kafka.common.errors.ShareSessionLimitReachedException
+
 
+
ShareSessionNotFoundException - Exception in org.apache.kafka.common.errors
+
+
Thrown when the share session was not found.
+
+
ShareSessionNotFoundException(String) - Constructor for exception org.apache.kafka.common.errors.ShareSessionNotFoundException
+
 
+
Short() - Static method in class org.apache.kafka.common.serialization.Serdes
+
+
A serde for nullable Short type.
+
+
SHORT - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigType
+
 
+
SHORT - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Type
+
+
Used for numerical values within the Java Short range.
+
+
SHORT - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Width
+
 
+
ShortDeserializer - Class in org.apache.kafka.common.serialization
+
 
+
ShortDeserializer() - Constructor for class org.apache.kafka.common.serialization.ShortDeserializer
+
 
+
ShortSerde() - Constructor for class org.apache.kafka.common.serialization.Serdes.ShortSerde
+
 
+
ShortSerializer - Class in org.apache.kafka.common.serialization
+
 
+
ShortSerializer() - Constructor for class org.apache.kafka.common.serialization.ShortSerializer
+
 
+
shouldBeRebuilt(Map<String, Object>) - Method in interface org.apache.kafka.common.security.auth.SslEngineFactory
+
+
Returns true if SSLEngine needs to be rebuilt.
+
+
shouldListInternal() - Method in class org.apache.kafka.clients.admin.ListTopicsOptions
+
+
Return true if we should list internal topics.
+
+
shouldRebalance() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
shouldRecord() - Method in class org.apache.kafka.common.metrics.Sensor
+
 
+
shouldRecord(int) - Method in enum class org.apache.kafka.common.metrics.Sensor.RecordingLevel
+
 
+
shouldRetryOnQuotaViolation() - Method in class org.apache.kafka.clients.admin.CreatePartitionsOptions
+
+
Returns true if quota violation should be automatically retried.
+
+
shouldRetryOnQuotaViolation() - Method in class org.apache.kafka.clients.admin.CreateTopicsOptions
+
+
Returns true if quota violation should be automatically retried.
+
+
shouldRetryOnQuotaViolation() - Method in class org.apache.kafka.clients.admin.DeleteTopicsOptions
+
+
Returns true if quota violation should be automatically retried.
+
+
shouldValidateOnly() - Method in class org.apache.kafka.clients.admin.AlterConfigsOptions
+
+
Return true if the request should be validated without altering the configs.
+
+
shouldValidateOnly() - Method in class org.apache.kafka.clients.admin.CreateTopicsOptions
+
+
Return true if the request should be validated without creating the topic.
+
+
SHUTDOWN_APPLICATION - Enum constant in enum class org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse
+
+
Try to shut down the whole application.
+
+
SHUTDOWN_CLIENT - Enum constant in enum class org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse
+
+
Shut down the client.
+
+
shutDownWhenFull() - Method in interface org.apache.kafka.streams.kstream.Suppressed.BufferConfig
+
+
Set the buffer to gracefully shut down the application when any of its constraints are violated.
+
+
SimpleHeaderConverter - Class in org.apache.kafka.connect.storage
+
+
A HeaderConverter that serializes header values as strings and that deserializes header values to the most appropriate numeric, boolean, array, or map representation.
+
+
SimpleHeaderConverter() - Constructor for class org.apache.kafka.connect.storage.SimpleHeaderConverter
+
 
+
SimpleRate - Class in org.apache.kafka.common.metrics.stats
+
+
A simple rate: the rate is incrementally calculated based on the elapsed time between the earliest reading and now.
+
+
SimpleRate() - Constructor for class org.apache.kafka.common.metrics.stats.SimpleRate
+
 
+
SINGLE_STORE_SELF_JOIN - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "topology.optimization" for enabling the optimization that rewrites inner stream-stream joins into self-joins when both arguments are the same stream.
+
+
SINK - Enum constant in enum class org.apache.kafka.connect.health.ConnectorType
+
+
Identifies a sink connector
+
+
SinkConnector - Class in org.apache.kafka.connect.sink
+
+
SinkConnectors implement the Connector interface to send Kafka data to another system.
+
+
SinkConnector() - Constructor for class org.apache.kafka.connect.sink.SinkConnector
+
 
+
SinkConnectorContext - Interface in org.apache.kafka.connect.sink
+
+
A context to allow a SinkConnector to interact with the Kafka Connect runtime.
+
+
SinkRecord - Class in org.apache.kafka.connect.sink
+
+
SinkRecord is a ConnectRecord that has been read from Kafka and includes the original Kafka record's topic, partition and offset (before any transformations have been applied) in addition to the standard fields.
+
+
SinkRecord(String, int, Schema, Object, Schema, Object, long) - Constructor for class org.apache.kafka.connect.sink.SinkRecord
+
 
+
SinkRecord(String, int, Schema, Object, Schema, Object, long, Long, TimestampType) - Constructor for class org.apache.kafka.connect.sink.SinkRecord
+
 
+
SinkRecord(String, int, Schema, Object, Schema, Object, long, Long, TimestampType, Iterable<Header>) - Constructor for class org.apache.kafka.connect.sink.SinkRecord
+
 
+
SinkRecord(String, int, Schema, Object, Schema, Object, long, Long, TimestampType, Iterable<Header>, String, Integer, long) - Constructor for class org.apache.kafka.connect.sink.SinkRecord
+
+
This constructor is intended for use by the Connect runtime only; plugins (sink connectors or transformations) should not use this directly outside testing code.
+
+
SinkTask - Class in org.apache.kafka.connect.sink
+
+
SinkTask is a Task that takes records loaded from Kafka and sends them to another system.
+
+
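For the SinkTask entry, a bare-bones, hypothetical implementation showing the lifecycle methods referenced elsewhere in this index (start, put, stop); the "external system" here is simply stdout.

```java
import java.util.Collection;
import java.util.Map;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

public class StdoutSinkTask extends SinkTask {
    @Override
    public String version() {
        return "0.0.1"; // illustrative version string
    }

    @Override
    public void start(Map<String, String> props) {
        // Read connector configuration and open resources here.
    }

    @Override
    public void put(Collection<SinkRecord> records) {
        // Deliver each record to the external system; here we just print it.
        records.forEach(record -> System.out.println(record.topic() + ": " + record.value()));
    }

    @Override
    public void stop() {
        // Release any resources opened in start().
    }
}
```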
SinkTask() - Constructor for class org.apache.kafka.connect.sink.SinkTask
+
 
+
SinkTaskContext - Interface in org.apache.kafka.connect.sink
+
+
Context passed to SinkTasks, allowing them to access utilities in the Kafka Connect runtime.
+
+
size() - Method in class org.apache.kafka.clients.admin.ReplicaInfo
+
+
The total size of the log segments in this replica in bytes.
+
+
size() - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
size() - Method in interface org.apache.kafka.connect.header.Headers
+
+
Get the number of headers in this object.
+
+
size() - Method in class org.apache.kafka.streams.kstream.JoinWindows
+
 
+
size() - Method in class org.apache.kafka.streams.kstream.TimeWindows
+
 
+
size() - Method in class org.apache.kafka.streams.kstream.UnlimitedWindows
+
+
Return the size of the specified windows in milliseconds.
+
+
size() - Method in class org.apache.kafka.streams.kstream.Windows
+
+
Return the size of the specified windows in milliseconds.
+
+
sizeMs - Variable in class org.apache.kafka.streams.kstream.TimeWindows
+
+
The size of the windows in milliseconds.
+
+
skipCache() - Method in class org.apache.kafka.streams.query.KeyQuery
+
+
Specifies that the cache should be skipped during query evaluation.
+
+
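For the skipCache entries, a hedged sketch of an IQv2 KeyQuery that bypasses the record cache; the store name, key, and value type are hypothetical, and the KafkaStreams instance is assumed to be RUNNING.

```java
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.query.KeyQuery;
import org.apache.kafka.streams.query.StateQueryRequest;
import org.apache.kafka.streams.query.StateQueryResult;

public class SkipCacheQueryExample {
    static Long lookup(KafkaStreams streams) {
        // Query the latest value for key "alice" directly from the store, skipping the cache.
        KeyQuery<String, Long> query = KeyQuery.<String, Long>withKey("alice").skipCache();
        StateQueryRequest<Long> request = StateQueryRequest.inStore("counts-store").withQuery(query);
        StateQueryResult<Long> result = streams.query(request);
        return result.getOnlyPartitionResult().getResult();
    }
}
```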
skipCache() - Method in class org.apache.kafka.streams.query.TimestampedKeyQuery
+
+
Specifies that the cache should be skipped during query evaluation.
+
+
SlidingWindows - Class in org.apache.kafka.streams.kstream
+
+
A sliding window used for aggregating events.
+
+
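For the SlidingWindows entry, a hedged sketch of a sliding-window count; the topic name, serdes, and five-minute time difference are illustrative assumptions.

```java
import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.SlidingWindows;

public class SlidingWindowExample {
    static void build(StreamsBuilder builder) {
        builder.stream("events", Consumed.with(Serdes.String(), Serdes.String()))
               .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
               // Count events per key within a 5-minute sliding window, with no grace period.
               .windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(Duration.ofMinutes(5)))
               .count();
    }
}
```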
SnapshotNotFoundException - Exception in org.apache.kafka.common.errors
+
 
+
SnapshotNotFoundException(String) - Constructor for exception org.apache.kafka.common.errors.SnapshotNotFoundException
+
 
+
SnapshotNotFoundException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.SnapshotNotFoundException
+
 
+
SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
+
socket.connection.setup.timeout.max.ms
+
+
SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
socket.connection.setup.timeout.max.ms
+
+
SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
socket.connection.setup.timeout.max.ms
+
+
SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG - Static variable in class org.apache.kafka.clients.admin.AdminClientConfig
+
+
socket.connection.setup.timeout.ms
+
+
SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
socket.connection.setup.timeout.ms
+
+
SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
socket.connection.setup.timeout.ms
+
+
source() - Method in class org.apache.kafka.clients.admin.ConfigEntry.ConfigSynonym
+
+
Returns the source of this configuration.
+
+
source() - Method in class org.apache.kafka.clients.admin.ConfigEntry
+
+
Return the source of this configuration entry.
+
+
source() - Method in class org.apache.kafka.connect.mirror.SourceAndTarget
+
 
+
source() - Method in interface org.apache.kafka.streams.TopologyDescription.GlobalStore
+
+
The source node reading from a "global" topic.
+
+
SOURCE - Enum constant in enum class org.apache.kafka.connect.health.ConnectorType
+
+
Identifies a source connector
+
+
SOURCE_CLUSTER_ALIAS_CONFIG - Static variable in class org.apache.kafka.connect.mirror.IdentityReplicationPolicy
+
 
+
SOURCE_CLUSTER_ALIAS_KEY - Static variable in class org.apache.kafka.connect.mirror.Heartbeat
+
 
+
SourceAndTarget - Class in org.apache.kafka.connect.mirror
+
+
Directional pair of clusters, where source is mirrored to target.
+
+
SourceAndTarget(String, String) - Constructor for class org.apache.kafka.connect.mirror.SourceAndTarget
+
 
+
sourceClusterAlias() - Method in class org.apache.kafka.connect.mirror.Heartbeat
+
 
+
SourceConnector - Class in org.apache.kafka.connect.source
+
+
SourceConnectors implement the connector interface to pull data from another system and send it to Kafka.
+
+
SourceConnector() - Constructor for class org.apache.kafka.connect.source.SourceConnector
+
 
+
SourceConnectorContext - Interface in org.apache.kafka.connect.source
+
+
A context to allow a SourceConnector to interact with the Kafka Connect runtime.
+
+
sourceOffset() - Method in class org.apache.kafka.connect.source.SourceRecord
+
 
+
sourcePartition() - Method in class org.apache.kafka.connect.source.SourceRecord
+
 
+
sourceRawKey() - Method in interface org.apache.kafka.streams.errors.ErrorHandlerContext
+
+
Return the non-deserialized byte[] of the input message key if the context has been triggered by a message.
+
+
sourceRawKey() - Method in interface org.apache.kafka.streams.processor.RecordContext
+
+
Return the non-deserialized byte[] of the input message key if the context has been triggered by a message.
+
+
sourceRawValue() - Method in interface org.apache.kafka.streams.errors.ErrorHandlerContext
+
+
Return the non-deserialized byte[] of the input message value if the context has been triggered by a message.
+
+
sourceRawValue() - Method in interface org.apache.kafka.streams.processor.RecordContext
+
+
Return the non-deserialized byte[] of the input message value if the context has been triggered by a message.
+
+
SourceRecord - Class in org.apache.kafka.connect.source
+
+
SourceRecords are generated by SourceTasks and passed to Kafka Connect for storage in Kafka.
+
+
SourceRecord(Map<String, ?>, Map<String, ?>, String, Integer, Schema, Object) - Constructor for class org.apache.kafka.connect.source.SourceRecord
+
 
+
SourceRecord(Map<String, ?>, Map<String, ?>, String, Integer, Schema, Object, Schema, Object) - Constructor for class org.apache.kafka.connect.source.SourceRecord
+
 
+
SourceRecord(Map<String, ?>, Map<String, ?>, String, Integer, Schema, Object, Schema, Object, Long) - Constructor for class org.apache.kafka.connect.source.SourceRecord
+
 
+
SourceRecord(Map<String, ?>, Map<String, ?>, String, Integer, Schema, Object, Schema, Object, Long, Iterable<Header>) - Constructor for class org.apache.kafka.connect.source.SourceRecord
+
 
+
SourceRecord(Map<String, ?>, Map<String, ?>, String, Schema, Object) - Constructor for class org.apache.kafka.connect.source.SourceRecord
+
 
+
SourceRecord(Map<String, ?>, Map<String, ?>, String, Schema, Object, Schema, Object) - Constructor for class org.apache.kafka.connect.source.SourceRecord
+
 
+
SourceTask - Class in org.apache.kafka.connect.source
+
+
SourceTask is a Task that pulls records from another system for storage in Kafka.
+
+
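For the SourceTask entry, a minimal, hypothetical task that emits one static record per poll; the topic name is a placeholder, the source partition/offset maps are left empty, and a real task would use them to resume from where it left off.

```java
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

public class HelloSourceTask extends SourceTask {
    @Override
    public String version() {
        return "0.0.1"; // illustrative
    }

    @Override
    public void start(Map<String, String> props) {
        // Open connections to the upstream system here.
    }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        Thread.sleep(1000); // avoid a tight loop in this toy example
        return Collections.singletonList(new SourceRecord(
                Collections.emptyMap(), Collections.emptyMap(), // sourcePartition / sourceOffset
                "hello-topic", Schema.STRING_SCHEMA, "hello"));
    }

    @Override
    public void stop() {
        // Signal poll() to return and release resources.
    }
}
```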
SourceTask() - Constructor for class org.apache.kafka.connect.source.SourceTask
+
 
+
SourceTask.TransactionBoundary - Enum Class in org.apache.kafka.connect.source
+
+
Represents the permitted values for the SourceTask.TRANSACTION_BOUNDARY_CONFIG property.
+
+
SourceTaskContext - Interface in org.apache.kafka.connect.source
+
+
SourceTaskContext is provided to SourceTasks to allow them to interact with the underlying runtime.
+
+
sourceTopics() - Method in class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription
+
+
The topics the topology reads from.
+
+
split() - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Split this KStream into different branches.
+
+
split(Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
SSL - Enum constant in enum class org.apache.kafka.common.security.auth.SecurityProtocol
+
+
SSL channel
+
+
SSL_CIPHER_SUITES_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_CIPHER_SUITES_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_ENABLED_PROTOCOLS_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_ENABLED_PROTOCOLS_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_ENGINE_FACTORY_CLASS_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_ENGINE_FACTORY_CLASS_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_KEY_PASSWORD_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_KEY_PASSWORD_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_KEYMANAGER_ALGORITHM_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_KEYMANAGER_ALGORITHM_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_KEYSTORE_CERTIFICATE_CHAIN_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_KEYSTORE_CERTIFICATE_CHAIN_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_KEYSTORE_KEY_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_KEYSTORE_KEY_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_KEYSTORE_LOCATION_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_KEYSTORE_LOCATION_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_KEYSTORE_PASSWORD_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_KEYSTORE_PASSWORD_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_KEYSTORE_TYPE_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_KEYSTORE_TYPE_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_PROTOCOL_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_PROTOCOL_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_PROVIDER_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_PROVIDER_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_SECURE_RANDOM_IMPLEMENTATION_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_SECURE_RANDOM_IMPLEMENTATION_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_TRUSTMANAGER_ALGORITHM_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_TRUSTMANAGER_ALGORITHM_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_TRUSTSTORE_CERTIFICATES_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_TRUSTSTORE_CERTIFICATES_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_TRUSTSTORE_LOCATION_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_TRUSTSTORE_LOCATION_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_TRUSTSTORE_PASSWORD_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_TRUSTSTORE_PASSWORD_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_TRUSTSTORE_TYPE_CONFIG - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SSL_TRUSTSTORE_TYPE_DOC - Static variable in class org.apache.kafka.common.config.SslConfigs
+
 
+
SslAuthenticationContext - Class in org.apache.kafka.common.security.auth
+
 
+
SslAuthenticationContext(SSLSession, InetAddress, String) - Constructor for class org.apache.kafka.common.security.auth.SslAuthenticationContext
+
 
+
SslAuthenticationException - Exception in org.apache.kafka.common.errors
+
+
This exception indicates that SSL handshake has failed.
+
+
SslAuthenticationException(String) - Constructor for exception org.apache.kafka.common.errors.SslAuthenticationException
+
 
+
SslAuthenticationException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.SslAuthenticationException
+
 
+
SslClientAuth - Enum Class in org.apache.kafka.common.config
+
+
Describes whether the server should require or request client authentication.
+
+
SslConfigs - Class in org.apache.kafka.common.config
+
 
+
SslConfigs() - Constructor for class org.apache.kafka.common.config.SslConfigs
+
 
+
SslEngineFactory - Interface in org.apache.kafka.common.security.auth
+
+
Plugin interface for allowing creation of SSLEngine object in a custom way.
+
+
sslSession() - Method in class org.apache.kafka.common.security.auth.SaslAuthenticationContext
+
+
Returns SSL session for the connection if security protocol is SASL_SSL.
+
+
STABLE - Enum constant in enum class org.apache.kafka.common.ClassicGroupState
+
 
+
STABLE - Enum constant in enum class org.apache.kafka.common.ConsumerGroupState
+
+
Deprecated.
+
STABLE - Enum constant in enum class org.apache.kafka.common.GroupState
+
 
+
StaleBrokerEpochException - Exception in org.apache.kafka.common.errors
+
 
+
StaleBrokerEpochException(String) - Constructor for exception org.apache.kafka.common.errors.StaleBrokerEpochException
+
 
+
StaleBrokerEpochException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.StaleBrokerEpochException
+
 
+
StaleMemberEpochException - Exception in org.apache.kafka.common.errors
+
+
The StaleMemberEpochException is used in the context of the new consumer group protocol (KIP-848).
+
+
StaleMemberEpochException(String) - Constructor for exception org.apache.kafka.common.errors.StaleMemberEpochException
+
 
+
staleStoresEnabled() - Method in class org.apache.kafka.streams.StoreQueryParameters
+
+
Get the flag staleStores.
+
+
STANDBY - Enum constant in enum class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask.Type
+
 
+
standbyHosts() - Method in class org.apache.kafka.streams.KeyQueryMetadata
+
+
Get the Kafka Streams instances that host the key as standbys.
+
+
standbyStateStoreNames() - Method in interface org.apache.kafka.streams.StreamsMetadata
+
+
Names of the state stores assigned to standby tasks of the Streams client.
+
+
standbyTasks() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberAssignment
+
+
Standby tasks for this client.
+
+
standbyTasks() - Method in interface org.apache.kafka.streams.ThreadMetadata
+
+
Metadata of the standby tasks assigned to the stream thread.
+
+
standbyTopicPartitions() - Method in interface org.apache.kafka.streams.StreamsMetadata
+
+
Source topic partitions for which the instance acts as standby.
+
+
StandbyUpdateListener - Interface in org.apache.kafka.streams.processor
+
 
+
StandbyUpdateListener.SuspendReason - Enum Class in org.apache.kafka.streams.processor
+
 
+
start() - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Start the KafkaStreams instance by starting all its threads.
+
+
start() - Method in class org.apache.kafka.streams.kstream.Window
+
+
Return the start timestamp of this window.
+
+
start(Map<String, String>) - Method in class org.apache.kafka.connect.connector.Connector
+
+
Start this Connector.
+
+
start(Map<String, String>) - Method in interface org.apache.kafka.connect.connector.Task
+
+
Start the Task.
+
+
start(Map<String, String>) - Method in class org.apache.kafka.connect.sink.SinkTask
+
+
Start the Task.
+
+
start(Map<String, String>) - Method in class org.apache.kafka.connect.source.SourceTask
+
+
Start the Task.
+
+
start(Map<String, String>) - Method in class org.apache.kafka.connect.tools.MockConnector
+
 
+
start(Map<String, String>) - Method in class org.apache.kafka.connect.tools.MockSinkConnector
+
 
+
start(Map<String, String>) - Method in class org.apache.kafka.connect.tools.MockSinkTask
+
 
+
start(Map<String, String>) - Method in class org.apache.kafka.connect.tools.MockSourceConnector
+
 
+
start(Map<String, String>) - Method in class org.apache.kafka.connect.tools.MockSourceTask
+
 
+
start(Map<String, String>) - Method in class org.apache.kafka.connect.tools.SchemaSourceConnector
+
 
+
start(Map<String, String>) - Method in class org.apache.kafka.connect.tools.SchemaSourceTask
+
 
+
start(Map<String, String>) - Method in class org.apache.kafka.connect.tools.VerifiableSinkConnector
+
 
+
start(Map<String, String>) - Method in class org.apache.kafka.connect.tools.VerifiableSinkTask
+
 
+
start(Map<String, String>) - Method in class org.apache.kafka.connect.tools.VerifiableSourceConnector
+
 
+
start(Map<String, String>) - Method in class org.apache.kafka.connect.tools.VerifiableSourceTask
+
 
+
start(AuthorizerServerInfo) - Method in interface org.apache.kafka.server.authorizer.Authorizer
+
+
Starts loading authorization metadata and returns futures that can be used to wait until metadata for authorizing requests on each listener is available.
+
+
startMs - Variable in class org.apache.kafka.streams.kstream.UnlimitedWindows
+
+
The start timestamp of the window.
+
+
startOffset() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
 
+
startOn(Instant) - Method in class org.apache.kafka.streams.kstream.UnlimitedWindows
+
+
Return a new unlimited window for the specified start timestamp.
+
+
startTime() - Method in class org.apache.kafka.streams.kstream.Window
+
+
Return the start time of this window.
+
+
startTimeMs() - Method in interface org.apache.kafka.common.security.oauthbearer.OAuthBearerToken
+
+
When the credential became valid, in terms of the number of milliseconds since the epoch, if known, otherwise null.
+
+
stat() - Method in class org.apache.kafka.common.metrics.CompoundStat.NamedMeasurable
+
 
+
Stat - Interface in org.apache.kafka.common.metrics
+
+
A Stat is a quantity, such as an average or a maximum, that is computed off the stream of updates to a sensor.
+
+
state() - Method in class org.apache.kafka.clients.admin.ClassicGroupDescription
+
+
The classic group state, or UNKNOWN if the state is too new for us to parse.
+
+
state() - Method in class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+
Deprecated.
+
+
state() - Method in class org.apache.kafka.clients.admin.ConsumerGroupListing
+
+
Deprecated. Since 4.0. Use ConsumerGroupListing.groupState() instead.
+
+
+
state() - Method in class org.apache.kafka.clients.admin.TransactionDescription
+
 
+
state() - Method in class org.apache.kafka.clients.admin.TransactionListing
+
 
+
state() - Method in class org.apache.kafka.connect.health.AbstractState
+
+
Provides the current state of the connector or task.
+
+
state() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
+
Returns the current state of this remote log segment.
+
+
state() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate
+
+
It represents the state of the remote log segment.
+
+
state() - Method in class org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteMetadata
+
+
It represents the state of the remote partition.
+
+
state() - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Return the current KafkaStreams.State of this KafkaStreams instance.
+
+
STATE_CLEANUP_DELAY_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
state.cleanup.delay.ms
+
+
STATE_DIR_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
state.dir
+
+
STATE_UPDATER_ENABLED - Static variable in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
stateChangelogTopics() - Method in class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription
+
+
The set of state changelog topics associated with this subtopology.
+
+
stateDir() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
stateDir() - Method in interface org.apache.kafka.streams.processor.api.ProcessingContext
+
+
Return the state directory for the partition.
+
+
stateDir() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
stateDir() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return the state directory for the partition.
+
+
stateDir() - Method in interface org.apache.kafka.streams.processor.StateStoreContext
+
+
Returns the state directory for the partition.
+
+
statefulTasksToLagSums() - Method in interface org.apache.kafka.streams.processor.assignment.KafkaStreamsState
+
+
Returns a collection containing all (and only) stateful tasks in the topology by TaskId, each mapped to its "offset lag sum".
+
+
StateQueryRequest<R> - Class in org.apache.kafka.streams.query
+
+
The request object for Interactive Queries.
+
+
StateQueryRequest.InStore - Class in org.apache.kafka.streams.query
+
+
A progressive builder interface for creating StateQueryRequests.
+
+
StateQueryResult<R> - Class in org.apache.kafka.streams.query
+
+
The response object for interactive queries.
+
+
StateQueryResult() - Constructor for class org.apache.kafka.streams.query.StateQueryResult
+
 
+
StateRestoreCallback - Interface in org.apache.kafka.streams.processor
+
+
Restoration logic for log-backed state stores upon restart; it takes one record at a time from the logs to apply to the restoring state.
+
+
StateRestoreListener - Interface in org.apache.kafka.streams.processor
+
+
Class for listening to various states of the restoration process of a StateStore.
+
+
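For the StateRestoreListener entry, a hedged sketch of a logging listener; the three callbacks shown are the ones the interface is commonly documented with, and registration is assumed to go through KafkaStreams#setGlobalStateRestoreListener as described near the top of this index.

```java
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.processor.StateRestoreListener;

public class LoggingRestoreListener implements StateRestoreListener {
    @Override
    public void onRestoreStart(TopicPartition topicPartition, String storeName,
                               long startingOffset, long endingOffset) {
        System.out.println("Restore started for " + storeName);
    }

    @Override
    public void onBatchRestored(TopicPartition topicPartition, String storeName,
                                long batchEndOffset, long numRestored) {
        System.out.println("Restored " + numRestored + " records into " + storeName);
    }

    @Override
    public void onRestoreEnd(TopicPartition topicPartition, String storeName, long totalRestored) {
        System.out.println("Restore finished for " + storeName);
    }

    // Registration must happen before start().
    static void register(KafkaStreams streams) {
        streams.setGlobalStateRestoreListener(new LoggingRestoreListener());
    }
}
```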
states() - Method in class org.apache.kafka.clients.admin.ListConsumerGroupsOptions
+
+
Deprecated.
+
+
StateSerdes<K,V> - Class in org.apache.kafka.streams.state
+
+
Factory for creating serializers / deserializers for state stores in Kafka Streams.
+
+
StateSerdes(String, Serde<K>, Serde<V>) - Constructor for class org.apache.kafka.streams.state.StateSerdes
+
+
Create a context for serialization using the specified serializers and deserializers, which must match the key and value types used as parameters for this object; the state changelog topic is provided to bind this serde factory to, so that future calls to serialize / deserialize do not need to provide the topic name any more.
+
+
StateStore - Interface in org.apache.kafka.streams.processor
+
+
A storage engine for managing state maintained by a stream processor.
+
+
STATESTORE_CACHE_MAX_BYTES_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
statestore.cache.max.bytes
+
+
STATESTORE_CACHE_MAX_BYTES_DOC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
StateStoreContext - Interface in org.apache.kafka.streams.processor
+
+
State store context interface.
+
+
StateStoreMigratedException - Exception in org.apache.kafka.streams.errors
+
+
Indicates that the state store being queried is closed although the Kafka Streams state is RUNNING or REBALANCING.
+
+
StateStoreMigratedException(String) - Constructor for exception org.apache.kafka.streams.errors.StateStoreMigratedException
+
 
+
StateStoreMigratedException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.StateStoreMigratedException
+
 
+
stateStoreNames() - Method in interface org.apache.kafka.streams.processor.assignment.TaskInfo
+
 
+
stateStoreNames() - Method in interface org.apache.kafka.streams.StreamsMetadata
+
+
Names of the state stores assigned to active tasks of the Streams client.
+
+
StateStoreNotAvailableException - Exception in org.apache.kafka.streams.errors
+
+
Indicates that the state store being queried is already closed.
+
+
StateStoreNotAvailableException(String) - Constructor for exception org.apache.kafka.streams.errors.StateStoreNotAvailableException
+
 
+
StateStoreNotAvailableException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.StateStoreNotAvailableException
+
 
+
stateUpdaterEnabled(Map<String, Object>) - Static method in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
STATIC_BROKER_CONFIG - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigSource
+
 
+
stats() - Method in interface org.apache.kafka.common.metrics.CompoundStat
+
 
+
stats() - Method in class org.apache.kafka.common.metrics.stats.Frequencies
+
 
+
stats() - Method in class org.apache.kafka.common.metrics.stats.Meter
+
 
+
stats() - Method in class org.apache.kafka.common.metrics.stats.Percentiles
+
 
+
STICKY_ASSIGNOR_NAME - Static variable in class org.apache.kafka.clients.consumer.StickyAssignor
+
 
+
StickyAssignor - Class in org.apache.kafka.clients.consumer
+
+
The sticky assignor serves two purposes.
+
+
StickyAssignor() - Constructor for class org.apache.kafka.clients.consumer.StickyAssignor
+
 
+
StickyTaskAssignor - Class in org.apache.kafka.streams.processor.assignment.assignors
+
 
+
StickyTaskAssignor() - Constructor for class org.apache.kafka.streams.processor.assignment.assignors.StickyTaskAssignor
+
 
+
StickyTaskAssignor(boolean) - Constructor for class org.apache.kafka.streams.processor.assignment.assignors.StickyTaskAssignor
+
 
+
stop() - Method in class org.apache.kafka.connect.connector.Connector
+
+
Stop this connector.
+
+
stop() - Method in interface org.apache.kafka.connect.connector.Task
+
+
Stop this task.
+
+
stop() - Method in class org.apache.kafka.connect.sink.SinkTask
+
+
Perform any cleanup to stop this task.
+
+
stop() - Method in class org.apache.kafka.connect.source.SourceTask
+
+
Signal this SourceTask to stop.
+
+
stop() - Method in class org.apache.kafka.connect.tools.MockConnector
+
 
+
stop() - Method in class org.apache.kafka.connect.tools.MockSinkConnector
+
 
+
stop() - Method in class org.apache.kafka.connect.tools.MockSinkTask
+
 
+
stop() - Method in class org.apache.kafka.connect.tools.MockSourceConnector
+
 
+
stop() - Method in class org.apache.kafka.connect.tools.MockSourceTask
+
 
+
stop() - Method in class org.apache.kafka.connect.tools.SchemaSourceConnector
+
 
+
stop() - Method in class org.apache.kafka.connect.tools.SchemaSourceTask
+
 
+
stop() - Method in class org.apache.kafka.connect.tools.VerifiableSinkConnector
+
 
+
stop() - Method in class org.apache.kafka.connect.tools.VerifiableSinkTask
+
 
+
stop() - Method in class org.apache.kafka.connect.tools.VerifiableSourceConnector
+
 
+
stop() - Method in class org.apache.kafka.connect.tools.VerifiableSourceTask
+
 
+
store(StoreQueryParameters<T>) - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Get a facade wrapping the local StateStore instances with the provided StoreQueryParameters.
+
+
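For the store(StoreQueryParameters) entry, a hedged sketch of fetching a read-only key-value store facade for interactive queries; the store name "counts-store" and the key/value types are illustrative.

```java
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public class StoreFacadeExample {
    static Long lookup(KafkaStreams streams, String key) {
        // Wrap all local instances of the named store in a read-only facade.
        ReadOnlyKeyValueStore<String, Long> store = streams.store(
                StoreQueryParameters.fromNameAndType("counts-store",
                        QueryableStoreTypes.<String, Long>keyValueStore()));
        return store.get(key);
    }
}
```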
STORE_EXCEPTION - Enum constant in enum class org.apache.kafka.streams.query.FailureReason
+
+
The store that handled the query got an exception during query execution.
+
+
StoreBuilder<T extends StateStore> - Interface in org.apache.kafka.streams.state
+
+
Build a StateStore wrapped with optional caching and logging.
+
+
storedKey() - Method in class org.apache.kafka.common.security.scram.ScramCredential
+
+
Stored key computed from the client password using the SCRAM algorithm.
+
+
storeName() - Method in class org.apache.kafka.streams.StoreQueryParameters
+
+
Get the name of the state store that should be queried.
+
+
StoreQueryParameters<T> - Class in org.apache.kafka.streams
+
+
StoreQueryParameters allows you to pass a variety of parameters when fetching a store for interactive query.
+
+
stores() - Method in interface org.apache.kafka.streams.processor.ConnectedStoreProvider
+
 
+
stores() - Method in interface org.apache.kafka.streams.TopologyDescription.Processor
+
+
The names of all connected stores.
+
+
Stores - Class in org.apache.kafka.streams.state
+
+
Factory for creating state stores in Kafka Streams.
+
+
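For the Stores entry, a hedged sketch of building a persistent key-value store via a StoreBuilder and registering it on a topology; the store name is a placeholder.

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

public class StoreBuilderExample {
    static void addStore(StreamsBuilder builder) {
        StoreBuilder<KeyValueStore<String, Long>> storeBuilder = Stores.keyValueStoreBuilder(
                Stores.persistentKeyValueStore("counts-store"), // placeholder store name
                Serdes.String(),
                Serdes.Long());
        // Register the store so processors can later connect to it by name.
        builder.addStateStore(storeBuilder);
    }
}
```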
Stores() - Constructor for class org.apache.kafka.streams.state.Stores
+
 
+
StoreSupplier<T extends StateStore> - Interface in org.apache.kafka.streams.state
+
+
A state store supplier which can create one or more StateStore instances.
+
+
storeType - Variable in class org.apache.kafka.streams.TopologyConfig
+
 
+
stream(String) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Create a KStream from the specified topic.
+
+
stream(String, Consumed<K, V>) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Create a KStream from the specified topic.
+
+
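For the stream(String, Consumed) entry, a hedged one-liner topology; the topic names and serdes are assumptions, not part of the index.

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

public class StreamExample {
    static Topology build() {
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic", Consumed.with(Serdes.String(), Serdes.String()))
               .mapValues(value -> value.toUpperCase())
               .to("output-topic", Produced.with(Serdes.String(), Serdes.String()));
        return builder.build();
    }
}
```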
stream(Collection<String>) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Create a KStream from the specified topics.
+
+
stream(Collection<String>, Consumed<K, V>) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Create a KStream from the specified topics.
+
+
stream(Pattern) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Create a KStream from the specified topic pattern.
+
+
stream(Pattern, Consumed<K, V>) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Create a KStream from the specified topic pattern.
+
+
STREAM_TIME - Enum constant in enum class org.apache.kafka.streams.processor.PunctuationType
+
 
+
StreamJoined<K,V1,V2> - Class in org.apache.kafka.streams.kstream
+
+
Class used to configure the name of the join processor, the repartition topic name, state stores, or state store names in a stream-stream join.
+
+
streamPartitioner(StreamPartitioner<? super K, ? super V>) - Static method in class org.apache.kafka.streams.kstream.Produced
+
+
Create a Produced instance with provided partitioner.
+
+
streamPartitioner(StreamPartitioner<K, V>) - Static method in class org.apache.kafka.streams.kstream.Repartitioned
+
+
Create a Repartitioned instance with provided partitioner.
+
+
StreamPartitioner<K,V> - Interface in org.apache.kafka.streams.processor
+
+
Determine how records are distributed among the partitions in a Kafka topic.
+
+
STREAMS - Enum constant in enum class org.apache.kafka.common.GroupType
+
 
+
STREAMS - Enum constant in enum class org.apache.kafka.streams.GroupProtocol
+
+
Streams group protocol
+
+
StreamsBuilder - Class in org.apache.kafka.streams
+
+
StreamsBuilder provides the high-level Kafka Streams DSL to specify a Kafka Streams topology.
+
+
StreamsBuilder() - Constructor for class org.apache.kafka.streams.StreamsBuilder
+
 
+
StreamsBuilder(TopologyConfig) - Constructor for class org.apache.kafka.streams.StreamsBuilder
+
+
Create a StreamsBuilder instance.
+
+
StreamsConfig - Class in org.apache.kafka.streams
+
+
Configuration for a KafkaStreams instance.
+
+
StreamsConfig(Map<?, ?>) - Constructor for class org.apache.kafka.streams.StreamsConfig
+
+
Create a new StreamsConfig using the given properties.
+
+
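For the StreamsConfig(Map) constructor, a hedged sketch of the two settings that are typically required; the broker address and application ID are placeholders.

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.streams.StreamsConfig;

public class StreamsConfigExample {
    static StreamsConfig build() {
        Map<String, Object> settings = new HashMap<>();
        settings.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-streams-app");     // placeholder
        settings.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // placeholder
        return new StreamsConfig(settings);
    }
}
```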
StreamsConfig.InternalConfig - Class in org.apache.kafka.streams
+
 
+
StreamsException - Exception in org.apache.kafka.streams.errors
+
+
StreamsException is the top-level exception type generated by Kafka Streams, and indicates errors have occurred during a StreamThread's processing.
+
+
StreamsException(String) - Constructor for exception org.apache.kafka.streams.errors.StreamsException
+
 
+
StreamsException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.StreamsException
+
 
+
StreamsException(String, Throwable, TaskId) - Constructor for exception org.apache.kafka.streams.errors.StreamsException
+
 
+
StreamsException(String, TaskId) - Constructor for exception org.apache.kafka.streams.errors.StreamsException
+
 
+
StreamsException(Throwable) - Constructor for exception org.apache.kafka.streams.errors.StreamsException
+
 
+
StreamsException(Throwable, TaskId) - Constructor for exception org.apache.kafka.streams.errors.StreamsException
+
 
+
StreamsGroupDescription - Class in org.apache.kafka.clients.admin
+
+
A detailed description of a single streams group in the cluster.
+
+
StreamsGroupDescription(String, int, int, int, Collection<StreamsGroupSubtopologyDescription>, Collection<StreamsGroupMemberDescription>, GroupState, Node, Set<AclOperation>) - Constructor for class org.apache.kafka.clients.admin.StreamsGroupDescription
+
 
+
StreamsGroupMemberAssignment - Class in org.apache.kafka.clients.admin
+
+
A description of the assignments of a specific group member.
+
+
StreamsGroupMemberAssignment(List<StreamsGroupMemberAssignment.TaskIds>, List<StreamsGroupMemberAssignment.TaskIds>, List<StreamsGroupMemberAssignment.TaskIds>) - Constructor for class org.apache.kafka.clients.admin.StreamsGroupMemberAssignment
+
 
+
StreamsGroupMemberAssignment.TaskIds - Class in org.apache.kafka.clients.admin
+
+
All tasks for one subtopology of a member.
+
+
StreamsGroupMemberDescription - Class in org.apache.kafka.clients.admin
+
+
A detailed description of a single streams group member in the cluster.
+
+
StreamsGroupMemberDescription(String, int, Optional<String>, Optional<String>, String, String, int, String, Optional<StreamsGroupMemberDescription.Endpoint>, Map<String, String>, List<StreamsGroupMemberDescription.TaskOffset>, List<StreamsGroupMemberDescription.TaskOffset>, StreamsGroupMemberAssignment, StreamsGroupMemberAssignment, boolean) - Constructor for class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
 
+
StreamsGroupMemberDescription.Endpoint - Class in org.apache.kafka.clients.admin
+
+
The user-defined endpoint for the member.
+
+
StreamsGroupMemberDescription.TaskOffset - Class in org.apache.kafka.clients.admin
+
+
The cumulative offset for one task.
+
+
StreamsGroupSubtopologyDescription - Class in org.apache.kafka.clients.admin
+
+
A detailed description of a subtopology in a streams group.
+
+
StreamsGroupSubtopologyDescription(String, List<String>, List<String>, Map<String, StreamsGroupSubtopologyDescription.TopicInfo>, Map<String, StreamsGroupSubtopologyDescription.TopicInfo>) - Constructor for class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription
+
 
+
StreamsGroupSubtopologyDescription.TopicInfo - Class in org.apache.kafka.clients.admin
+
+
Information about a topic.
+
+
StreamsInvalidTopologyEpochException - Exception in org.apache.kafka.common.errors
+
 
+
StreamsInvalidTopologyEpochException(String) - Constructor for exception org.apache.kafka.common.errors.StreamsInvalidTopologyEpochException
+
 
+
StreamsInvalidTopologyException - Exception in org.apache.kafka.common.errors
+
 
+
StreamsInvalidTopologyException(String) - Constructor for exception org.apache.kafka.common.errors.StreamsInvalidTopologyException
+
 
+
StreamsMetadata - Interface in org.apache.kafka.streams
+
+
Metadata of a Kafka Streams client.
+
+
streamsMetadataForStore(String) - Method in class org.apache.kafka.streams.KafkaStreams
+
+
Find all currently running KafkaStreams instances (potentially remotely) that use the same application ID as this instance (i.e., all instances that belong to the same Kafka Streams application) and that contain a StateStore with the given storeName, and return StreamsMetadata for each discovered instance.
+
+
StreamsMetrics - Interface in org.apache.kafka.streams
+
+
The Kafka Streams metrics interface for adding metric sensors and collecting metric values.
+
+
StreamsNotStartedException - Exception in org.apache.kafka.streams.errors
+
+
Indicates that Kafka Streams is in state CREATED and thus state stores cannot be queried yet.
+
+
StreamsNotStartedException(String) - Constructor for exception org.apache.kafka.streams.errors.StreamsNotStartedException
+
 
+
StreamsNotStartedException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.StreamsNotStartedException
+
 
+
StreamsRebalancingException - Exception in org.apache.kafka.streams.errors
+
+
Indicates that Kafka Streams is in state REBALANCING and thus cannot be queried by default.
+
+
StreamsRebalancingException(String) - Constructor for exception org.apache.kafka.streams.errors.StreamsRebalancingException
+
 
+
StreamsRebalancingException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.StreamsRebalancingException
+
 
+
StreamsStoppedException - Exception in org.apache.kafka.streams.errors
+
+
Indicates that Kafka Streams is in a terminating or terminal state, such as KafkaStreams.State.PENDING_SHUTDOWN, KafkaStreams.State.PENDING_ERROR, KafkaStreams.State.NOT_RUNNING, or KafkaStreams.State.ERROR.
+
+
StreamsStoppedException(String) - Constructor for exception org.apache.kafka.streams.errors.StreamsStoppedException
+
 
+
StreamsStoppedException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.StreamsStoppedException
+
 
+
StreamsTopologyFencedException - Exception in org.apache.kafka.common.errors
+
 
+
StreamsTopologyFencedException(String) - Constructor for exception org.apache.kafka.common.errors.StreamsTopologyFencedException
+
 
+
StreamsUncaughtExceptionHandler - Interface in org.apache.kafka.streams.errors
+
 
+
StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse - Enum Class in org.apache.kafka.streams.errors
+
+
Enumeration that describes the response from the exception handler.
+
+
strict() - Method in class org.apache.kafka.common.quota.ClientQuotaFilter
+
 
+
string() - Static method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
String() - Static method in class org.apache.kafka.common.serialization.Serdes
+
+
A serde for nullable String type.
+
+
STRING - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigType
+
 
+
STRING - Enum constant in enum class org.apache.kafka.common.config.ConfigDef.Type
+
+
Used for string values.
+
+
STRING - Enum constant in enum class org.apache.kafka.connect.data.Schema.Type
+
+
Character string that supports all Unicode characters.
+
+
STRING_SCHEMA - Static variable in interface org.apache.kafka.connect.data.Schema
+
 
+
StringConverter - Class in org.apache.kafka.connect.storage
+
+
Converter and HeaderConverter implementation that only supports serializing to strings.
+
+
StringConverter() - Constructor for class org.apache.kafka.connect.storage.StringConverter
+
 
+
StringConverterConfig - Class in org.apache.kafka.connect.storage
+
+
Configuration options for StringConverter instances.
+
+
StringConverterConfig(Map<String, ?>) - Constructor for class org.apache.kafka.connect.storage.StringConverterConfig
+
 
+
StringDecoder - Class in org.apache.kafka.tools.api
+
+
The string decoder translates bytes into strings.
+
+
StringDecoder() - Constructor for class org.apache.kafka.tools.api.StringDecoder
+
 
+
StringDeserializer - Class in org.apache.kafka.common.serialization
+
+
String encoding defaults to UTF8 and can be customized by setting the property key.deserializer.encoding, value.deserializer.encoding or deserializer.encoding.
+
+
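For the StringDeserializer entry, a hedged sketch of overriding the encoding via the property named in the description; UTF-16 is just an example value.

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.serialization.StringDeserializer;

public class EncodingExample {
    static StringDeserializer utf16ValueDeserializer() {
        StringDeserializer deserializer = new StringDeserializer();
        Map<String, Object> configs = new HashMap<>();
        configs.put("value.deserializer.encoding", "UTF-16"); // property named in the entry above
        deserializer.configure(configs, false);               // false = configure as a value deserializer
        return deserializer;
    }
}
```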
StringDeserializer() - Constructor for class org.apache.kafka.common.serialization.StringDeserializer
+
 
+
StringSerde() - Constructor for class org.apache.kafka.common.serialization.Serdes.StringSerde
+
 
+
StringSerializer - Class in org.apache.kafka.common.serialization
+
+
String encoding defaults to UTF8 and can be customized by setting the property key.serializer.encoding, value.serializer.encoding or serializer.encoding.
+
+
StringSerializer() - Constructor for class org.apache.kafka.common.serialization.StringSerializer
+
 
+
struct() - Static method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
Struct - Class in org.apache.kafka.connect.data
+
+
A structured record containing a set of named fields with values, each field using an independent Schema.
+
+
Struct(Schema) - Constructor for class org.apache.kafka.connect.data.Struct
+
+
Create a new Struct for this Schema.
+
+
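For the Struct entry, a hedged sketch pairing SchemaBuilder.struct() with Struct; the schema and field names are arbitrary examples.

```java
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class StructExample {
    static Struct build() {
        Schema schema = SchemaBuilder.struct()
                .name("com.example.User")                 // arbitrary schema name
                .field("name", Schema.STRING_SCHEMA)
                .field("age", Schema.INT32_SCHEMA)
                .build();
        // Each put() must match the field's schema; build() above froze the schema.
        return new Struct(schema)
                .put("name", "alice")
                .put("age", 30);
    }
}
```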
STRUCT - Enum constant in enum class org.apache.kafka.connect.data.Schema.Type
+
+
A structured record containing a set of named fields, each field using a fixed, independent Schema.
+
+
subject() - Method in interface org.apache.kafka.common.security.auth.Login
+
+
Returns the authenticated subject of this login context.
+
+
subscribe(String, Set<String>, ConfigChangeCallback) - Method in interface org.apache.kafka.common.config.provider.ConfigProvider
+
+
Subscribes to changes for the given keys at the given path (optional operation).
+
+
subscribe(Collection<String>) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
subscribe(Collection<String>) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Subscribe to the given list of topics to get dynamically assigned partitions.
+
+
subscribe(Collection<String>) - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Subscribe to the given list of topics to get dynamically assigned partitions.
+
+
subscribe(Collection<String>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
subscribe(Collection<String>) - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
subscribe(Collection<String>) - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
subscribe(Collection<String>, ConsumerRebalanceListener) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
subscribe(Collection<String>, ConsumerRebalanceListener) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Subscribe to the given list of topics to get dynamically assigned partitions.
+
+
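For the subscribe(Collection, ConsumerRebalanceListener) entries, a hedged sketch of a rebalance listener that logs partition movements; the consumer instance and topic name are placeholders.

```java
import java.util.Collection;
import java.util.Collections;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

public class RebalanceListenerExample {
    static void subscribe(Consumer<String, String> consumer) {
        consumer.subscribe(Collections.singletonList("orders"), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                // Commit offsets or flush state for the partitions being taken away.
                System.out.println("Revoked: " + partitions);
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                System.out.println("Assigned: " + partitions);
            }
        });
    }
}
```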
subscribe(Collection<String>, ConsumerRebalanceListener) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
subscribe(Pattern) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
subscribe(Pattern) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Subscribe to all topics matching specified pattern to get dynamically assigned partitions.
+
+
subscribe(Pattern) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
subscribe(Pattern, ConsumerRebalanceListener) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
subscribe(Pattern, ConsumerRebalanceListener) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Subscribe to all topics matching specified pattern to get dynamically assigned partitions.
+
+
subscribe(Pattern, ConsumerRebalanceListener) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
subscribe(SubscriptionPattern) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
subscribe(SubscriptionPattern) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Subscribe to all topics matching the specified pattern, to get dynamically assigned partitions.
+
+
subscribe(SubscriptionPattern) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
subscribe(SubscriptionPattern, ConsumerRebalanceListener) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
subscribe(SubscriptionPattern, ConsumerRebalanceListener) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Subscribe to all topics matching the specified pattern, to get dynamically assigned partitions.
+
+
subscribe(SubscriptionPattern, ConsumerRebalanceListener) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
SubscribedTopicDescriber - Interface in org.apache.kafka.coordinator.group.api.assignor
+
+
The subscribed topic describer is used by the PartitionAssignor to obtain topic and partition metadata of the subscribed topics.
+
+
subscribedTopicIds() - Method in interface org.apache.kafka.coordinator.group.api.assignor.MemberSubscription
+
+
Gets the set of subscribed topic Ids.
+
+
subscription() - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
subscription() - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Get the current subscription.
+
+
subscription() - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Get the current subscription.
+
+
subscription() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
subscription() - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
subscription() - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
Subscription(List<String>) - Constructor for class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
+
 
+
Subscription(List<String>, ByteBuffer) - Constructor for class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
+
 
+
Subscription(List<String>, ByteBuffer, List<TopicPartition>) - Constructor for class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
+
 
+
Subscription(List<String>, ByteBuffer, List<TopicPartition>, int, Optional<String>) - Constructor for class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
+
 
+
SubscriptionPattern - Class in org.apache.kafka.clients.consumer
+
+
Represents a regular expression compatible with Google RE2/J, used to subscribe to topics.
+
+
SubscriptionPattern(String) - Constructor for class org.apache.kafka.clients.consumer.SubscriptionPattern
+
 
+
subscriptionType() - Method in interface org.apache.kafka.coordinator.group.api.assignor.GroupSpec
+
 
+
SubscriptionType - Enum Class in org.apache.kafka.coordinator.group.api.assignor
+
+
The subscription type followed by a consumer group.
+
+
subscriptionUserData(Set<String>) - Method in interface org.apache.kafka.clients.consumer.ConsumerPartitionAssignor
+
+
Return serialized data that will be included in the ConsumerPartitionAssignor.Subscription sent to the leader and can be leveraged in ConsumerPartitionAssignor.assign(Cluster, GroupSubscription) (e.g.
+
+
subscriptionUserData(Set<String>) - Method in class org.apache.kafka.clients.consumer.CooperativeStickyAssignor
+
 
+
subscriptionUserData(Set<String>) - Method in class org.apache.kafka.clients.consumer.StickyAssignor
+
 
+
subtopologies() - Method in class org.apache.kafka.clients.admin.StreamsGroupDescription
+
+
A list of the subtopologies in the streams group.
+
+
subtopologies() - Method in interface org.apache.kafka.streams.TopologyDescription
+
+
All sub-topologies of the represented topology.
+
+
subtopology() - Method in class org.apache.kafka.streams.processor.TaskId
+
 
+
subtopologyId() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberAssignment.TaskIds
+
+
The subtopology identifier.
+
+
subtopologyId() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription.TaskOffset
+
+
The subtopology identifier.
+
+
subtopologyId() - Method in class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription
+
+
String to uniquely identify the subtopology.
+
+
SUBTRACT - Enum constant in enum class org.apache.kafka.clients.admin.AlterConfigOp.OpType
+
+
(For list-type configuration entries only.) Removes the specified values from the current value of the configuration entry.
+
+
SUCCESS - Static variable in class org.apache.kafka.server.authorizer.AclCreateResult
+
 
+
successors() - Method in interface org.apache.kafka.streams.TopologyDescription.Node
+
+
The successors of this node within a sub-topology.
+
+
SUPPORTED - Enum constant in enum class org.apache.kafka.connect.source.ConnectorTransactionBoundaries
+
+
Signals that a connector can define its own transaction boundaries.
+
+
SUPPORTED - Enum constant in enum class org.apache.kafka.connect.source.ExactlyOnceSupport
+
+
Signals that a connector supports exactly-once semantics.
+
+
supportedFeatures() - Method in class org.apache.kafka.clients.admin.FeatureMetadata
+
+
Returns a map of supported feature versions.
+
+
supportedProtocols() - Method in interface org.apache.kafka.clients.consumer.ConsumerPartitionAssignor
+
+
Indicate which rebalance protocol this assignor works with; by default it should always work with ConsumerPartitionAssignor.RebalanceProtocol.EAGER.
+
+
supportedProtocols() - Method in class org.apache.kafka.clients.consumer.CooperativeStickyAssignor
+
 
+
SupportedVersionRange - Class in org.apache.kafka.clients.admin
+
+
Represents a range of versions that a particular broker supports for some feature.
+
+
SupportedVersionRange(short, short) - Constructor for class org.apache.kafka.clients.admin.SupportedVersionRange
+
+
Raises an exception unless the following conditions are met: 0 <= minVersion <= maxVersion.
+
+
suppress(Suppressed<? super K>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Suppress some updates from this changelog stream, determined by the supplied Suppressed configuration.
+
+
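For the suppress(Suppressed) entry, a hedged sketch that emits a windowed count only after the window (plus grace) has closed; the window size, topics, and serdes are assumptions, and serde configuration for the windowed output key is omitted for brevity.

```java
import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.Suppressed;
import org.apache.kafka.streams.kstream.TimeWindows;

public class SuppressExample {
    static void build(StreamsBuilder builder) {
        builder.stream("clicks", Consumed.with(Serdes.String(), Serdes.String()))
               .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
               .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofMinutes(1), Duration.ofSeconds(30)))
               .count()
               // Hold updates back until the window closes, then emit only the final count per key.
               .suppress(Suppressed.untilWindowCloses(Suppressed.BufferConfig.unbounded()))
               .toStream()
               .to("final-counts");
    }
}
```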
Suppressed<K> - Interface in org.apache.kafka.streams.kstream
+
 
+
Suppressed.BufferConfig<BC extends Suppressed.BufferConfig<BC>> - Interface in org.apache.kafka.streams.kstream
+
 
+
Suppressed.EagerBufferConfig - Interface in org.apache.kafka.streams.kstream
+
+
Marker interface for a buffer configuration that will strictly enforce size constraints (bytes and/or number of records) on the buffer, so it is suitable for reducing duplicate results downstream, but does not promise to eliminate them entirely.
+
+
Suppressed.StrictBufferConfig - Interface in org.apache.kafka.streams.kstream
+
+
Marker interface for a buffer configuration that is "strict" in the sense that it will strictly enforce the time bound and never emit early.
+
+
synonyms() - Method in class org.apache.kafka.clients.admin.ConfigEntry
+
+
Returns all config values that may be used as the value of this config along with their source, in the order of precedence.
+
+
+

T

+
+
table(String) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Create a KTable for the specified topic.
+
+
table(String, Consumed<K, V>) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Create a KTable for the specified topic.
+
+
table(String, Consumed<K, V>, Materialized<K, V, KeyValueStore<Bytes, byte[]>>) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Create a KTable for the specified topic.
+
+
table(String, Materialized<K, V, KeyValueStore<Bytes, byte[]>>) - Method in class org.apache.kafka.streams.StreamsBuilder
+
+
Create a KTable for the specified topic.
+
+
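For example, the following sketch (topic and store names are made up) reads a topic as a changelog-backed KTable and materializes it into a queryable state store:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;

StreamsBuilder builder = new StreamsBuilder();
// Each record on "user-profiles" (hypothetical) is treated as an upsert for its key;
// the table is materialized into a state store that interactive queries can read.
KTable<String, String> profiles = builder.table(
        "user-profiles",
        Consumed.with(Serdes.String(), Serdes.String()),
        Materialized.as("profiles-store"));
```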
TableJoined<K,KO> - Class in org.apache.kafka.streams.kstream
+
+
The TableJoined class represents optional parameters that can be passed to KTable#join(KTable,Function,...) and KTable#leftJoin(KTable,Function,...) operations, for foreign key joins.
+
+
tags() - Method in class org.apache.kafka.common.MetricName
+
 
+
tags() - Method in class org.apache.kafka.common.MetricNameTemplate
+
+
Get the set of tag names for the metric.
+
+
tags() - Method in class org.apache.kafka.common.metrics.MetricConfig
+
 
+
tags(Map<String, String>) - Method in class org.apache.kafka.common.metrics.MetricConfig
+
 
+
target() - Method in class org.apache.kafka.connect.mirror.SourceAndTarget
+
 
+
TARGET_CLUSTER_ALIAS_KEY - Static variable in class org.apache.kafka.connect.mirror.Heartbeat
+
 
+
targetAssignment() - Method in class org.apache.kafka.clients.admin.MemberDescription
+
+
The target assignment of the member.
+
+
targetAssignment() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
The target assignment.
+
+
targetAssignmentEpoch() - Method in class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+
The epoch of the target assignment.
+
+
targetAssignmentEpoch() - Method in class org.apache.kafka.clients.admin.ShareGroupDescription
+
+
The epoch of the target assignment.
+
+
targetAssignmentEpoch() - Method in class org.apache.kafka.clients.admin.StreamsGroupDescription
+
+
The epoch of the target assignment.
+
+
targetClusterAlias() - Method in class org.apache.kafka.connect.mirror.Heartbeat
+
 
+
targetReplicas() - Method in class org.apache.kafka.clients.admin.NewPartitionReassignment
+
 
+
Task - Interface in org.apache.kafka.connect.connector
+
+
Tasks contain the code that actually copies data to/from another system.
+
+
TASK_ASSIGNOR_CLASS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
task.assignor.class
+
+
TASK_FAILURE - Static variable in class org.apache.kafka.connect.tools.MockConnector
+
 
+
TASK_TIMEOUT_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
task.timeout.ms
+
+
TASK_TIMEOUT_MS_DOC - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Deprecated.
+
+
TaskAssignment(Collection<KafkaStreamsAssignment>) - Constructor for class org.apache.kafka.streams.processor.assignment.TaskAssignor.TaskAssignment
+
 
+
TaskAssignmentException - Exception in org.apache.kafka.streams.errors
+
+
Indicates a run time error incurred while trying to assign stream tasks to threads.
+
+
TaskAssignmentException(String) - Constructor for exception org.apache.kafka.streams.errors.TaskAssignmentException
+
 
+
TaskAssignmentException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.TaskAssignmentException
+
 
+
TaskAssignmentException(Throwable) - Constructor for exception org.apache.kafka.streams.errors.TaskAssignmentException
+
 
+
TaskAssignmentUtils - Class in org.apache.kafka.streams.processor.assignment
+
+
A set of utilities to help implement task assignment via the TaskAssignor
+
+
TaskAssignmentUtils.MoveStandbyTaskPredicate - Interface in org.apache.kafka.streams.processor.assignment
+
 
+
TaskAssignmentUtils.RackAwareOptimizationParams - Class in org.apache.kafka.streams.processor.assignment
+
+
A simple config container for necessary parameters and optional overrides to apply when running the active or standby task rack-aware optimizations.
+
+
TaskAssignor - Interface in org.apache.kafka.streams.processor.assignment
+
+
A TaskAssignor is responsible for creating a TaskAssignment from a given ApplicationState.
+
+
TaskAssignor.AssignmentError - Enum Class in org.apache.kafka.streams.processor.assignment
+
+
NONE: no error detected; ACTIVE_TASK_ASSIGNED_MULTIPLE_TIMES: multiple KafkaStreams clients assigned with the same active task; INVALID_STANDBY_TASK: stateless task assigned as a standby task; MISSING_PROCESS_ID: ProcessId present in the input ApplicationState was not present in the output TaskAssignment; UNKNOWN_PROCESS_ID: unrecognized ProcessId not matching any of the participating consumers; UNKNOWN_TASK_ID: unrecognized TaskId not matching any of the tasks to be assigned.
+
+
TaskAssignor.TaskAssignment - Class in org.apache.kafka.streams.processor.assignment
+
+
Wrapper class for the final assignment of active and standby tasks to individual KafkaStreams clients.
+
+
taskClass() - Method in class org.apache.kafka.connect.connector.Connector
+
+
Returns the Task implementation for this Connector.
+
+
taskClass() - Method in class org.apache.kafka.connect.tools.MockConnector
+
 
+
taskClass() - Method in class org.apache.kafka.connect.tools.MockSinkConnector
+
 
+
taskClass() - Method in class org.apache.kafka.connect.tools.MockSourceConnector
+
 
+
taskClass() - Method in class org.apache.kafka.connect.tools.SchemaSourceConnector
+
 
+
taskClass() - Method in class org.apache.kafka.connect.tools.VerifiableSinkConnector
+
 
+
taskClass() - Method in class org.apache.kafka.connect.tools.VerifiableSourceConnector
+
 
+
taskConfigs(int) - Method in class org.apache.kafka.connect.connector.Connector
+
+
Returns a set of configurations for Tasks based on the current configuration, producing at most maxTasks configurations.
+
+
taskConfigs(int) - Method in class org.apache.kafka.connect.tools.MockConnector
+
 
+
taskConfigs(int) - Method in class org.apache.kafka.connect.tools.MockSinkConnector
+
 
+
taskConfigs(int) - Method in class org.apache.kafka.connect.tools.MockSourceConnector
+
 
+
taskConfigs(int) - Method in class org.apache.kafka.connect.tools.SchemaSourceConnector
+
 
+
taskConfigs(int) - Method in class org.apache.kafka.connect.tools.VerifiableSinkConnector
+
 
+
taskConfigs(int) - Method in class org.apache.kafka.connect.tools.VerifiableSourceConnector
+
 
+
TaskCorruptedException - Exception in org.apache.kafka.streams.errors
+
+
Indicates that a specific task is corrupted and needs to be re-initialized.
+
+
TaskCorruptedException(Set<TaskId>) - Constructor for exception org.apache.kafka.streams.errors.TaskCorruptedException
+
 
+
TaskCorruptedException(Set<TaskId>, InvalidOffsetException) - Constructor for exception org.apache.kafka.streams.errors.TaskCorruptedException
+
 
+
taskEndOffsets() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
Cumulative task changelog end offsets for tasks.
+
+
taskId() - Method in class org.apache.kafka.connect.health.TaskState
+
+
Provides the ID of the task.
+
+
taskId() - Method in interface org.apache.kafka.streams.errors.ErrorHandlerContext
+
+
Return the task ID.
+
+
taskId() - Method in exception org.apache.kafka.streams.errors.StreamsException
+
 
+
taskId() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
taskId() - Method in interface org.apache.kafka.streams.processor.api.ProcessingContext
+
+
Return the task id.
+
+
taskId() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
taskId() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return the task id.
+
+
taskId() - Method in interface org.apache.kafka.streams.processor.StateStoreContext
+
+
Returns the task id.
+
+
taskId() - Method in interface org.apache.kafka.streams.TaskMetadata
+
+
Task ID of the task.
+
+
TaskId - Class in org.apache.kafka.streams.processor
+
+
The task ID representation composed as subtopology plus the assigned partition ID.
+
+
TaskId(int, int) - Constructor for class org.apache.kafka.streams.processor.TaskId
+
 
+
TaskId(int, int, String) - Constructor for class org.apache.kafka.streams.processor.TaskId
+
 
+
TaskIdFormatException - Exception in org.apache.kafka.streams.errors
+
+
Indicates a run time error incurred while trying to parse the task id from the read string.
+
+
TaskIdFormatException(String) - Constructor for exception org.apache.kafka.streams.errors.TaskIdFormatException
+
 
+
TaskIdFormatException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.TaskIdFormatException
+
 
+
TaskIdFormatException(Throwable) - Constructor for exception org.apache.kafka.streams.errors.TaskIdFormatException
+
 
+
TaskIds(String, List<Integer>) - Constructor for class org.apache.kafka.clients.admin.StreamsGroupMemberAssignment.TaskIds
+
 
+
TaskInfo - Interface in org.apache.kafka.streams.processor.assignment
+
+
A simple container class corresponding to a given TaskId.
+
+
TaskMetadata - Interface in org.apache.kafka.streams
+
+
Metadata of a task.
+
+
TaskMigratedException - Exception in org.apache.kafka.streams.errors
+
+
Indicates that all tasks belonging to the thread have migrated to another thread.
+
+
TaskMigratedException(String) - Constructor for exception org.apache.kafka.streams.errors.TaskMigratedException
+
 
+
TaskMigratedException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.TaskMigratedException
+
 
+
TaskOffset(String, int, long) - Constructor for class org.apache.kafka.clients.admin.StreamsGroupMemberDescription.TaskOffset
+
 
+
taskOffsets() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
Cumulative offsets for tasks.
+
+
tasks() - Method in class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment
+
 
+
tasksState() - Method in class org.apache.kafka.connect.health.ConnectorHealth
+
+
Provides the current state of the connector tasks.
+
+
TaskState - Class in org.apache.kafka.connect.health
+
+
Describes the state, IDs, and any errors of a connector task.
+
+
TaskState(int, String, String, String) - Constructor for class org.apache.kafka.connect.health.TaskState
+
+
Provides an instance of TaskState.
+
+
taskTimeoutMs - Variable in class org.apache.kafka.streams.TopologyConfig.TaskConfig
+
 
+
taskTimeoutMs - Variable in class org.apache.kafka.streams.TopologyConfig
+
 
+
TaskTopicPartition - Interface in org.apache.kafka.streams.processor.assignment
+
+
This is a simple container class used during the assignment process to distinguish the type of a TopicPartition.
+
+
TelemetryTooLargeException - Exception in org.apache.kafka.common.errors
+
+
This exception indicates that the size of the telemetry metrics data is too large.
+
+
TelemetryTooLargeException(String) - Constructor for exception org.apache.kafka.common.errors.TelemetryTooLargeException
+
 
+
TerminateTransactionOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
TerminateTransactionOptions() - Constructor for class org.apache.kafka.clients.admin.TerminateTransactionOptions
+
 
+
TerminateTransactionResult - Class in org.apache.kafka.clients.admin
+
+ +
+
test(K, V) - Method in interface org.apache.kafka.streams.kstream.Predicate
+
+
Test if the record with the given key and value satisfies the predicate.
+
+
test(R) - Method in interface org.apache.kafka.connect.transforms.predicates.Predicate
+
+
Returns whether the given record satisfies this predicate.
+
+
TestInputTopic<K,V> - Class in org.apache.kafka.streams
+
+
TestInputTopic is used to pipe records to a topic in TopologyTestDriver.
+
+
TestOutputTopic<K,V> - Class in org.apache.kafka.streams
+
+
TestOutputTopic is used to read records from a topic in TopologyTestDriver.
+
+
TestRecord<K,V> - Class in org.apache.kafka.streams.test
+
+
A key/value pair, including timestamp and record headers, to be sent to or received from TopologyTestDriver.
+
+
TestRecord(K, V) - Constructor for class org.apache.kafka.streams.test.TestRecord
+
+
Creates a record.
+
+
TestRecord(K, V, Instant) - Constructor for class org.apache.kafka.streams.test.TestRecord
+
+
Creates a record.
+
+
TestRecord(K, V, Headers) - Constructor for class org.apache.kafka.streams.test.TestRecord
+
+
Creates a record.
+
+
TestRecord(K, V, Headers, Long) - Constructor for class org.apache.kafka.streams.test.TestRecord
+
+
Creates a record.
+
+
TestRecord(K, V, Headers, Instant) - Constructor for class org.apache.kafka.streams.test.TestRecord
+
+
Creates a record.
+
+
TestRecord(ConsumerRecord<K, V>) - Constructor for class org.apache.kafka.streams.test.TestRecord
+
+
Create a TestRecord from a ConsumerRecord.
+
+
TestRecord(ProducerRecord<K, V>) - Constructor for class org.apache.kafka.streams.test.TestRecord
+
+
Create a TestRecord from a ProducerRecord.
+
+
TestRecord(V) - Constructor for class org.apache.kafka.streams.test.TestRecord
+
+
Create a record with null key.
+
+
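A minimal sketch tying these test helpers together, assuming a trivial topology and made-up topic names: records are piped in through a TestInputTopic and read back from a TestOutputTopic without a running broker.

```java
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TestOutputTopic;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

StreamsBuilder builder = new StreamsBuilder();
builder.stream("input-topic", Consumed.with(Serdes.String(), Serdes.String()))
       .mapValues(v -> v.toUpperCase())
       .to("output-topic", Produced.with(Serdes.String(), Serdes.String()));

Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "test-app");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"); // never contacted

try (TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
    TestInputTopic<String, String> in =
            driver.createInputTopic("input-topic", new StringSerializer(), new StringSerializer());
    TestOutputTopic<String, String> out =
            driver.createOutputTopic("output-topic", new StringDeserializer(), new StringDeserializer());
    in.pipeInput("key", "hello");
    System.out.println(out.readKeyValue()); // KeyValue(key, HELLO)
}
```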
thenApply(KafkaFuture.BaseFunction<T, R>) - Method in class org.apache.kafka.common.KafkaFuture
+
+
Returns a new KafkaFuture that, when this future completes normally, is executed with this future's result as the argument to the supplied function.
+
+
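For instance (a sketch; error handling is omitted and the bootstrap address is a placeholder), thenApply derives a new future from an Admin result without blocking:

```java
import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.KafkaFuture;

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092"); // placeholder address

try (Admin admin = Admin.create(props)) {
    KafkaFuture<Set<String>> names = admin.listTopics().names();
    // The function runs once `names` completes normally, producing a new future.
    KafkaFuture<Integer> topicCount = names.thenApply(Set::size);
    System.out.println("Topics: " + topicCount.get());
}
```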
ThreadMetadata - Interface in org.apache.kafka.streams
+
+
Metadata of a stream thread.
+
+
threadName() - Method in interface org.apache.kafka.streams.ThreadMetadata
+
+
Name of the stream thread
+
+
threadState() - Method in interface org.apache.kafka.streams.ThreadMetadata
+
+
State of the stream thread
+
+
throttleTimeMs() - Method in exception org.apache.kafka.common.errors.ThrottlingQuotaExceededException
+
 
+
ThrottlingQuotaExceededException - Exception in org.apache.kafka.common.errors
+
+
Exception thrown if an operation on a resource exceeds the throttling quota.
+
+
ThrottlingQuotaExceededException(int, String) - Constructor for exception org.apache.kafka.common.errors.ThrottlingQuotaExceededException
+
 
+
ThrottlingQuotaExceededException(String) - Constructor for exception org.apache.kafka.common.errors.ThrottlingQuotaExceededException
+
 
+
THROUGHPUT_CONFIG - Static variable in class org.apache.kafka.connect.tools.SchemaSourceTask
+
 
+
THROUGHPUT_CONFIG - Static variable in class org.apache.kafka.connect.tools.VerifiableSourceTask
+
 
+
Time - Class in org.apache.kafka.connect.data
+
+
A time representing a specific point in a day, not tied to any specific date.
+
+
Time() - Constructor for class org.apache.kafka.connect.data.Time
+
 
+
timeCurrentIdlingStarted() - Method in interface org.apache.kafka.streams.TaskMetadata
+
+
Time task idling started.
+
+
timeDifferenceMs() - Method in class org.apache.kafka.streams.kstream.SlidingWindows
+
 
+
timeIndex() - Method in class org.apache.kafka.server.log.remote.storage.LogSegmentData
+
 
+
timeout() - Method in class org.apache.kafka.clients.consumer.CloseOptions
+
 
+
timeout(long) - Method in interface org.apache.kafka.connect.sink.SinkTaskContext
+
+
Set the timeout in milliseconds.
+
+
timeout(Duration) - Static method in class org.apache.kafka.clients.consumer.CloseOptions
+
+
Static method to create a CloseOptions with a custom timeout.
+
+
timeout(Duration) - Method in class org.apache.kafka.streams.KafkaStreams.CloseOptions
+
 
+
TimeoutException - Exception in org.apache.kafka.common.errors
+
+
Indicates that a request timed out.
+
+
TimeoutException() - Constructor for exception org.apache.kafka.common.errors.TimeoutException
+
 
+
TimeoutException(String) - Constructor for exception org.apache.kafka.common.errors.TimeoutException
+
 
+
TimeoutException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.TimeoutException
+
 
+
TimeoutException(Throwable) - Constructor for exception org.apache.kafka.common.errors.TimeoutException
+
 
+
timeoutMs() - Method in class org.apache.kafka.clients.admin.AbstractOptions
+
+
The timeout in milliseconds for this operation or null if the default api timeout for the AdminClient should be used.
+
+
timeoutMs(Integer) - Method in class org.apache.kafka.clients.admin.AbstractOptions
+
+
Set the timeout in milliseconds for this operation or null if the default api timeout for the AdminClient should be used.
+
+
timeoutMs(Integer) - Method in class org.apache.kafka.clients.admin.AlterConfigsOptions
+
+
Set the timeout in milliseconds for this operation or null if the default api timeout for the AdminClient should be used.
+
+
timeoutMs(Integer) - Method in class org.apache.kafka.clients.admin.CreateAclsOptions
+
+
Set the timeout in milliseconds for this operation or null if the default api timeout for the AdminClient should be used.
+
+
timeoutMs(Integer) - Method in class org.apache.kafka.clients.admin.CreateTopicsOptions
+
+
Set the timeout in milliseconds for this operation or null if the default api timeout for the AdminClient should be used.
+
+
timeoutMs(Integer) - Method in class org.apache.kafka.clients.admin.DeleteAclsOptions
+
+
Set the timeout in milliseconds for this operation or null if the default api timeout for the AdminClient should be used.
+
+
timeoutMs(Integer) - Method in class org.apache.kafka.clients.admin.DeleteTopicsOptions
+
+
Set the timeout in milliseconds for this operation or null if the default api timeout for the AdminClient should be used.
+
+
timeoutMs(Integer) - Method in class org.apache.kafka.clients.admin.DescribeAclsOptions
+
+
Set the timeout in milliseconds for this operation or null if the default api timeout for the AdminClient should be used.
+
+
timeoutMs(Integer) - Method in class org.apache.kafka.clients.admin.DescribeClusterOptions
+
+
Set the timeout in milliseconds for this operation or null if the default api timeout for the AdminClient should be used.
+
+
timeoutMs(Integer) - Method in class org.apache.kafka.clients.admin.DescribeConfigsOptions
+
+
Set the timeout in milliseconds for this operation or null if the default api timeout for the AdminClient should be used.
+
+
timeoutMs(Integer) - Method in class org.apache.kafka.clients.admin.DescribeTopicsOptions
+
+
Set the timeout in milliseconds for this operation or null if the default api timeout for the AdminClient should be used.
+
+
timeoutMs(Integer) - Method in class org.apache.kafka.clients.admin.ListTopicsOptions
+
+
Set the timeout in milliseconds for this operation or null if the default api timeout for the AdminClient should be used.
+
+
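As a sketch of how these per-request options are used (the `admin` instance is assumed to exist already), a timeout can be set on individual operations instead of relying on the client-wide default:

```java
import org.apache.kafka.clients.admin.DescribeClusterOptions;
import org.apache.kafka.clients.admin.ListTopicsOptions;

// Bound each call with its own timeout; leaving it unset falls back to the default API timeout.
ListTopicsOptions listOptions = new ListTopicsOptions().timeoutMs(5_000);
DescribeClusterOptions clusterOptions = new DescribeClusterOptions().timeoutMs(10_000);

admin.listTopics(listOptions).names().get();
admin.describeCluster(clusterOptions).nodes().get();
```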
timestamp() - Method in class org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo
+
 
+
timestamp() - Method in class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
The timestamp of this record, in milliseconds elapsed since unix epoch.
+
+
timestamp() - Method in class org.apache.kafka.clients.consumer.OffsetAndTimestamp
+
 
+
timestamp() - Method in class org.apache.kafka.clients.producer.ProducerRecord
+
 
+
timestamp() - Method in class org.apache.kafka.clients.producer.RecordMetadata
+
+
The timestamp of the record in the topic/partition.
+
+
timestamp() - Method in exception org.apache.kafka.common.errors.RecordDeserializationException
+
 
+
timestamp() - Method in class org.apache.kafka.connect.connector.ConnectRecord
+
 
+
timestamp() - Method in class org.apache.kafka.connect.mirror.Heartbeat
+
 
+
timestamp() - Method in interface org.apache.kafka.streams.errors.ErrorHandlerContext
+
+
Return the current timestamp; could be -1 if it is not available.
+
+
timestamp() - Method in class org.apache.kafka.streams.processor.api.FixedKeyRecord
+
+
The timestamp of the record.
+
+
timestamp() - Method in class org.apache.kafka.streams.processor.api.Record
+
+
The timestamp of the record.
+
+
timestamp() - Method in class org.apache.kafka.streams.processor.MockProcessorContext.CapturedForward
+
+
Deprecated.
+
The timestamp attached to the forwarded record.
+
+
timestamp() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
timestamp() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return the current timestamp.
+
+
timestamp() - Method in interface org.apache.kafka.streams.processor.RecordContext
+
+
Return the current timestamp.
+
+
timestamp() - Method in class org.apache.kafka.streams.state.ValueAndTimestamp
+
 
+
timestamp() - Method in class org.apache.kafka.streams.state.VersionedRecord
+
 
+
timestamp() - Method in class org.apache.kafka.streams.test.TestRecord
+
 
+
Timestamp - Class in org.apache.kafka.connect.data
+
+
A timestamp representing an absolute time, without timezone information.
+
+
Timestamp() - Constructor for class org.apache.kafka.connect.data.Timestamp
+
 
+
TIMESTAMP - Enum constant in enum class org.apache.kafka.server.log.remote.storage.RemoteStorageManager.IndexType
+
+
Represents timestamp index.
+
+
TIMESTAMP_KEY - Static variable in class org.apache.kafka.connect.mirror.Heartbeat
+
 
+
TIMESTAMP_SIZE - Static variable in class org.apache.kafka.streams.state.StateSerdes
+
 
+
TimestampedBytesStore - Interface in org.apache.kafka.streams.state
+
 
+
TimestampedKeyQuery<K,V> - Class in org.apache.kafka.streams.query
+
+
Interactive query for retrieving a single record based on its key from TimestampedKeyValueStore
+
+
timestampedKeyValueStore() - Static method in class org.apache.kafka.streams.state.QueryableStoreTypes
+
+ +
+
TimestampedKeyValueStore<K,V> - Interface in org.apache.kafka.streams.state
+
+
A key-(value/timestamp) store that supports put/get/delete and range queries.
+
+
timestampedKeyValueStoreBuilder(KeyValueBytesStoreSupplier, Serde<K>, Serde<V>) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Creates a StoreBuilder that can be used to build a TimestampedKeyValueStore.
+
+
TimestampedRangeQuery<K,V> - Class in org.apache.kafka.streams.query
+
+
Interactive query for issuing range queries and scans over TimestampedKeyValueStore
+
+
timestampedWindowStore() - Static method in class org.apache.kafka.streams.state.QueryableStoreTypes
+
+ +
+
TimestampedWindowStore<K,V> - Interface in org.apache.kafka.streams.state
+
+
Interface for storing the aggregated values of fixed-size time windows.
+
+
timestampedWindowStoreBuilder(WindowBytesStoreSupplier, Serde<K>, Serde<V>) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Creates a StoreBuilder that can be used to build a TimestampedWindowStore.
+
+
timestampExtractor - Variable in class org.apache.kafka.streams.TopologyConfig.TaskConfig
+
 
+
TimestampExtractor - Interface in org.apache.kafka.streams.processor
+
+
An interface that allows the Kafka Streams framework to extract a timestamp from an instance of ConsumerRecord.
+
+
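A small sketch of a custom extractor (the class name and payload convention are hypothetical): it pulls an event time out of the record value and falls back to the record's own timestamp. It would typically be registered through the default.timestamp.extractor setting of StreamsConfig.

```java
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.processor.TimestampExtractor;

public class PayloadTimestampExtractor implements TimestampExtractor {
    @Override
    public long extract(ConsumerRecord<Object, Object> record, long partitionTime) {
        // Hypothetical convention: the value itself is an epoch-millis Long.
        if (record.value() instanceof Long) {
            return (Long) record.value();
        }
        // Otherwise fall back to the timestamp the producer or broker attached.
        return record.timestamp();
    }
}
```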
timestampExtractorSupplier - Variable in class org.apache.kafka.streams.TopologyConfig
+
 
+
timestampType() - Method in class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
The timestamp type of this record
+
+
timestampType() - Method in exception org.apache.kafka.common.errors.RecordDeserializationException
+
 
+
timestampType() - Method in class org.apache.kafka.connect.sink.SinkRecord
+
 
+
timeWindow(long, TimeUnit) - Method in class org.apache.kafka.common.metrics.MetricConfig
+
 
+
TimeWindowedCogroupedKStream<K,V> - Interface in org.apache.kafka.streams.kstream
+
+
Same as a TimeWindowedKStream, however, for multiple co-grouped KStreams.
+
+
TimeWindowedDeserializer<T> - Class in org.apache.kafka.streams.kstream
+
 
+
TimeWindowedDeserializer() - Constructor for class org.apache.kafka.streams.kstream.TimeWindowedDeserializer
+
 
+
TimeWindowedDeserializer(Deserializer<T>, Long) - Constructor for class org.apache.kafka.streams.kstream.TimeWindowedDeserializer
+
 
+
TimeWindowedKStream<K,V> - Interface in org.apache.kafka.streams.kstream
+
+
TimeWindowedKStream is an abstraction of a windowed record stream of key-value pairs.
+
+
TimeWindowedSerde() - Constructor for class org.apache.kafka.streams.kstream.WindowedSerdes.TimeWindowedSerde
+
 
+
TimeWindowedSerde(Serde<T>, long) - Constructor for class org.apache.kafka.streams.kstream.WindowedSerdes.TimeWindowedSerde
+
 
+
timeWindowedSerdeFrom(Class<T>, long) - Static method in class org.apache.kafka.streams.kstream.WindowedSerdes
+
+
Construct a TimeWindowedSerde object to deserialize changelog topic for the specified inner class type and window size.
+
+
TimeWindowedSerializer<T> - Class in org.apache.kafka.streams.kstream
+
 
+
TimeWindowedSerializer() - Constructor for class org.apache.kafka.streams.kstream.TimeWindowedSerializer
+
 
+
TimeWindowedSerializer(Serializer<T>) - Constructor for class org.apache.kafka.streams.kstream.TimeWindowedSerializer
+
 
+
timeWindowMs() - Method in class org.apache.kafka.common.metrics.MetricConfig
+
 
+
TimeWindows - Class in org.apache.kafka.streams.kstream
+
+
The fixed-size time-based window specifications used for aggregations.
+
+
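To connect the windowing pieces above, here is a sketch that defines five-minute tumbling windows and builds a serde able to read the Windowed keys a windowed aggregation produces; the window size passed to the serde must match the one used in the topology.

```java
import java.time.Duration;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.kstream.WindowedSerdes;

// Five-minute tumbling windows with no grace period for late records.
TimeWindows windows = TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(5));

// Serde for the windowed keys written by a windowed count/aggregate.
Serde<Windowed<String>> windowedKeySerde =
        WindowedSerdes.timeWindowedSerdeFrom(String.class, windows.size());
```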
to(String) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Materialize this stream to a topic.
+
+
to(String, Produced<K, V>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
to(TopicNameExtractor<K, V>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Materialize the record of this stream to different topics.
+
+
to(TopicNameExtractor<K, V>, Produced<K, V>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
To - Class in org.apache.kafka.streams.processor
+
+
This class is used to provide the optional parameters when sending output records to downstream processor using ProcessorContext.forward(Object, Object, To).
+
+
toArray() - Method in interface org.apache.kafka.common.header.Headers
+
+
Returns all headers as an array, in the order they were added in.
+
+
toArray(List<Uuid>) - Static method in class org.apache.kafka.common.Uuid
+
+
Convert a list of Uuid to an array of Uuid.
+
+
toBin(double) - Method in interface org.apache.kafka.common.metrics.stats.Histogram.BinScheme
+
+
Determine the 0-based bin number in which the supplied value should be placed.
+
+
toBin(double) - Method in class org.apache.kafka.common.metrics.stats.Histogram.ConstantBinScheme
+
 
+
toBin(double) - Method in class org.apache.kafka.common.metrics.stats.Histogram.LinearBinScheme
+
 
+
toCompletionStage() - Method in class org.apache.kafka.common.KafkaFuture
+
+
Gets a CompletionStage with the same completion properties as this KafkaFuture.
+
+
toConnectData(String, byte[]) - Method in interface org.apache.kafka.connect.storage.Converter
+
+
Convert a native object to a Kafka Connect data object for deserialization.
+
+
toConnectData(String, byte[]) - Method in class org.apache.kafka.connect.storage.StringConverter
+
 
+
toConnectData(String, Headers, byte[]) - Method in interface org.apache.kafka.connect.storage.Converter
+
+
Convert a native object to a Kafka Connect data object for deserialization, potentially using the supplied topic and headers in the record as necessary.
+
+
toConnectHeader(String, String, byte[]) - Method in interface org.apache.kafka.connect.storage.HeaderConverter
+
+
Convert the header name and byte array value into a Header object.
+
+
toConnectHeader(String, String, byte[]) - Method in class org.apache.kafka.connect.storage.SimpleHeaderConverter
+
 
+
toConnectHeader(String, String, byte[]) - Method in class org.apache.kafka.connect.storage.StringConverter
+
 
+
toEnrichedRst() - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Configs with new metadata (group, orderInGroup, dependents) formatted with reStructuredText, suitable for embedding in Sphinx documentation.
+
+
toFile(String) - Static method in class org.apache.kafka.streams.kstream.Printed
+
+
Print the records of a KStream to a file.
+
+
toFilter() - Method in class org.apache.kafka.common.acl.AccessControlEntry
+
+
Create a filter which matches only this AccessControlEntry.
+
+
toFilter() - Method in class org.apache.kafka.common.acl.AclBinding
+
+
Create a filter which matches only this AclBinding.
+
+
toFilter() - Method in class org.apache.kafka.common.resource.ResourcePattern
+
 
+
toHtml() - Method in class org.apache.kafka.common.config.ConfigDef
+
 
+
toHtml(int, Function<String, String>) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Converts this config into an HTML list that can be embedded into docs.
+
+
toHtml(int, Function<String, String>, Map<String, String>) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Converts this config into an HTML list that can be embedded into docs.
+
+
toHtml(Map<String, String>) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Converts this config into an HTML list that can be embedded into docs.
+
+
toHtmlTable() - Method in class org.apache.kafka.common.config.ConfigDef
+
 
+
toHtmlTable(String, Iterable<MetricNameTemplate>) - Static method in class org.apache.kafka.common.metrics.Metrics
+
+
Use the specified domain and metric name templates to generate an HTML table documenting the metrics.
+
+
toHtmlTable(Map<String, String>) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Converts this config into an HTML table that can be embedded into docs.
+
+
token() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerExtensionsValidatorCallback
+
 
+
token() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerTokenCallback
+
+
Return the (potentially null) token
+
+
token() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback
+
+
Return the (potentially null) token
+
+
token(OAuthBearerToken) - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerTokenCallback
+
+
Set the token.
+
+
token(OAuthBearerToken) - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback
+
+
Set the token.
+
+
TOKEN_AUTH_CONFIG - Static variable in class org.apache.kafka.common.security.scram.ScramLoginModule
+
 
+
tokenAuthenticated() - Method in class org.apache.kafka.common.security.auth.KafkaPrincipal
+
 
+
tokenAuthenticated(boolean) - Method in class org.apache.kafka.common.security.auth.KafkaPrincipal
+
 
+
TokenBucket - Class in org.apache.kafka.common.metrics.stats
+
+
The TokenBucket is a MeasurableStat implementing a token bucket algorithm that is usable within a Sensor.
+
+
TokenBucket() - Constructor for class org.apache.kafka.common.metrics.stats.TokenBucket
+
 
+
TokenBucket(TimeUnit) - Constructor for class org.apache.kafka.common.metrics.stats.TokenBucket
+
 
+
tokenId() - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
tokenInfo() - Method in class org.apache.kafka.common.security.token.delegation.DelegationToken
+
 
+
TokenInformation - Class in org.apache.kafka.common.security.token.delegation
+
+
A class representing the details of a delegation token.
+
+
TokenInformation(String, KafkaPrincipal, Collection<KafkaPrincipal>, long, long, long) - Constructor for class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
TokenInformation(String, KafkaPrincipal, KafkaPrincipal, Collection<KafkaPrincipal>, long, long, long) - Constructor for class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
tokenRequester() - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
tokenRequesterAsString() - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
tokenValue() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback
+
+
Return the (always non-null) token value
+
+
toList(Uuid[]) - Static method in class org.apache.kafka.common.Uuid
+
+
Convert an array of Uuids to a list of Uuid.
+
+
toLogical(Schema, byte[]) - Static method in class org.apache.kafka.connect.data.Decimal
+
+
Convert a value from its encoded format (byte[]) to its logical format (BigDecimal).
+
+
toLogical(Schema, int) - Static method in class org.apache.kafka.connect.data.Date
+
+
Convert a value from its encoded format (int) to its logical format (Date).
+
+
toLogical(Schema, int) - Static method in class org.apache.kafka.connect.data.Time
+
+
Convert a value from its encoded format (int) to its logical format (Date).
+
+
toLogical(Schema, long) - Static method in class org.apache.kafka.connect.data.Timestamp
+
+
Convert a value from its encoded format (long) to its logical format (Date).
+
+
topic() - Method in class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
The topic this record is received from (never null)
+
+
topic() - Method in class org.apache.kafka.clients.producer.ProducerRecord
+
 
+
topic() - Method in class org.apache.kafka.clients.producer.RecordMetadata
+
+
The topic the record was appended to
+
+
topic() - Method in class org.apache.kafka.common.PartitionInfo
+
+
The topic name
+
+
topic() - Method in class org.apache.kafka.common.TopicIdPartition
+
 
+
topic() - Method in class org.apache.kafka.common.TopicPartition
+
 
+
topic() - Method in class org.apache.kafka.common.TopicPartitionReplica
+
 
+
topic() - Method in class org.apache.kafka.connect.connector.ConnectRecord
+
 
+
topic() - Method in class org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata
+
+
Return the name of the topic to create.
+
+
topic() - Method in interface org.apache.kafka.streams.errors.ErrorHandlerContext
+
+
Return the topic name of the current input record; could be null if it is not available.
+
+
topic() - Method in interface org.apache.kafka.streams.processor.api.RecordMetadata
+
+
Return the topic name of the current input record; could be null if it is not available.
+
+
topic() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
topic() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return the topic name of the current input record; could be null if it is not available.
+
+
topic() - Method in interface org.apache.kafka.streams.processor.RecordContext
+
+
Return the topic name of the current input record; could be null if it is not available.
+
+
topic() - Method in class org.apache.kafka.streams.state.StateSerdes
+
+
Return the topic.
+
+
topic() - Method in interface org.apache.kafka.streams.TopologyDescription.Sink
+
+
The topic name this sink node is writing to.
+
+
TOPIC - Enum constant in enum class org.apache.kafka.common.config.ConfigResource.Type
+
 
+
TOPIC - Enum constant in enum class org.apache.kafka.common.resource.ResourceType
+
+
A Kafka topic.
+
+
TOPIC_CONFIG - Static variable in class org.apache.kafka.connect.tools.SchemaSourceTask
+
 
+
TOPIC_CONFIG - Static variable in class org.apache.kafka.connect.tools.VerifiableSourceTask
+
 
+
TOPIC_KEY - Static variable in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
TOPIC_PREFIX - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix used to provide default topic configs to be applied when creating internal topics.
+
+
TOPIC_PREFIX_ALTERNATIVE - Static variable in class org.apache.kafka.streams.StreamsConfig.InternalConfig
+
 
+
TopicAuthorizationException - Exception in org.apache.kafka.common.errors
+
 
+
TopicAuthorizationException(String) - Constructor for exception org.apache.kafka.common.errors.TopicAuthorizationException
+
 
+
TopicAuthorizationException(String, Set<String>) - Constructor for exception org.apache.kafka.common.errors.TopicAuthorizationException
+
 
+
TopicAuthorizationException(Set<String>) - Constructor for exception org.apache.kafka.common.errors.TopicAuthorizationException
+
 
+
TopicCollection - Class in org.apache.kafka.common
+
+
A class used to represent a collection of topics.
+
+
TopicCollection.TopicIdCollection - Class in org.apache.kafka.common
+
+
A class used to represent a collection of topics defined by their topic ID.
+
+
TopicCollection.TopicNameCollection - Class in org.apache.kafka.common
+
+
A class used to represent a collection of topics defined by their topic name.
+
+
TopicConfig - Class in org.apache.kafka.common.config
+
+
Keys that can be used to configure a topic.
+
+
TopicConfig() - Constructor for class org.apache.kafka.common.config.TopicConfig
+
 
+
topicConfigs() - Method in class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription.TopicInfo
+
+
Topic-level configurations as key-value pairs.
+
+
TopicDeletionDisabledException - Exception in org.apache.kafka.common.errors
+
 
+
TopicDeletionDisabledException() - Constructor for exception org.apache.kafka.common.errors.TopicDeletionDisabledException
+
 
+
TopicDeletionDisabledException(String) - Constructor for exception org.apache.kafka.common.errors.TopicDeletionDisabledException
+
 
+
TopicDescription - Class in org.apache.kafka.clients.admin
+
+
A detailed description of a single topic in the cluster.
+
+
TopicDescription(String, boolean, List<TopicPartitionInfo>) - Constructor for class org.apache.kafka.clients.admin.TopicDescription
+
+
Create an instance with the specified parameters.
+
+
TopicDescription(String, boolean, List<TopicPartitionInfo>, Set<AclOperation>) - Constructor for class org.apache.kafka.clients.admin.TopicDescription
+
+
Create an instance with the specified parameters.
+
+
TopicDescription(String, boolean, List<TopicPartitionInfo>, Set<AclOperation>, Uuid) - Constructor for class org.apache.kafka.clients.admin.TopicDescription
+
+
Create an instance with the specified parameters.
+
+
TopicExistsException - Exception in org.apache.kafka.common.errors
+
 
+
TopicExistsException(String) - Constructor for exception org.apache.kafka.common.errors.TopicExistsException
+
 
+
TopicExistsException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.TopicExistsException
+
 
+
topicId() - Method in class org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig
+
 
+
topicId() - Method in class org.apache.kafka.clients.admin.TopicDescription
+
 
+
topicId() - Method in class org.apache.kafka.clients.admin.TopicListing
+
+
The id of the topic.
+
+
topicId() - Method in class org.apache.kafka.common.TopicIdPartition
+
 
+
topicId(String) - Method in class org.apache.kafka.clients.admin.CreateTopicsResult
+
+
Returns a future that provides topic ID for the topic when the request completes.
+
+
topicId(String) - Method in class org.apache.kafka.common.Cluster
+
 
+
topicIdPartition() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogMetadata
+
 
+
topicIdPartition() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId
+
 
+
topicIdPartition() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
 
+
topicIdPartition() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate
+
 
+
topicIdPartition() - Method in class org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteMetadata
+
 
+
TopicIdPartition - Class in org.apache.kafka.common
+
+
Represents a topic partition together with its universally unique topic ID.
+
+
TopicIdPartition(Uuid, int, String) - Constructor for class org.apache.kafka.common.TopicIdPartition
+
+
Create an instance with the provided parameters.
+
+
TopicIdPartition(Uuid, TopicPartition) - Constructor for class org.apache.kafka.common.TopicIdPartition
+
+
Create an instance with the provided parameters.
+
+
topicIds() - Method in class org.apache.kafka.common.Cluster
+
 
+
topicIds() - Method in class org.apache.kafka.common.TopicCollection.TopicIdCollection
+
 
+
topicIdValues() - Method in class org.apache.kafka.clients.admin.DeleteTopicsResult
+
+ +
+
topicIdValues() - Method in class org.apache.kafka.clients.admin.DescribeTopicsResult
+
+ +
+
TopicInfo(int, int, Map<String, String>) - Constructor for class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription.TopicInfo
+
 
+
TopicListing - Class in org.apache.kafka.clients.admin
+
+
A listing of a topic in the cluster.
+
+
TopicListing(String, Uuid, boolean) - Constructor for class org.apache.kafka.clients.admin.TopicListing
+
+
Create an instance with the specified parameters.
+
+
TopicMetadataAndConfig(ApiException) - Constructor for class org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig
+
 
+
TopicMetadataAndConfig(Uuid, int, int, Config) - Constructor for class org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig
+
 
+
topicName(Uuid) - Method in class org.apache.kafka.common.Cluster
+
 
+
topicNameExtractor() - Method in interface org.apache.kafka.streams.TopologyDescription.Sink
+
+
The TopicNameExtractor class that this sink node uses to dynamically extract the topic name to write to.
+
+
TopicNameExtractor<K,V> - Interface in org.apache.kafka.streams.processor
+
+
An interface that allows dynamically determining the name of the Kafka topic to send records to at the sink node of the topology.
+
+
topicNames() - Method in class org.apache.kafka.common.TopicCollection.TopicNameCollection
+
 
+
topicNameValues() - Method in class org.apache.kafka.clients.admin.DeleteTopicsResult
+
+ +
+
topicNameValues() - Method in class org.apache.kafka.clients.admin.DescribeTopicsResult
+
+ +
+
topicPartition() - Method in class org.apache.kafka.clients.admin.AbortTransactionSpec
+
 
+
topicPartition() - Method in exception org.apache.kafka.common.errors.RecordDeserializationException
+
 
+
topicPartition() - Method in class org.apache.kafka.common.TopicIdPartition
+
 
+
topicPartition() - Method in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
topicPartition() - Method in interface org.apache.kafka.streams.processor.assignment.TaskTopicPartition
+
 
+
TopicPartition - Class in org.apache.kafka.common
+
+
A topic name and partition number
+
+
TopicPartition(String, int) - Constructor for class org.apache.kafka.common.TopicPartition
+
 
+
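A tiny sketch of the two identifiers (the topic name is made up): a TopicPartition names a partition by topic and number, while a TopicIdPartition additionally carries the topic's Uuid.

```java
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;

TopicPartition tp = new TopicPartition("orders", 0);                 // name + partition
TopicIdPartition tip = new TopicIdPartition(Uuid.randomUuid(), tp);  // adds the topic ID
System.out.println(tip.topicId() + " / " + tip.topicPartition());
```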
TopicPartitionInfo - Class in org.apache.kafka.common
+
+
A class containing leadership, replicas and ISR information for a topic partition.
+
+
TopicPartitionInfo(int, Node, List<Node>, List<Node>) - Constructor for class org.apache.kafka.common.TopicPartitionInfo
+
 
+
TopicPartitionInfo(int, Node, List<Node>, List<Node>, List<Node>, List<Node>) - Constructor for class org.apache.kafka.common.TopicPartitionInfo
+
+
Create an instance of this class with the provided parameters.
+
+
TopicPartitionReplica - Class in org.apache.kafka.common
+
+
The topic name, partition number and the brokerId of the replica
+
+
TopicPartitionReplica(String, int, int) - Constructor for class org.apache.kafka.common.TopicPartitionReplica
+
 
+
topicPartitions() - Method in class org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec
+
+
Returns the topic partitions whose offsets are to be listed for a consumer group.
+
+
topicPartitions() - Method in class org.apache.kafka.clients.admin.ListShareGroupOffsetsSpec
+
+
Returns the topic partitions whose offsets are to be listed for a share group.
+
+
topicPartitions() - Method in class org.apache.kafka.clients.admin.MemberAssignment
+
+
The topic partitions assigned to a group member.
+
+
topicPartitions() - Method in class org.apache.kafka.clients.admin.ShareMemberAssignment
+
+
The topic partitions assigned to a group member.
+
+
topicPartitions() - Method in class org.apache.kafka.clients.admin.TransactionDescription
+
 
+
topicPartitions() - Method in interface org.apache.kafka.streams.processor.assignment.TaskInfo
+
 
+
topicPartitions() - Method in interface org.apache.kafka.streams.StreamsMetadata
+
+
Source topic partitions of the active tasks of the Streams client.
+
+
topicPartitions() - Method in interface org.apache.kafka.streams.TaskMetadata
+
+
Source topic partitions of the task.
+
+
topicPartitions(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec
+
+
Set the topic partitions whose offsets are to be listed for a consumer group.
+
+
topicPartitions(Collection<TopicPartition>) - Method in class org.apache.kafka.clients.admin.ListShareGroupOffsetsSpec
+
+
Set the topic partitions whose offsets are to be listed for a share group.
+
+
topicPattern() - Method in interface org.apache.kafka.streams.TopologyDescription.Source
+
+
The pattern used to match the topic names this source node is reading from.
+
+
topicPrefix(String) - Static method in class org.apache.kafka.streams.StreamsConfig
+
+
Prefix a property with StreamsConfig.TOPIC_PREFIX used to provide default topic configs to be applied when creating internal topics.
+
+
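For example (a sketch; the retention value is arbitrary), topicPrefix lets an application set default configs for the internal repartition and changelog topics it creates:

```java
import java.util.Properties;
import org.apache.kafka.common.config.TopicConfig;
import org.apache.kafka.streams.StreamsConfig;

Properties props = new Properties();
// Equivalent to setting "topic.retention.ms", applied to every internal topic.
props.put(StreamsConfig.topicPrefix(TopicConfig.RETENTION_MS_CONFIG), "86400000");
```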
topicResult(String) - Method in class org.apache.kafka.clients.admin.DeleteShareGroupOffsetsResult
+
+
Return a future which can be used to check the result for a given topic.
+
+
topics() - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
+
 
+
topics() - Method in class org.apache.kafka.common.Cluster
+
+
Get all topics.
+
+
TOPICS_CONFIG - Static variable in class org.apache.kafka.connect.sink.SinkConnector
+
+
Configuration key for the list of input topics for this connector.
+
+
TOPICS_CONFIG - Static variable in class org.apache.kafka.connect.sink.SinkTask
+
+
The configuration key that provides the list of topics that are inputs for this SinkTask.
+
+
TOPICS_REGEX_CONFIG - Static variable in class org.apache.kafka.connect.sink.SinkTask
+
+
The configuration key that provides a regex specifying which topics to include as inputs for this SinkTask.
+
+
topicSet() - Method in interface org.apache.kafka.streams.TopologyDescription.Source
+
+
The topic names this source node is reading from.
+
+
topicSource(String) - Method in class org.apache.kafka.connect.mirror.DefaultReplicationPolicy
+
 
+
topicSource(String) - Method in class org.apache.kafka.connect.mirror.IdentityReplicationPolicy
+
+
Unlike DefaultReplicationPolicy, IdentityReplicationPolicy cannot know the source of a remote topic based on its name alone.
+
+
topicSource(String) - Method in interface org.apache.kafka.connect.mirror.ReplicationPolicy
+
+
Returns the source cluster alias of given topic.
+
+
Topology - Class in org.apache.kafka.streams
+
+
A logical representation of a ProcessorTopology.
+
+
Topology() - Constructor for class org.apache.kafka.streams.Topology
+
 
+
Topology(TopologyConfig) - Constructor for class org.apache.kafka.streams.Topology
+
 
+
TOPOLOGY_OPTIMIZATION_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
topology.optimization
+
+
Topology.AutoOffsetReset - Enum Class in org.apache.kafka.streams
+
+
Deprecated.
Since 4.0. Use AutoOffsetReset instead.
+
+
+
TopologyConfig - Class in org.apache.kafka.streams
+
+
Streams configs that apply at the topology level.
+
+
TopologyConfig(String, StreamsConfig, Properties) - Constructor for class org.apache.kafka.streams.TopologyConfig
+
 
+
TopologyConfig(StreamsConfig) - Constructor for class org.apache.kafka.streams.TopologyConfig
+
 
+
TopologyConfig.TaskConfig - Class in org.apache.kafka.streams
+
 
+
TopologyDescription - Interface in org.apache.kafka.streams
+
+
A meta representation of a topology.
+
+
TopologyDescription.GlobalStore - Interface in org.apache.kafka.streams
+
+
Represents a global store.
+
+
TopologyDescription.Node - Interface in org.apache.kafka.streams
+
+
A node of a topology.
+
+
TopologyDescription.Processor - Interface in org.apache.kafka.streams
+
+
A processor node of a topology.
+
+
TopologyDescription.Sink - Interface in org.apache.kafka.streams
+
+
A sink node of a topology.
+
+
TopologyDescription.Source - Interface in org.apache.kafka.streams
+
+
A source node of a topology.
+
+
TopologyDescription.Subtopology - Interface in org.apache.kafka.streams
+
+
A connected sub-graph of a Topology.
+
+
topologyEpoch() - Method in class org.apache.kafka.clients.admin.StreamsGroupDescription
+
+
The epoch of the currently used topology.
+
+
topologyEpoch() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
The epoch of the topology present on the client.
+
+
TopologyException - Exception in org.apache.kafka.streams.errors
+
+
Indicates a pre-runtime error occurred while parsing the logical topology to construct the physical processor topology.
+
+
TopologyException(String) - Constructor for exception org.apache.kafka.streams.errors.TopologyException
+
 
+
TopologyException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.TopologyException
+
 
+
TopologyException(Throwable) - Constructor for exception org.apache.kafka.streams.errors.TopologyException
+
 
+
topologyName - Variable in class org.apache.kafka.streams.TopologyConfig
+
 
+
topologyName() - Method in class org.apache.kafka.streams.processor.TaskId
+
+
Experimental feature -- will return null
+
+
topologyOverrides - Variable in class org.apache.kafka.streams.TopologyConfig
+
 
+
TopologyTestDriver - Class in org.apache.kafka.streams
+
+
This class makes it easier to write tests to verify the behavior of topologies created with Topology or StreamsBuilder.
+
+
TopologyTestDriver(Topology) - Constructor for class org.apache.kafka.streams.TopologyTestDriver
+
+
Create a new test driver instance.
+
+
TopologyTestDriver(Topology, Instant) - Constructor for class org.apache.kafka.streams.TopologyTestDriver
+
+
Create a new test driver instance.
+
+
TopologyTestDriver(Topology, Properties) - Constructor for class org.apache.kafka.streams.TopologyTestDriver
+
+
Create a new test driver instance.
+
+
TopologyTestDriver(Topology, Properties, Instant) - Constructor for class org.apache.kafka.streams.TopologyTestDriver
+
+
Create a new test driver instance.
+
+
toRst() - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Get the configs formatted with reStructuredText, suitable for embedding in Sphinx documentation.
+
+
toStream() - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Convert this changelog stream to a KStream.
+
+
toStream(KeyValueMapper<? super K, ? super V, ? extends KR>) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Convert this changelog stream to a KStream using the given KeyValueMapper to select the new key.
+
+
toStream(KeyValueMapper<? super K, ? super V, ? extends KR>, Named) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Convert this changelog stream to a KStream using the given KeyValueMapper to select the new key.
+
+
toStream(Named) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Convert this changelog stream to a KStream.
+
+
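A short sketch combining toStream and to (the topic names are made up): the changelog of a KTable is converted back into a record stream and written out with explicit serdes.

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Produced;

StreamsBuilder builder = new StreamsBuilder();
KTable<String, Long> totals =
        builder.table("totals", Consumed.with(Serdes.String(), Serdes.Long()));
// Every update to the table becomes a record on the output topic.
totals.toStream()
      .to("totals-output", Produced.with(Serdes.String(), Serdes.Long()));
```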
toString() - Method in class org.apache.kafka.clients.admin.AbortTransactionOptions
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.AbortTransactionSpec
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.AlterConfigOp
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ClassicGroupDescription
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ClientMetricsResourceListing
+
+
Deprecated.
+
toString() - Method in class org.apache.kafka.clients.admin.Config
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ConfigEntry.ConfigSynonym
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ConfigEntry
+
+
Override toString to redact sensitive value.
+
+
toString() - Method in class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ConsumerGroupListing
+
+
Deprecated.
+
toString() - Method in class org.apache.kafka.clients.admin.DescribeProducersOptions
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.DescribeProducersResult.PartitionProducerState
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.DescribeTransactionsOptions
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.FeatureMetadata
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.FeatureUpdate
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.FenceProducersOptions
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.FinalizedVersionRange
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.GroupListing
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ListShareGroupOffsetsSpec
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ListTopicsOptions
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ListTransactionsOptions
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.LogDirDescription
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.MemberAssignment
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.MemberDescription
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.NewPartitions
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.NewTopic
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.PartitionReassignment
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ProducerState
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.QuorumInfo.Node
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.QuorumInfo.ReplicaState
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.QuorumInfo
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.RaftVoterEndpoint
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.RecordsToDelete
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ReplicaInfo
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ScramCredentialInfo
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ShareGroupDescription
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ShareMemberAssignment
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.ShareMemberDescription
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.StreamsGroupDescription
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberAssignment.TaskIds
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberAssignment
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription.Endpoint
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription.TaskOffset
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription.TopicInfo
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.SupportedVersionRange
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.TerminateTransactionOptions
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.TopicDescription
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.TopicListing
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.TransactionDescription
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.TransactionListing
+
 
+
toString() - Method in enum class org.apache.kafka.clients.admin.TransactionState
+
 
+
toString() - Method in class org.apache.kafka.clients.admin.UserScramCredentialsDescription
+
 
+
toString() - Method in enum class org.apache.kafka.clients.consumer.AcknowledgeType
+
 
+
toString() - Method in class org.apache.kafka.clients.consumer.ConsumerGroupMetadata
+
 
+
toString() - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment
+
 
+
toString() - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupAssignment
+
 
+
toString() - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription
+
 
+
toString() - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
+
 
+
toString() - Method in class org.apache.kafka.clients.consumer.ConsumerRecord
+
 
+
toString() - Method in class org.apache.kafka.clients.consumer.OffsetAndMetadata
+
 
+
toString() - Method in class org.apache.kafka.clients.consumer.OffsetAndTimestamp
+
 
+
toString() - Method in enum class org.apache.kafka.clients.consumer.OffsetResetStrategy
+
+
Deprecated.
+
toString() - Method in class org.apache.kafka.clients.consumer.SubscriptionPattern
+
 
+
toString() - Method in class org.apache.kafka.clients.producer.PreparedTxnState
+
+
Returns a serialized string representation of this transaction state.
+
+
toString() - Method in class org.apache.kafka.clients.producer.ProducerRecord
+
 
+
toString() - Method in class org.apache.kafka.clients.producer.RecordMetadata
+
 
+
toString() - Method in class org.apache.kafka.common.acl.AccessControlEntry
+
 
+
toString() - Method in class org.apache.kafka.common.acl.AccessControlEntryFilter
+
 
+
toString() - Method in class org.apache.kafka.common.acl.AclBinding
+
 
+
toString() - Method in class org.apache.kafka.common.acl.AclBindingFilter
+
 
+
toString() - Method in enum class org.apache.kafka.common.ClassicGroupState
+
 
+
toString() - Method in class org.apache.kafka.common.Cluster
+
 
+
toString() - Method in class org.apache.kafka.common.ClusterResource
+
 
+
toString() - Method in class org.apache.kafka.common.config.ConfigDef.CaseInsensitiveValidString
+
 
+
toString() - Method in class org.apache.kafka.common.config.ConfigDef.CompositeValidator
+
 
+
toString() - Method in class org.apache.kafka.common.config.ConfigDef.LambdaValidator
+
 
+
toString() - Method in class org.apache.kafka.common.config.ConfigDef.ListSize
+
 
+
toString() - Method in class org.apache.kafka.common.config.ConfigDef.NonEmptyString
+
 
+
toString() - Method in class org.apache.kafka.common.config.ConfigDef.NonEmptyStringWithoutControlChars
+
 
+
toString() - Method in class org.apache.kafka.common.config.ConfigDef.NonNullValidator
+
 
+
toString() - Method in class org.apache.kafka.common.config.ConfigDef.Range
+
 
+
toString() - Method in class org.apache.kafka.common.config.ConfigDef.ValidList
+
 
+
toString() - Method in class org.apache.kafka.common.config.ConfigDef.ValidString
+
 
+
toString() - Method in class org.apache.kafka.common.config.ConfigResource
+
 
+
toString() - Method in class org.apache.kafka.common.config.ConfigValue
+
 
+
toString() - Method in enum class org.apache.kafka.common.config.SslClientAuth
+
 
+
toString() - Method in enum class org.apache.kafka.common.ConsumerGroupState
+
+
Deprecated.
+
toString() - Method in class org.apache.kafka.common.Endpoint
+
 
+
toString() - Method in enum class org.apache.kafka.common.GroupState
+
 
+
toString() - Method in enum class org.apache.kafka.common.GroupType
+
 
+
toString() - Method in enum class org.apache.kafka.common.IsolationLevel
+
 
+
toString() - Method in class org.apache.kafka.common.MetricName
+
 
+
toString() - Method in class org.apache.kafka.common.MetricNameTemplate
+
 
+
toString() - Method in class org.apache.kafka.common.metrics.Quota
+
 
+
toString() - Method in exception org.apache.kafka.common.metrics.QuotaViolationException
+
 
+
toString() - Method in class org.apache.kafka.common.metrics.stats.CumulativeSum
+
 
+
toString() - Method in class org.apache.kafka.common.metrics.stats.Frequency
+
 
+
toString() - Method in class org.apache.kafka.common.metrics.stats.Histogram
+
 
+
toString() - Method in class org.apache.kafka.common.metrics.stats.Meter
+
 
+
toString() - Method in class org.apache.kafka.common.metrics.stats.Rate
+
 
+
toString() - Method in class org.apache.kafka.common.metrics.stats.SampledStat
+
 
+
toString() - Method in class org.apache.kafka.common.metrics.stats.TokenBucket
+
 
+
toString() - Method in class org.apache.kafka.common.Node
+
 
+
toString() - Method in class org.apache.kafka.common.PartitionInfo
+
 
+
toString() - Method in class org.apache.kafka.common.quota.ClientQuotaAlteration.Op
+
 
+
toString() - Method in class org.apache.kafka.common.quota.ClientQuotaAlteration
+
 
+
toString() - Method in class org.apache.kafka.common.quota.ClientQuotaEntity
+
 
+
toString() - Method in class org.apache.kafka.common.quota.ClientQuotaFilter
+
 
+
toString() - Method in class org.apache.kafka.common.quota.ClientQuotaFilterComponent
+
 
+
toString() - Method in class org.apache.kafka.common.resource.Resource
+
 
+
toString() - Method in class org.apache.kafka.common.resource.ResourcePattern
+
 
+
toString() - Method in class org.apache.kafka.common.resource.ResourcePatternFilter
+
 
+
toString() - Method in class org.apache.kafka.common.security.auth.KafkaPrincipal
+
 
+
toString() - Method in class org.apache.kafka.common.security.auth.SaslExtensions
+
 
+
toString() - Method in class org.apache.kafka.common.security.token.delegation.DelegationToken
+
 
+
toString() - Method in class org.apache.kafka.common.security.token.delegation.TokenInformation
+
 
+
toString() - Method in class org.apache.kafka.common.TopicIdPartition
+
 
+
toString() - Method in class org.apache.kafka.common.TopicPartition
+
 
+
toString() - Method in class org.apache.kafka.common.TopicPartitionInfo
+
 
+
toString() - Method in class org.apache.kafka.common.TopicPartitionReplica
+
 
+
toString() - Method in class org.apache.kafka.common.Uuid
+
+
Returns a base64 string encoding of the UUID.
+
+
toString() - Method in class org.apache.kafka.connect.connector.ConnectRecord
+
 
+
toString() - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
toString() - Method in class org.apache.kafka.connect.data.Field
+
 
+
toString() - Method in class org.apache.kafka.connect.data.SchemaAndValue
+
 
+
toString() - Method in class org.apache.kafka.connect.data.Struct
+
 
+
toString() - Method in class org.apache.kafka.connect.header.ConnectHeaders
+
 
+
toString() - Method in class org.apache.kafka.connect.health.ConnectorHealth
+
 
+
toString() - Method in class org.apache.kafka.connect.health.ConnectorState
+
 
+
toString() - Method in enum class org.apache.kafka.connect.health.ConnectorType
+
 
+
toString() - Method in class org.apache.kafka.connect.health.TaskState
+
 
+
toString() - Method in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
toString() - Method in class org.apache.kafka.connect.mirror.Heartbeat
+
 
+
toString() - Method in class org.apache.kafka.connect.mirror.SourceAndTarget
+
 
+
toString() - Method in class org.apache.kafka.connect.sink.SinkRecord
+
 
+
toString() - Method in class org.apache.kafka.connect.source.SourceRecord
+
 
+
toString() - Method in enum class org.apache.kafka.connect.source.SourceTask.TransactionBoundary
+
 
+
toString() - Method in class org.apache.kafka.coordinator.group.api.assignor.GroupAssignment
+
 
+
toString() - Method in enum class org.apache.kafka.coordinator.group.api.assignor.SubscriptionType
+
 
+
toString() - Method in class org.apache.kafka.server.authorizer.Action
+
 
+
toString() - Method in class org.apache.kafka.server.log.remote.storage.LogSegmentData
+
 
+
toString() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId
+
 
+
toString() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata.CustomMetadata
+
 
+
toString() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata
+
 
+
toString() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate
+
 
+
toString() - Method in class org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteMetadata
+
 
+
toString() - Method in class org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata
+
 
+
toString() - Method in class org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata
+
 
+
toString() - Method in class org.apache.kafka.streams.KeyQueryMetadata
+
 
+
toString() - Method in class org.apache.kafka.streams.KeyValue
+
 
+
toString() - Method in class org.apache.kafka.streams.kstream.JoinWindows
+
 
+
toString() - Method in class org.apache.kafka.streams.kstream.SessionWindows
+
 
+
toString() - Method in class org.apache.kafka.streams.kstream.SlidingWindows
+
 
+
toString() - Method in class org.apache.kafka.streams.kstream.StreamJoined
+
 
+
toString() - Method in class org.apache.kafka.streams.kstream.TimeWindows
+
 
+
toString() - Method in class org.apache.kafka.streams.kstream.UnlimitedWindows
+
 
+
toString() - Method in class org.apache.kafka.streams.kstream.Window
+
 
+
toString() - Method in class org.apache.kafka.streams.kstream.Windowed
+
 
+
toString() - Method in class org.apache.kafka.streams.LagInfo
+
 
+
toString() - Method in class org.apache.kafka.streams.processor.api.FixedKeyRecord
+
 
+
toString() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedForward
+
 
+
toString() - Method in class org.apache.kafka.streams.processor.api.Record
+
 
+
toString() - Method in class org.apache.kafka.streams.processor.assignment.AssignmentConfigs
+
 
+
toString() - Method in class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask
+
 
+
toString() - Method in class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment
+
 
+
toString() - Method in class org.apache.kafka.streams.processor.assignment.ProcessId
+
 
+
toString() - Method in class org.apache.kafka.streams.processor.MockProcessorContext.CapturedForward
+
+
Deprecated.
+
toString() - Method in class org.apache.kafka.streams.processor.TaskId
+
 
+
toString() - Method in class org.apache.kafka.streams.processor.To
+
 
+
toString() - Method in class org.apache.kafka.streams.query.Position
+
 
+
toString() - Method in class org.apache.kafka.streams.query.PositionBound
+
 
+
toString() - Method in class org.apache.kafka.streams.query.StateQueryResult
+
 
+
toString() - Method in class org.apache.kafka.streams.query.WindowKeyQuery
+
 
+
toString() - Method in class org.apache.kafka.streams.query.WindowRangeQuery
+
 
+
toString() - Method in class org.apache.kafka.streams.state.DslKeyValueParams
+
 
+
toString() - Method in class org.apache.kafka.streams.state.DslSessionParams
+
 
+
toString() - Method in class org.apache.kafka.streams.state.DslWindowParams
+
 
+
toString() - Method in class org.apache.kafka.streams.state.HostInfo
+
 
+
toString() - Method in class org.apache.kafka.streams.state.ValueAndTimestamp
+
 
+
toString() - Method in class org.apache.kafka.streams.state.VersionedRecord
+
 
+
toString() - Method in class org.apache.kafka.streams.StoreQueryParameters
+
 
+
toString() - Method in class org.apache.kafka.streams.test.TestRecord
+
 
+
toString() - Method in class org.apache.kafka.streams.TestInputTopic
+
 
+
toString() - Method in class org.apache.kafka.streams.TestOutputTopic
+
 
+
toSysOut() - Static method in class org.apache.kafka.streams.kstream.Printed
+
+
Print the records of a KStream to system out.
+
+
toTable() - Method in interface org.apache.kafka.streams.kstream.KStream
+
+
Convert this stream to a KTable.
+
+
toTable(Materialized<K, V, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
toTable(Named) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
toTable(Named, Materialized<K, V, KeyValueStore<Bytes, byte[]>>) - Method in interface org.apache.kafka.streams.kstream.KStream
+
+ +
+
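To make the `Printed.toSysOut()` and `KStream.toTable()` entries above easier to place, here is a minimal, hedged Kafka Streams sketch; the topic name, serdes, and label are illustrative assumptions, not taken from this index.

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Printed;

public class ToTableSketch {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        // Hypothetical input topic and serdes.
        KStream<String, String> stream =
                builder.stream("input-topic", Consumed.with(Serdes.String(), Serdes.String()));

        // Printed.toSysOut(): dump every record to stdout, tagged with a label, for debugging.
        stream.print(Printed.<String, String>toSysOut().withLabel("raw"));

        // KStream.toTable(): interpret the stream as a changelog and keep the latest value per key.
        KTable<String, String> latest = stream.toTable();

        builder.build(); // the topology would normally be passed to new KafkaStreams(...)
    }
}
```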
totalBytes() - Method in class org.apache.kafka.clients.admin.LogDirDescription
+
+
The total size of the volume this log directory is on or empty if the broker did not return a value.
+
+
totalCount() - Method in class org.apache.kafka.clients.admin.NewPartitions
+
+
The total number of partitions after the operation succeeds.
+
+
toTime() - Method in class org.apache.kafka.streams.query.MultiVersionedKeyQuery
+
+
The ending time point of the query, if specified
+
+
toTime(Instant) - Method in class org.apache.kafka.streams.query.MultiVersionedKeyQuery
+
+
Specifies the ending time point for the key query.
+
+
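As a rough illustration of the `toTime(Instant)` entry above, a hedged sketch of bounding a versioned key query through interactive queries; the store name, key, and value type are assumptions, and `streams` stands for an already-running KafkaStreams instance.

```java
import java.time.Instant;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.query.MultiVersionedKeyQuery;
import org.apache.kafka.streams.query.StateQueryRequest;

public class VersionedQuerySketch {
    static void queryLastHour(KafkaStreams streams) {
        // Ask for all versions of key "widget" recorded in the last hour,
        // from a (hypothetical) versioned store named "prices".
        MultiVersionedKeyQuery<String, Double> query =
                MultiVersionedKeyQuery.<String, Double>withKey("widget")
                        .fromTime(Instant.now().minusSeconds(3600)) // start of the range
                        .toTime(Instant.now());                     // toTime(Instant): end of the range

        var result = streams.query(StateQueryRequest.inStore("prices").withQuery(query));
        System.out.println(result);
    }
}
```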
TRACE - Enum constant in enum class org.apache.kafka.common.metrics.Sensor.RecordingLevel
+
 
+
TRACE_LOG_LEVEL - Static variable in class org.apache.kafka.common.config.LogLevelConfig
+
+
The TRACE level designates finer-grained informational events than the DEBUG level.
+
+
traceMessage() - Method in class org.apache.kafka.connect.health.AbstractState
+
+
The error message associated with the connector or task.
+
+
TRANSACTION - Enum constant in enum class org.apache.kafka.server.log.remote.storage.RemoteStorageManager.IndexType
+
+
Represents transaction index.
+
+
TRANSACTION_BOUNDARY_CONFIG - Static variable in class org.apache.kafka.connect.source.SourceTask
+
+
The configuration key that determines how source tasks will define transaction boundaries when exactly-once support is enabled.
+
+
TRANSACTION_TIMEOUT_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
transaction.timeout.ms
+
+
TRANSACTION_TIMEOUT_DOC - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
 
+
TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
transaction.two.phase.commit.enable
+
+
TransactionAbortableException - Exception in org.apache.kafka.common.errors
+
 
+
TransactionAbortableException(String) - Constructor for exception org.apache.kafka.common.errors.TransactionAbortableException
+
 
+
TransactionAbortableException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.TransactionAbortableException
+
 
+
transactionAborted() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
TransactionAbortedException - Exception in org.apache.kafka.common.errors
+
+
This is the Exception thrown when we are aborting any undrained batches during a transaction which is aborted without any underlying cause - which likely means that the user chose to abort.
+
+
TransactionAbortedException() - Constructor for exception org.apache.kafka.common.errors.TransactionAbortedException
+
 
+
TransactionAbortedException(String) - Constructor for exception org.apache.kafka.common.errors.TransactionAbortedException
+
 
+
TransactionAbortedException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.TransactionAbortedException
+
 
+
TRANSACTIONAL_ID - Enum constant in enum class org.apache.kafka.common.resource.ResourceType
+
+
A transactional ID.
+
+
TRANSACTIONAL_ID_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
transactional.id
+
+
TRANSACTIONAL_ID_DOC - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
 
+
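To connect the `TRANSACTIONAL_ID_CONFIG` and `TRANSACTION_TIMEOUT_CONFIG` entries above, a hedged sketch of a transactional producer; the bootstrap address, topic, and ids are placeholders.

```java
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class TransactionalProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // placeholder
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "orders-tx-1");        // transactional.id
        props.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 60_000);            // transaction.timeout.ms

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.initTransactions();
            producer.beginTransaction();
            producer.send(new ProducerRecord<>("orders", "key", "value"));
            producer.commitTransaction();
        }
    }
}
```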
transactionalId() - Method in class org.apache.kafka.clients.admin.TransactionListing
+
 
+
TransactionalIdAuthorizationException - Exception in org.apache.kafka.common.errors
+
 
+
TransactionalIdAuthorizationException(String) - Constructor for exception org.apache.kafka.common.errors.TransactionalIdAuthorizationException
+
 
+
TransactionalIdNotFoundException - Exception in org.apache.kafka.common.errors
+
 
+
TransactionalIdNotFoundException(String) - Constructor for exception org.apache.kafka.common.errors.TransactionalIdNotFoundException
+
 
+
transactionCommitted() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
transactionContext() - Method in interface org.apache.kafka.connect.source.SourceTaskContext
+
+
Get a TransactionContext that can be used to define producer transaction boundaries when exactly-once support is enabled for the connector.
+
+
TransactionContext - Interface in org.apache.kafka.connect.source
+
+
Provided to source tasks to allow them to define their own producer transaction boundaries when exactly-once support is enabled.
+
+
TransactionCoordinatorFencedException - Exception in org.apache.kafka.common.errors
+
 
+
TransactionCoordinatorFencedException(String) - Constructor for exception org.apache.kafka.common.errors.TransactionCoordinatorFencedException
+
 
+
TransactionCoordinatorFencedException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.TransactionCoordinatorFencedException
+
 
+
TransactionDescription - Class in org.apache.kafka.clients.admin
+
 
+
TransactionDescription(int, TransactionState, long, int, long, OptionalLong, Set<TopicPartition>) - Constructor for class org.apache.kafka.clients.admin.TransactionDescription
+
 
+
transactionIndex() - Method in class org.apache.kafka.server.log.remote.storage.LogSegmentData
+
 
+
transactionInFlight() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
transactionInitialized() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
TransactionListing - Class in org.apache.kafka.clients.admin
+
 
+
TransactionListing(String, long, TransactionState) - Constructor for class org.apache.kafka.clients.admin.TransactionListing
+
 
+
transactionStartTimeMs() - Method in class org.apache.kafka.clients.admin.TransactionDescription
+
 
+
TransactionState - Enum Class in org.apache.kafka.clients.admin
+
 
+
transactionTimeoutMs() - Method in class org.apache.kafka.clients.admin.TransactionDescription
+
 
+
transform(Map<String, String>) - Method in class org.apache.kafka.common.config.ConfigTransformer
+
+
Transforms the given configuration data by using the ConfigProvider instances to look up values to replace the variables in the pattern.
+
+
transform(K, V) - Method in interface org.apache.kafka.streams.kstream.Transformer
+
+
Deprecated.
+
Transform the record with the given key and value.
+
+
transform(K, V) - Method in interface org.apache.kafka.streams.kstream.ValueTransformerWithKey
+
+
Transform the given [key and] value to a new value.
+
+
transform(V) - Method in interface org.apache.kafka.streams.kstream.ValueTransformer
+
+
Deprecated.
+
Transform the given value to a new value.
+
+
Transformation<R extends ConnectRecord<R>> - Interface in org.apache.kafka.connect.transforms
+
+
Single message transformation for Kafka Connect record types.
+
+
Transformer<K,V,R> - Interface in org.apache.kafka.streams.kstream
+
+
Deprecated.
Since 4.0. Use api.Processor instead.
+
+
+
TransformerSupplier<K,V,R> - Interface in org.apache.kafka.streams.kstream
+
+
Deprecated.
Since 4.0. Use api.ProcessorSupplier instead.
+
+
+
transformValues(ValueTransformerWithKeySupplier<? super K, ? super V, ? extends VR>, String...) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable by transforming the value of each record in this KTable into a new value (with possibly a new type), with default serializers, deserializers, and state store.
+
+
transformValues(ValueTransformerWithKeySupplier<? super K, ? super V, ? extends VR>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>, String...) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable by transforming the value of each record in this KTable into a new value (with possibly a new type), with the key serde, value serde, and the underlying materialized state storage configured in the Materialized instance.
+
+
transformValues(ValueTransformerWithKeySupplier<? super K, ? super V, ? extends VR>, Materialized<K, VR, KeyValueStore<Bytes, byte[]>>, Named, String...) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable by transforming the value of each record in this KTable into a new value (with possibly a new type), with the key serde, value serde, and the underlying materialized state storage configured in the Materialized instance.
+
+
transformValues(ValueTransformerWithKeySupplier<? super K, ? super V, ? extends VR>, Named, String...) - Method in interface org.apache.kafka.streams.kstream.KTable
+
+
Create a new KTable by transforming the value of each record in this KTable into a new value (with possibly a new type), with default serializers, deserializers, and state store.
+
+
translateOffsets(Map<String, Object>, String, String, Duration) - Static method in class org.apache.kafka.connect.mirror.RemoteClusterUtils
+
+
Translates a remote consumer group's offsets into corresponding local offsets.
+
+
truststore() - Method in interface org.apache.kafka.common.security.auth.SslEngineFactory
+
+
Returns truststore configured for this factory.
+
+
ttl() - Method in class org.apache.kafka.common.config.ConfigData
+
+
Returns the TTL (in milliseconds).
+
+
ttls() - Method in class org.apache.kafka.common.config.ConfigTransformerResult
+
+
Returns the TTL values (in milliseconds) returned from the ConfigProvider instances for a given set of paths.
+
+
TWO_PHASE_COMMIT - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
TWO_PHASE_COMMIT operation.
+
+
type - Variable in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
type() - Method in class org.apache.kafka.clients.admin.ConfigEntry
+
+
Return the config data type.
+
+
type() - Method in class org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+
The group type (or the protocol) of this consumer group.
+
+
type() - Method in class org.apache.kafka.clients.admin.ConsumerGroupListing
+
+
Deprecated.
+
The type of the consumer group.
+
+
type() - Method in class org.apache.kafka.clients.admin.GroupListing
+
+
The type of the group.
+
+
type() - Method in enum class org.apache.kafka.clients.admin.ScramMechanism
+
 
+
type() - Method in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
type() - Method in class org.apache.kafka.common.config.ConfigResource
+
+
Return the resource type.
+
+
type() - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
type() - Method in interface org.apache.kafka.connect.data.Schema
+
 
+
type() - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
type() - Method in class org.apache.kafka.connect.health.ConnectorHealth
+
+
Provides the type of the connector.
+
+
type() - Method in class org.apache.kafka.connect.storage.ConverterConfig
+
+
Get the type of converter as defined by the ConverterConfig.TYPE_CONFIG configuration.
+
+
type() - Method in interface org.apache.kafka.streams.kstream.EmitStrategy
+
+
Returns the strategy type.
+
+
type() - Method in class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask
+
 
+
type(Schema.Type) - Static method in class org.apache.kafka.connect.data.SchemaBuilder
+
+
Create a SchemaBuilder for the specified type.
+
+
TYPE_CONFIG - Static variable in class org.apache.kafka.connect.storage.ConverterConfig
+
 
+
typeOf(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
types() - Method in class org.apache.kafka.clients.admin.ListConsumerGroupsOptions
+
+
Deprecated.
+
Returns the list of group types that are requested or empty if no types have been specified.
+
+
types() - Method in class org.apache.kafka.clients.admin.ListGroupsOptions
+
+
Returns the list of group types that are requested or empty if no types have been specified.
+
+
+

U

+
+
UnacceptableCredentialException - Exception in org.apache.kafka.common.errors
+
+
Exception thrown when attempting to define a credential that does not meet the criteria for acceptability (for example, attempting to create a SCRAM credential with an empty username or password or too few/many iterations).
+
+
UnacceptableCredentialException(String) - Constructor for exception org.apache.kafka.common.errors.UnacceptableCredentialException
+
+
Constructor
+
+
UnacceptableCredentialException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.UnacceptableCredentialException
+
 
+
unauthorizedTopics() - Method in class org.apache.kafka.common.Cluster
+
 
+
unauthorizedTopics() - Method in exception org.apache.kafka.common.errors.TopicAuthorizationException
+
+
Get the set of topics which failed authorization.
+
+
unavailable() - Static method in class org.apache.kafka.streams.state.HostInfo
+
 
+
unbounded() - Static method in interface org.apache.kafka.streams.kstream.Suppressed.BufferConfig
+
+
Create a buffer unconstrained by size (either keys or bytes).
+
+
unbounded() - Static method in class org.apache.kafka.streams.query.PositionBound
+
+
Creates a new PositionBound representing "no bound"
+
+
UNCLEAN - Enum constant in enum class org.apache.kafka.common.ElectionType
+
 
+
UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
UNCLEAN_LEADER_ELECTION_ENABLE_DOC - Static variable in class org.apache.kafka.common.config.TopicConfig
+
 
+
uncommittedOffsets() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
uncommittedRecords() - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
unitName() - Method in class org.apache.kafka.common.metrics.stats.Rate
+
 
+
UNKNOWN - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigSource
+
 
+
UNKNOWN - Enum constant in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigType
+
 
+
UNKNOWN - Enum constant in enum class org.apache.kafka.clients.admin.EndpointType
+
 
+
UNKNOWN - Enum constant in enum class org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType
+
 
+
UNKNOWN - Enum constant in enum class org.apache.kafka.clients.admin.ScramMechanism
+
 
+
UNKNOWN - Enum constant in enum class org.apache.kafka.clients.admin.TransactionState
+
 
+
UNKNOWN - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
Represents any AclOperation which this client cannot understand, perhaps because this client is too old.
+
+
UNKNOWN - Enum constant in enum class org.apache.kafka.common.acl.AclPermissionType
+
+
Represents any AclPermissionType which this client cannot understand, perhaps because this client is too old.
+
+
UNKNOWN - Enum constant in enum class org.apache.kafka.common.ClassicGroupState
+
 
+
UNKNOWN - Enum constant in enum class org.apache.kafka.common.config.ConfigResource.Type
+
 
+
UNKNOWN - Enum constant in enum class org.apache.kafka.common.ConsumerGroupState
+
+
Deprecated.
+
UNKNOWN - Enum constant in enum class org.apache.kafka.common.GroupState
+
 
+
UNKNOWN - Enum constant in enum class org.apache.kafka.common.GroupType
+
 
+
UNKNOWN - Enum constant in enum class org.apache.kafka.common.resource.PatternType
+
+
Represents any PatternType which this client cannot understand, perhaps because this client is too old.
+
+
UNKNOWN - Enum constant in enum class org.apache.kafka.common.resource.ResourceType
+
+
Represents any ResourceType which this client cannot understand, perhaps because this client is too old.
+
+
UNKNOWN - Enum constant in enum class org.apache.kafka.connect.health.ConnectorType
+
+
Identifies a connector whose type could not be inferred
+
+
UNKNOWN_PARTITION - Static variable in class org.apache.kafka.clients.producer.RecordMetadata
+
+
Partition value for record without partition assigned
+
+
UNKNOWN_PROCESS_ID - Enum constant in enum class org.apache.kafka.streams.processor.assignment.TaskAssignor.AssignmentError
+
 
+
UNKNOWN_QUERY_TYPE - Enum constant in enum class org.apache.kafka.streams.query.FailureReason
+
+
Failure indicating that the store doesn't know how to handle the given query.
+
+
UNKNOWN_TASK_ID - Enum constant in enum class org.apache.kafka.streams.processor.assignment.TaskAssignor.AssignmentError
+
 
+
UnknownControllerIdException - Exception in org.apache.kafka.common.errors
+
 
+
UnknownControllerIdException(String) - Constructor for exception org.apache.kafka.common.errors.UnknownControllerIdException
+
 
+
UnknownLeaderEpochException - Exception in org.apache.kafka.common.errors
+
+
The request contained a leader epoch which is larger than that on the broker that received the request.
+
+
UnknownLeaderEpochException(String) - Constructor for exception org.apache.kafka.common.errors.UnknownLeaderEpochException
+
 
+
UnknownLeaderEpochException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.UnknownLeaderEpochException
+
 
+
UnknownMemberIdException - Exception in org.apache.kafka.common.errors
+
 
+
UnknownMemberIdException() - Constructor for exception org.apache.kafka.common.errors.UnknownMemberIdException
+
 
+
UnknownMemberIdException(String) - Constructor for exception org.apache.kafka.common.errors.UnknownMemberIdException
+
 
+
UnknownMemberIdException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.UnknownMemberIdException
+
 
+
UnknownMemberIdException(Throwable) - Constructor for exception org.apache.kafka.common.errors.UnknownMemberIdException
+
 
+
UnknownProducerIdException - Exception in org.apache.kafka.common.errors
+
+
This exception is raised by the broker if it could not locate the producer metadata associated with the producerId in question.
+
+
UnknownProducerIdException(String) - Constructor for exception org.apache.kafka.common.errors.UnknownProducerIdException
+
 
+
UnknownServerException - Exception in org.apache.kafka.common.errors
+
+
An error occurred on the server for which the client doesn't have a corresponding error code.
+
+
UnknownServerException() - Constructor for exception org.apache.kafka.common.errors.UnknownServerException
+
 
+
UnknownServerException(String) - Constructor for exception org.apache.kafka.common.errors.UnknownServerException
+
 
+
UnknownServerException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.UnknownServerException
+
 
+
UnknownServerException(Throwable) - Constructor for exception org.apache.kafka.common.errors.UnknownServerException
+
 
+
UnknownStateStoreException - Exception in org.apache.kafka.streams.errors
+
+
Indicates that the state store being queried is unknown, i.e., the state store either does not exist in your topology or is not queryable.
+
+
UnknownStateStoreException(String) - Constructor for exception org.apache.kafka.streams.errors.UnknownStateStoreException
+
 
+
UnknownStateStoreException(String, Throwable) - Constructor for exception org.apache.kafka.streams.errors.UnknownStateStoreException
+
 
+
UnknownSubscriptionIdException - Exception in org.apache.kafka.common.errors
+
+
This exception indicates that the client sent an invalid or outdated SubscriptionId
+
+
UnknownSubscriptionIdException(String) - Constructor for exception org.apache.kafka.common.errors.UnknownSubscriptionIdException
+
 
+
UnknownTopicIdException - Exception in org.apache.kafka.common.errors
+
 
+
UnknownTopicIdException(String) - Constructor for exception org.apache.kafka.common.errors.UnknownTopicIdException
+
 
+
UnknownTopicOrPartitionException - Exception in org.apache.kafka.common.errors
+
+
This topic/partition doesn't exist.
+
+
UnknownTopicOrPartitionException() - Constructor for exception org.apache.kafka.common.errors.UnknownTopicOrPartitionException
+
 
+
UnknownTopicOrPartitionException(String) - Constructor for exception org.apache.kafka.common.errors.UnknownTopicOrPartitionException
+
 
+
UnknownTopicOrPartitionException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.UnknownTopicOrPartitionException
+
 
+
UnknownTopicOrPartitionException(Throwable) - Constructor for exception org.apache.kafka.common.errors.UnknownTopicOrPartitionException
+
 
+
UnknownTopologyException - Exception in org.apache.kafka.streams.errors
+
+
Indicates that the NamedTopology being looked up does not exist in this application
+
+
UnknownTopologyException(String, String) - Constructor for exception org.apache.kafka.streams.errors.UnknownTopologyException
+
 
+
UnknownTopologyException(String, Throwable, String) - Constructor for exception org.apache.kafka.streams.errors.UnknownTopologyException
+
 
+
UnlimitedWindows - Class in org.apache.kafka.streams.kstream
+
+
The unlimited window specifications used for aggregations.
+
+
unregisterBroker(int) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Unregister a broker.
+
+
unregisterBroker(int, UnregisterBrokerOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Unregister a broker.
+
+
unregisterBroker(int, UnregisterBrokerOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
unregisterBroker(int, UnregisterBrokerOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
UnregisterBrokerOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
UnregisterBrokerOptions() - Constructor for class org.apache.kafka.clients.admin.UnregisterBrokerOptions
+
 
+
UnregisterBrokerResult - Class in org.apache.kafka.clients.admin
+
+ +
+
unregisterMetricFromSubscription(KafkaMetric) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Remove the provided application metric for subscription.
+
+
unregisterMetricFromSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
unregisterMetricFromSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
unregisterMetricFromSubscription(KafkaMetric) - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
unregisterMetricFromSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Remove the provided application metric for subscription.
+
+
unregisterMetricFromSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Remove the provided application metric for subscription.
+
+
unregisterMetricFromSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
unregisterMetricFromSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
unregisterMetricFromSubscription(KafkaMetric) - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
unregisterMetricFromSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.producer.KafkaProducer
+
+
Remove the provided application metric for subscription.
+
+
unregisterMetricFromSubscription(KafkaMetric) - Method in class org.apache.kafka.clients.producer.MockProducer
+
 
+
unregisterMetricFromSubscription(KafkaMetric) - Method in interface org.apache.kafka.clients.producer.Producer
+
 
+
UnreleasedInstanceIdException - Exception in org.apache.kafka.common.errors
+
 
+
UnreleasedInstanceIdException(String) - Constructor for exception org.apache.kafka.common.errors.UnreleasedInstanceIdException
+
 
+
UNSAFE_DOWNGRADE - Enum constant in enum class org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType
+
 
+
UnstableOffsetCommitException - Exception in org.apache.kafka.common.errors
+
+
Exception thrown when there are unstable offsets for the requested topic partitions.
+
+
UnstableOffsetCommitException(String) - Constructor for exception org.apache.kafka.common.errors.UnstableOffsetCommitException
+
 
+
unsubscribe() - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
unsubscribe() - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Unsubscribe from topics currently subscribed with KafkaConsumer.subscribe(Collection) or KafkaConsumer.subscribe(Pattern).
+
+
unsubscribe() - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Unsubscribe from topics currently subscribed with KafkaShareConsumer.subscribe(Collection).
+
+
unsubscribe() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
unsubscribe() - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
unsubscribe() - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
unsubscribe(String, Set<String>, ConfigChangeCallback) - Method in interface org.apache.kafka.common.config.provider.ConfigProvider
+
+
Unsubscribes to changes for the given keys at the given path (optional operation).
+
+
unsubscribeAll() - Method in interface org.apache.kafka.common.config.provider.ConfigProvider
+
+
Clears all subscribers (optional operation).
+
+
UNSUPPORTED - Enum constant in enum class org.apache.kafka.connect.source.ConnectorTransactionBoundaries
+
+
Signals that a connector cannot define its own transaction boundaries.
+
+
UNSUPPORTED - Enum constant in enum class org.apache.kafka.connect.source.ExactlyOnceSupport
+
+
Signals that a connector does not support exactly-once semantics.
+
+
UnsupportedAssignorException - Exception in org.apache.kafka.common.errors
+
 
+
UnsupportedAssignorException(String) - Constructor for exception org.apache.kafka.common.errors.UnsupportedAssignorException
+
 
+
UnsupportedByAuthenticationException - Exception in org.apache.kafka.common.errors
+
+
Authentication mechanism does not support the requested function.
+
+
UnsupportedByAuthenticationException(String) - Constructor for exception org.apache.kafka.common.errors.UnsupportedByAuthenticationException
+
 
+
UnsupportedByAuthenticationException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.UnsupportedByAuthenticationException
+
 
+
UnsupportedCompressionTypeException - Exception in org.apache.kafka.common.errors
+
+
The requesting client does not support the compression type of the given partition.
+
+
UnsupportedCompressionTypeException(String) - Constructor for exception org.apache.kafka.common.errors.UnsupportedCompressionTypeException
+
 
+
UnsupportedCompressionTypeException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.UnsupportedCompressionTypeException
+
 
+
UnsupportedEndpointTypeException - Exception in org.apache.kafka.common.errors
+
 
+
UnsupportedEndpointTypeException(String) - Constructor for exception org.apache.kafka.common.errors.UnsupportedEndpointTypeException
+
 
+
UnsupportedForMessageFormatException - Exception in org.apache.kafka.common.errors
+
+
The message format version does not support the requested function.
+
+
UnsupportedForMessageFormatException(String) - Constructor for exception org.apache.kafka.common.errors.UnsupportedForMessageFormatException
+
 
+
UnsupportedForMessageFormatException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.UnsupportedForMessageFormatException
+
 
+
UnsupportedSaslMechanismException - Exception in org.apache.kafka.common.errors
+
+
This exception indicates that the SASL mechanism requested by the client is not enabled on the broker.
+
+
UnsupportedSaslMechanismException(String) - Constructor for exception org.apache.kafka.common.errors.UnsupportedSaslMechanismException
+
 
+
UnsupportedSaslMechanismException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.UnsupportedSaslMechanismException
+
 
+
UnsupportedVersionException - Exception in org.apache.kafka.common.errors
+
+
Indicates that a request API or version needed by the client is not supported by the broker.
+
+
UnsupportedVersionException(String) - Constructor for exception org.apache.kafka.common.errors.UnsupportedVersionException
+
 
+
UnsupportedVersionException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.UnsupportedVersionException
+
 
+
untilTimeLimit(Duration, Suppressed.BufferConfig<?>) - Static method in interface org.apache.kafka.streams.kstream.Suppressed
+
+
Configure the suppression to wait timeToWaitForMoreEvents amount of time after receiving a record before emitting it further downstream.
+
+
untilWindowCloses(Suppressed.StrictBufferConfig) - Static method in interface org.apache.kafka.streams.kstream.Suppressed
+
+
Configure the suppression to emit only the "final results" from the window.
+
+
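To make the two `Suppressed` factory entries above concrete, a hedged Kafka Streams sketch; the topic name, window size, and serdes are illustrative assumptions, not taken from this index.

```java
import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Suppressed;
import org.apache.kafka.streams.kstream.Suppressed.BufferConfig;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;

public class SuppressSketch {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        KTable<Windowed<String>, Long> finalCounts = builder
                .stream("clicks", Consumed.with(Serdes.String(), Serdes.String()))
                .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
                .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(1)))
                .count()
                // untilWindowCloses: emit exactly one "final result" per key and window,
                // only after the window closes; requires a strict (non-dropping) buffer.
                .suppress(Suppressed.untilWindowCloses(BufferConfig.unbounded()));
        builder.build();
        // Alternatively, untilTimeLimit(Duration, BufferConfig) rate-limits updates without waiting
        // for a window boundary, e.g.
        // Suppressed.untilTimeLimit(Duration.ofMinutes(5), BufferConfig.maxRecords(1000)).
    }
}
```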
unused() - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
updateBeginningOffsets(Map<TopicPartition, Long>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
updateClusterMetadata(Cluster) - Method in interface org.apache.kafka.server.quota.ClientQuotaCallback
+
+
This callback is invoked whenever there are changes in the cluster metadata, such as brokers being added or removed, topics being created or deleted, or partition leadership updates.
+
+
updateDurationOffsets(Map<TopicPartition, Long>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
updateEndOffsets(Map<TopicPartition, Long>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
updateFeatures(Map<String, FeatureUpdate>, UpdateFeaturesOptions) - Method in interface org.apache.kafka.clients.admin.Admin
+
+
Applies specified updates to finalized features.
+
+
updateFeatures(Map<String, FeatureUpdate>, UpdateFeaturesOptions) - Method in class org.apache.kafka.clients.admin.ForwardingAdmin
+
 
+
updateFeatures(Map<String, FeatureUpdate>, UpdateFeaturesOptions) - Method in class org.apache.kafka.clients.admin.KafkaAdminClient
+
 
+
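For the `updateFeatures` entries above, a hedged admin-client sketch; the bootstrap address is a placeholder and the feature name and version level are hypothetical (a real call should first consult the broker's supported version range).

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.FeatureUpdate;
import org.apache.kafka.clients.admin.UpdateFeaturesOptions;

public class UpdateFeaturesSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // Hypothetical feature name and target level.
            FeatureUpdate update = new FeatureUpdate((short) 1, FeatureUpdate.UpgradeType.UPGRADE);
            admin.updateFeatures(Map.of("my.feature", update), new UpdateFeaturesOptions())
                 .all()
                 .get(); // block until the controller applies (or rejects) the update
        }
    }
}
```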
UpdateFeaturesOptions - Class in org.apache.kafka.clients.admin
+
+ +
+
UpdateFeaturesOptions() - Constructor for class org.apache.kafka.clients.admin.UpdateFeaturesOptions
+
 
+
UpdateFeaturesResult - Class in org.apache.kafka.clients.admin
+
+ +
+
updatePartitions(String, List<PartitionInfo>) - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
updateQuota(ClientQuotaType, ClientQuotaEntity, double) - Method in interface org.apache.kafka.server.quota.ClientQuotaCallback
+
+
Quota configuration update callback that is invoked when quota configuration for an entity is updated in the quorum.
+
+
updateRemoteLogSegmentMetadata(RemoteLogSegmentMetadataUpdate) - Method in interface org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager
+
+
This method is used to update the RemoteLogSegmentMetadata asynchronously.
+
+
UPGRADE - Enum constant in enum class org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType
+
 
+
UPGRADE_FROM_0100 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 0.10.0.x.
+
+
UPGRADE_FROM_0101 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 0.10.1.x.
+
+
UPGRADE_FROM_0102 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 0.10.2.x.
+
+
UPGRADE_FROM_0110 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 0.11.0.x.
+
+
UPGRADE_FROM_10 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 1.0.x.
+
+
UPGRADE_FROM_11 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 1.1.x.
+
+
UPGRADE_FROM_20 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 2.0.x.
+
+
UPGRADE_FROM_21 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 2.1.x.
+
+
UPGRADE_FROM_22 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 2.2.x.
+
+
UPGRADE_FROM_23 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 2.3.x.
+
+
UPGRADE_FROM_24 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 2.4.x.
+
+
UPGRADE_FROM_25 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 2.5.x.
+
+
UPGRADE_FROM_26 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 2.6.x.
+
+
UPGRADE_FROM_27 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 2.7.x.
+
+
UPGRADE_FROM_28 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 2.8.x.
+
+
UPGRADE_FROM_30 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 3.0.x.
+
+
UPGRADE_FROM_31 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 3.1.x.
+
+
UPGRADE_FROM_32 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 3.2.x.
+
+
UPGRADE_FROM_33 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 3.3.x.
+
+
UPGRADE_FROM_34 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 3.4.x.
+
+
UPGRADE_FROM_35 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 3.5.x.
+
+
UPGRADE_FROM_36 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 3.6.x.
+
+
UPGRADE_FROM_37 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 3.7.x.
+
+
UPGRADE_FROM_38 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 3.8.x.
+
+
UPGRADE_FROM_39 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 3.9.x.
+
+
UPGRADE_FROM_40 - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
Config value for parameter "upgrade.from" for upgrading an application from version 4.0.x.
+
+
UPGRADE_FROM_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
upgrade.from
+
+
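For the `UPGRADE_FROM_*` constants and `UPGRADE_FROM_CONFIG` above, a hedged sketch of the usual two-bounce rolling upgrade; the application id and bootstrap address are placeholders.

```java
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class UpgradeFromSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-app");            // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        // First rolling bounce: tell the new binaries which version the running app was on.
        props.put(StreamsConfig.UPGRADE_FROM_CONFIG, StreamsConfig.UPGRADE_FROM_39);
        // Second rolling bounce (after every instance runs the new version): remove upgrade.from again.
    }
}
```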
upgraded() - Method in class org.apache.kafka.clients.admin.MemberDescription
+
+
The flag indicating whether a member within a GroupType.CONSUMER group uses the GroupType.CONSUMER protocol.
+
+
upgradeType() - Method in class org.apache.kafka.clients.admin.FeatureUpdate
+
 
+
upperBound() - Method in class org.apache.kafka.streams.query.TimestampedRangeQuery
+
+
The upper bound of the query, if specified
+
+
upperBound(double) - Static method in class org.apache.kafka.common.metrics.Quota
+
 
+
UPSTREAM_OFFSET_KEY - Static variable in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
upstreamClusters() - Method in class org.apache.kafka.connect.mirror.MirrorClient
+
+
Finds upstream clusters, which may be multiple hops away, based on incoming heartbeats.
+
+
upstreamClusters(Map<String, Object>) - Static method in class org.apache.kafka.connect.mirror.RemoteClusterUtils
+
+
Finds all upstream clusters
+
+
upstreamOffset() - Method in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
upstreamTopic(String) - Method in class org.apache.kafka.connect.mirror.DefaultReplicationPolicy
+
 
+
upstreamTopic(String) - Method in class org.apache.kafka.connect.mirror.IdentityReplicationPolicy
+
+
Since any topic may be a remote topic, this just returns `topic`.
+
+
upstreamTopic(String) - Method in interface org.apache.kafka.connect.mirror.ReplicationPolicy
+
+
Return the name of the given topic on the source cluster.
+
+
usableBytes() - Method in class org.apache.kafka.clients.admin.LogDirDescription
+
+
The usable size on the volume this log directory is on or empty if the broker did not return a value.
+
+
UsePartitionTimeOnInvalidTimestamp - Class in org.apache.kafka.streams.processor
+
+
Retrieves embedded metadata timestamps from Kafka messages.
+
+
UsePartitionTimeOnInvalidTimestamp() - Constructor for class org.apache.kafka.streams.processor.UsePartitionTimeOnInvalidTimestamp
+
 
+
user() - Method in class org.apache.kafka.clients.admin.UserScramCredentialAlteration
+
 
+
USER - Enum constant in enum class org.apache.kafka.common.resource.ResourceType
+
+
A user principal
+
+
USER - Enum constant in enum class org.apache.kafka.server.quota.ClientQuotaEntity.ConfigEntityType
+
 
+
USER - Static variable in class org.apache.kafka.common.quota.ClientQuotaEntity
+
+
The type of an entity entry.
+
+
USER_TYPE - Static variable in class org.apache.kafka.common.security.auth.KafkaPrincipal
+
 
+
userData() - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment
+
 
+
userData() - Method in class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
+
 
+
userEndpoint() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
User-defined endpoint for Interactive Queries.
+
+
users() - Method in class org.apache.kafka.clients.admin.DescribeUserScramCredentialsResult
+
 
+
UserScramCredentialAlteration - Class in org.apache.kafka.clients.admin
+
+
A request to alter a user's SASL/SCRAM credentials.
+
+
UserScramCredentialDeletion - Class in org.apache.kafka.clients.admin
+
+
A request to delete a SASL/SCRAM credential for a user.
+
+
UserScramCredentialDeletion(String, ScramMechanism) - Constructor for class org.apache.kafka.clients.admin.UserScramCredentialDeletion
+
 
+
UserScramCredentialsDescription - Class in org.apache.kafka.clients.admin
+
+
Representation of all SASL/SCRAM credentials associated with a user that can be retrieved, or an exception indicating why credentials could not be retrieved.
+
+
UserScramCredentialsDescription(String, List<ScramCredentialInfo>) - Constructor for class org.apache.kafka.clients.admin.UserScramCredentialsDescription
+
 
+
UserScramCredentialUpsertion - Class in org.apache.kafka.clients.admin
+
+
A request to update/insert a SASL/SCRAM credential for a user.
+
+
UserScramCredentialUpsertion(String, ScramCredentialInfo, byte[]) - Constructor for class org.apache.kafka.clients.admin.UserScramCredentialUpsertion
+
+
Constructor that generates a random salt
+
+
UserScramCredentialUpsertion(String, ScramCredentialInfo, byte[], byte[]) - Constructor for class org.apache.kafka.clients.admin.UserScramCredentialUpsertion
+
+
Constructor that accepts an explicit salt
+
+
UserScramCredentialUpsertion(String, ScramCredentialInfo, String) - Constructor for class org.apache.kafka.clients.admin.UserScramCredentialUpsertion
+
+
Constructor that generates a random salt
+
+
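To tie the `UserScramCredentialUpsertion` constructors above to their typical use, a hedged admin-client sketch; the bootstrap address, user, password, and iteration count are placeholders.

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ScramCredentialInfo;
import org.apache.kafka.clients.admin.ScramMechanism;
import org.apache.kafka.clients.admin.UserScramCredentialUpsertion;

public class ScramUpsertSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            ScramCredentialInfo info = new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 8192);
            // Upsert a SCRAM credential for user "alice"; this constructor generates a random salt.
            admin.alterUserScramCredentials(
                    List.of(new UserScramCredentialUpsertion("alice", info, "alice-secret")))
                 .all()
                 .get();
        }
    }
}
```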
Uuid - Class in org.apache.kafka.common
+
+
This class defines an immutable universally unique identifier (UUID).
+
+
Uuid(long, long) - Constructor for class org.apache.kafka.common.Uuid
+
+
Constructs a 128-bit type 4 UUID where the first long represents the most significant 64 bits and the second long represents the least significant 64 bits.
+
+
UUID() - Static method in class org.apache.kafka.common.serialization.Serdes
+
+
A serde for nullable UUID type
+
+
UUIDDeserializer - Class in org.apache.kafka.common.serialization
+
+
We are converting the byte array to String before deserializing to UUID.
+
+
UUIDDeserializer() - Constructor for class org.apache.kafka.common.serialization.UUIDDeserializer
+
 
+
UUIDSerde() - Constructor for class org.apache.kafka.common.serialization.Serdes.UUIDSerde
+
 
+
UUIDSerializer - Class in org.apache.kafka.common.serialization
+
+
We are converting UUID to String before serializing.
+
+
UUIDSerializer() - Constructor for class org.apache.kafka.common.serialization.UUIDSerializer
+
 
+
+

V

+
+
valid() - Method in class org.apache.kafka.clients.admin.ListConsumerGroupsResult
+
+
Deprecated.
+
Returns a future which yields just the valid listings.
+
+
valid() - Method in class org.apache.kafka.clients.admin.ListGroupsResult
+
+
Returns a future which yields just the valid listings.
+
+
valid(String) - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerExtensionsValidatorCallback
+
+
Validates a specific extension in the original inputExtensions map
+
+
VALID_LOG_LEVELS - Static variable in class org.apache.kafka.common.config.LogLevelConfig
+
 
+
validate() - Method in class org.apache.kafka.connect.data.Struct
+
+
Validates that this struct has filled in all the necessary data with valid values.
+
+
validate(String) - Method in class org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator
+
+
Accepts an OAuth JWT access token in base-64 encoded format, validates, and returns an OAuthBearerToken.
+
+
validate(String) - Method in class org.apache.kafka.common.security.oauthbearer.ClientJwtValidator
+
+
Accepts an OAuth JWT access token in base-64 encoded format, validates, and returns an OAuthBearerToken.
+
+
validate(String) - Method in class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
+
 
+
validate(String) - Method in interface org.apache.kafka.common.security.oauthbearer.JwtValidator
+
+
Accepts an OAuth JWT access token in base-64 encoded format, validates, and returns an OAuthBearerToken.
+
+
validate(Map<String, String>) - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Validate the current configuration values with the configuration definition.
+
+
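For the `ConfigDef.validate(Map)` entry above, a hedged sketch of defining two keys and validating user-supplied values; the key names, defaults, and documentation strings are hypothetical.

```java
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigValue;

public class ConfigDefValidateSketch {
    public static void main(String[] args) {
        // A hypothetical two-key definition, just to show the validate() flow.
        ConfigDef def = new ConfigDef()
                .define("batch.size", ConfigDef.Type.INT, 100, ConfigDef.Range.atLeast(1),
                        ConfigDef.Importance.MEDIUM, "Number of records per batch.")
                .define("topic", ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, "Target topic.");

        // validate() returns one ConfigValue per key, carrying parsed values and any error messages;
        // here "batch.size" violates its Range validator and the required "topic" key is missing.
        List<ConfigValue> results = def.validate(Map.of("batch.size", "0"));
        results.forEach(cv -> System.out.println(cv.name() + " -> " + cv.errorMessages()));
    }
}
```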
validate(Map<String, String>) - Method in class org.apache.kafka.connect.connector.Connector
+
+
Validate the connector configuration values against configuration definitions.
+
+
validate(Map<String, String>) - Method in class org.apache.kafka.connect.tools.MockSinkConnector
+
 
+
validate(Map<String, String>) - Method in class org.apache.kafka.connect.tools.MockSourceConnector
+
 
+
validate(ConnectorClientConfigRequest) - Method in interface org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy
+
+
Workers will invoke this before configuring per-connector Kafka admin, producer, and consumer client instances to validate if all the overridden client configurations are allowed per the policy implementation.
+
+
validate(AlterConfigPolicy.RequestMetadata) - Method in interface org.apache.kafka.server.policy.AlterConfigPolicy
+
+
Validate the request parameters and throw a PolicyViolationException with a suitable error message if the alter configs request parameters for the provided resource do not satisfy this policy.
+
+
validate(CreateTopicPolicy.RequestMetadata) - Method in interface org.apache.kafka.server.policy.CreateTopicPolicy
+
+
Validate the request parameters and throw a PolicyViolationException with a suitable error message if the create topics request parameters for the provided topic do not satisfy this policy.
+
+
validateAll(Map<String, String>) - Method in class org.apache.kafka.common.config.ConfigDef
+
 
+
validatedExtensions() - Method in class org.apache.kafka.common.security.oauthbearer.OAuthBearerExtensionsValidatorCallback
+
 
+
validateOnly() - Method in class org.apache.kafka.clients.admin.AlterClientQuotasOptions
+
+
Returns whether the request should be validated without altering the configs.
+
+
validateOnly() - Method in class org.apache.kafka.clients.admin.CreatePartitionsOptions
+
+
Return true if the request should be validated without creating new partitions.
+
+
validateOnly() - Method in class org.apache.kafka.clients.admin.UpdateFeaturesOptions
+
 
+
validateOnly(boolean) - Method in class org.apache.kafka.clients.admin.AlterClientQuotasOptions
+
+
Sets whether the request should be validated without altering the configs.
+
+
validateOnly(boolean) - Method in class org.apache.kafka.clients.admin.AlterConfigsOptions
+
+
Set to true if the request should be validated without altering the configs.
+
+
validateOnly(boolean) - Method in class org.apache.kafka.clients.admin.CreatePartitionsOptions
+
+
Set to true if the request should be validated without creating new partitions.
+
+
validateOnly(boolean) - Method in class org.apache.kafka.clients.admin.CreateTopicsOptions
+
+
Set to true if the request should be validated without creating the topic.
+
+
validateOnly(boolean) - Method in class org.apache.kafka.clients.admin.UpdateFeaturesOptions
+
 
+
validateReconfiguration(Map<String, ?>) - Method in class org.apache.kafka.common.metrics.JmxReporter
+
 
+
validateReconfiguration(Map<String, ?>) - Method in interface org.apache.kafka.common.metrics.MetricsReporter
+
 
+
validateReconfiguration(Map<String, ?>) - Method in interface org.apache.kafka.common.Reconfigurable
+
+
Validates the provided configuration.
+
+
validateTaskAssignment(ApplicationState, TaskAssignor.TaskAssignment) - Static method in class org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils
+
+
Validate the passed-in TaskAssignor.TaskAssignment and return a TaskAssignor.AssignmentError representing the first error detected in the assignment, or TaskAssignor.AssignmentError.NONE if the assignment passes the verification check.
+
+
validateValue(Object) - Method in class org.apache.kafka.connect.data.ConnectSchema
+
+
Validate that the value can be used for this schema, i.e.
+
+
validateValue(String, Schema, Object) - Static method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
validateValue(Schema, Object) - Static method in class org.apache.kafka.connect.data.ConnectSchema
+
+
Validate that the value can be used with the schema, i.e.
+
+
validator - Variable in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
validTo() - Method in class org.apache.kafka.streams.state.VersionedRecord
+
 
+
validValues(String, Map<String, Object>) - Method in interface org.apache.kafka.common.config.ConfigDef.Recommender
+
+
The valid values for the configuration given the current configuration values.
+
+
value - Variable in enum class org.apache.kafka.common.ElectionType
+
 
+
value - Variable in class org.apache.kafka.streams.KeyValue
+
+
The value of the key-value pair.
+
+
value() - Method in class org.apache.kafka.clients.admin.ConfigEntry.ConfigSynonym
+
+
Returns the value of this configuration, which may be null if the configuration is sensitive.
+
+
value() - Method in class org.apache.kafka.clients.admin.ConfigEntry
+
+
Return the value or null.
+
+
value() - Method in class org.apache.kafka.clients.consumer.ConsumerRecord
+
+
The value
+
+
value() - Method in class org.apache.kafka.clients.producer.ProducerRecord
+
 
+
value() - Method in class org.apache.kafka.common.config.ConfigValue
+
 
+
value() - Method in interface org.apache.kafka.common.header.Header
+
 
+
value() - Method in exception org.apache.kafka.common.metrics.QuotaViolationException
+
 
+
value() - Method in class org.apache.kafka.common.quota.ClientQuotaAlteration.Op
+
 
+
value() - Method in interface org.apache.kafka.common.security.oauthbearer.OAuthBearerToken
+
+
The b64token value as defined in RFC 6750 Section 2.1
+
+
value() - Method in class org.apache.kafka.connect.connector.ConnectRecord
+
 
+
value() - Method in class org.apache.kafka.connect.data.SchemaAndValue
+
 
+
value() - Method in interface org.apache.kafka.connect.header.Header
+
+
Get the header's value as deserialized by Connect's header converter.
+
+
value() - Method in class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata.CustomMetadata
+
 
+
value() - Method in class org.apache.kafka.streams.processor.api.FixedKeyRecord
+
+
The value of the record.
+
+
value() - Method in class org.apache.kafka.streams.processor.api.Record
+
+
The value of the record.
+
+
value() - Method in class org.apache.kafka.streams.state.ValueAndTimestamp
+
 
+
value() - Method in class org.apache.kafka.streams.state.VersionedRecord
+
 
+
value() - Method in class org.apache.kafka.streams.test.TestRecord
+
 
+
value(double) - Method in class org.apache.kafka.common.metrics.stats.Histogram
+
 
+
value(Object) - Method in class org.apache.kafka.common.config.ConfigValue
+
 
+
value(MetricConfig, long) - Method in interface org.apache.kafka.common.metrics.Gauge
+
+
Returns the current value associated with this gauge.
+
+
value(MetricConfig, long, double) - Method in class org.apache.kafka.common.metrics.stats.Percentiles
+
 
+
Value - Class in org.apache.kafka.common.metrics.stats
+
+
An instantaneous value.
+
+
Value() - Constructor for class org.apache.kafka.common.metrics.stats.Value
+
 
+
VALUE - Enum constant in enum class org.apache.kafka.common.errors.RecordDeserializationException.DeserializationExceptionOrigin
+
 
+
VALUE - Enum constant in enum class org.apache.kafka.connect.storage.ConverterType
+
 
+
VALUE - Enum constant in enum class org.apache.kafka.streams.errors.ProductionExceptionHandler.SerializationExceptionOrigin
+
+
Serialization exception occurred during serialization of the value.
+
+
VALUE_DESERIALIZER_CLASS_CONFIG - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
+
value.deserializer
+
+
VALUE_DESERIALIZER_CLASS_DOC - Static variable in class org.apache.kafka.clients.consumer.ConsumerConfig
+
 
+
VALUE_SCHEMA_V0 - Static variable in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
VALUE_SCHEMA_V0 - Static variable in class org.apache.kafka.connect.mirror.Heartbeat
+
 
+
VALUE_SERIALIZER_CLASS_CONFIG - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
+
value.serializer
+
+
VALUE_SERIALIZER_CLASS_DOC - Static variable in class org.apache.kafka.clients.producer.ProducerConfig
+
 
+
ValueAndTimestamp<V> - Class in org.apache.kafka.streams.state
+
+
Combines a value from a KeyValue with a timestamp.
+
+
valueBuffer() - Method in exception org.apache.kafka.common.errors.RecordDeserializationException
+
 
+
valueDeserializer() - Method in class org.apache.kafka.streams.state.StateSerdes
+
+
Return the value deserializer.
+
+
valueFrom(byte[]) - Method in class org.apache.kafka.streams.state.StateSerdes
+
+
Deserialize the value from raw bytes.
+
+
ValueJoiner<V1,V2,VR> - Interface in org.apache.kafka.streams.kstream
+
+
The ValueJoiner interface for joining two values into a new value of arbitrary type.
+
+
ValueJoinerWithKey<K1,V1,V2,VR> - Interface in org.apache.kafka.streams.kstream
+
+
The ValueJoinerWithKey interface for joining two values into a new value of arbitrary type.
+
+
ValueMapper<V,VR> - Interface in org.apache.kafka.streams.kstream
+
+
The ValueMapper interface for mapping a value to a new value of arbitrary type.
+
+
ValueMapperWithKey<K,V,VR> - Interface in org.apache.kafka.streams.kstream
+
+
The ValueMapperWithKey interface for mapping a value to a new value of arbitrary type.
+
+
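For orientation, a minimal Kafka Streams sketch (not part of the generated index; topic names are placeholders and default serdes are assumed via configuration) showing a ValueMapper and a ValueJoiner supplied as lambdas:

```java
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;

public class ValueLambdasExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> orders = builder.stream("orders");
        KTable<String, String> customers = builder.table("customers");

        // ValueMapper<V, VR>: map each value to a new value (key is untouched).
        KStream<String, Integer> orderSizes = orders.mapValues(v -> v.length());

        // ValueJoiner<V1, V2, VR>: combine the stream value with the table value.
        KStream<String, String> enriched =
                orders.join(customers, (order, customer) -> order + " for " + customer);

        enriched.to("enriched-orders");
    }
}
```

Building the topology and starting the KafkaStreams instance are omitted; the point is only the shape of the two functional interfaces.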
valueOf(byte) - Static method in enum class org.apache.kafka.common.ElectionType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.clients.admin.AlterConfigOp.OpType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigSource
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.clients.admin.EndpointType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.clients.admin.ScramMechanism
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.clients.admin.TransactionState
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.clients.consumer.AcknowledgeType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.RebalanceProtocol
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.clients.consumer.GroupProtocol
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.clients.consumer.OffsetResetStrategy
+
+
Deprecated.
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.acl.AclOperation
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.acl.AclPermissionType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.ClassicGroupState
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.config.ConfigDef.Importance
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.config.ConfigDef.Type
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.config.ConfigDef.Width
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.config.ConfigResource.Type
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.config.SslClientAuth
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.ConsumerGroupState
+
+
Deprecated.
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.ElectionType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.errors.RecordDeserializationException.DeserializationExceptionOrigin
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.GroupState
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.GroupType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.IsolationLevel
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.metrics.Sensor.RecordingLevel
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.resource.PatternType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.resource.ResourceType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.common.security.auth.SecurityProtocol
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest.ClientType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.connect.data.Schema.Type
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.connect.health.ConnectorType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.connect.source.ConnectorTransactionBoundaries
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.connect.source.ExactlyOnceSupport
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.connect.source.SourceTask.TransactionBoundary
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.connect.storage.ConverterType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.coordinator.group.api.assignor.SubscriptionType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.server.authorizer.AuthorizationResult
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteState
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.server.log.remote.storage.RemoteStorageManager.IndexType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.server.quota.ClientQuotaEntity.ConfigEntityType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.server.quota.ClientQuotaType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.errors.DeserializationExceptionHandler.DeserializationHandlerResponse
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.errors.ProcessingExceptionHandler.ProcessingHandlerResponse
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.errors.ProductionExceptionHandler.ProductionExceptionHandlerResponse
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.errors.ProductionExceptionHandler.SerializationExceptionOrigin
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.GroupProtocol
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.KafkaStreams.State
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.kstream.EmitStrategy.StrategyType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.kstream.Materialized.StoreType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask.Type
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.processor.assignment.TaskAssignor.AssignmentError
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.processor.PunctuationType
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.processor.StandbyUpdateListener.SuspendReason
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.query.FailureReason
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.query.ResultOrder
+
+
Returns the enum constant of this class with the specified name.
+
+
valueOf(String) - Static method in enum class org.apache.kafka.streams.Topology.AutoOffsetReset
+
+
Deprecated.
+
Returns the enum constant of this class with the specified name.
+
+
values() - Method in class org.apache.kafka.clients.admin.AlterClientQuotasResult
+
+
Returns a map from quota entity to a future which can be used to check the status of the operation.
+
+
values() - Static method in enum class org.apache.kafka.clients.admin.AlterConfigOp.OpType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Method in class org.apache.kafka.clients.admin.AlterConfigsResult
+
+
Return a map from resources to futures which can be used to check the status of the operation on each resource.
+
+
values() - Method in class org.apache.kafka.clients.admin.AlterPartitionReassignmentsResult
+
+
Return a map from partitions to futures which can be used to check the status of the reassignment.
+
+
values() - Method in class org.apache.kafka.clients.admin.AlterReplicaLogDirsResult
+
+
Return a map from TopicPartitionReplica to KafkaFuture which holds the status of individual replica movement.
+
+
values() - Method in class org.apache.kafka.clients.admin.AlterUserScramCredentialsResult
+
+
Return a map from user names to futures, which can be used to check the status of the alteration(s) for each user.
+
+
values() - Static method in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigSource
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.clients.admin.ConfigEntry.ConfigType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Method in class org.apache.kafka.clients.admin.CreateAclsResult
+
+
Return a map from ACL bindings to futures which can be used to check the status of the creation of each ACL binding.
+
+
values() - Method in class org.apache.kafka.clients.admin.CreatePartitionsResult
+
+
Return a map from topic names to futures, which can be used to check the status of individual partition creations.
+
+
values() - Method in class org.apache.kafka.clients.admin.CreateTopicsResult
+
+
Return a map from topic names to futures, which can be used to check the status of individual topic creations.
+
+
values() - Method in class org.apache.kafka.clients.admin.DeleteAclsResult.FilterResults
+
+
Return a list of delete ACLs results for a given filter.
+
+
values() - Method in class org.apache.kafka.clients.admin.DeleteAclsResult
+
+
Return a map from acl filters to futures which can be used to check the status of the deletions by each filter.
+
+
values() - Method in class org.apache.kafka.clients.admin.DescribeAclsResult
+
+
Return a future containing the ACLs requested.
+
+
values() - Method in class org.apache.kafka.clients.admin.DescribeConfigsResult
+
+
Return a map from resources to futures which can be used to check the status of the configuration for each resource.
+
+
values() - Method in class org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult
+
+
Return a map from replica to future which can be used to check the log directory information of individual replicas
+
+
values() - Static method in enum class org.apache.kafka.clients.admin.EndpointType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.clients.admin.ScramMechanism
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.clients.admin.TransactionState
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Method in class org.apache.kafka.clients.admin.UpdateFeaturesResult
+
 
+
values() - Static method in enum class org.apache.kafka.clients.consumer.AcknowledgeType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.RebalanceProtocol
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.clients.consumer.GroupProtocol
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.clients.consumer.OffsetResetStrategy
+
+
Deprecated.
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.acl.AclOperation
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.acl.AclPermissionType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.ClassicGroupState
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Method in class org.apache.kafka.common.config.AbstractConfig
+
 
+
values() - Static method in enum class org.apache.kafka.common.config.ConfigDef.Importance
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.config.ConfigDef.Type
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.config.ConfigDef.Width
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.config.ConfigResource.Type
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.config.SslClientAuth
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.ConsumerGroupState
+
+
Deprecated.
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.ElectionType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.errors.RecordDeserializationException.DeserializationExceptionOrigin
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.GroupState
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.GroupType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.IsolationLevel
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.metrics.Sensor.RecordingLevel
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.resource.PatternType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.resource.ResourceType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.common.security.auth.SecurityProtocol
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest.ClientType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.connect.data.Schema.Type
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.connect.health.ConnectorType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.connect.source.ConnectorTransactionBoundaries
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.connect.source.ExactlyOnceSupport
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.connect.source.SourceTask.TransactionBoundary
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.connect.storage.ConverterType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.coordinator.group.api.assignor.SubscriptionType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.server.authorizer.AuthorizationResult
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteState
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.server.log.remote.storage.RemoteStorageManager.IndexType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.server.quota.ClientQuotaEntity.ConfigEntityType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.server.quota.ClientQuotaType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.errors.DeserializationExceptionHandler.DeserializationHandlerResponse
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.errors.ProcessingExceptionHandler.ProcessingHandlerResponse
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.errors.ProductionExceptionHandler.ProductionExceptionHandlerResponse
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.errors.ProductionExceptionHandler.SerializationExceptionOrigin
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.GroupProtocol
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.KafkaStreams.State
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.kstream.EmitStrategy.StrategyType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.kstream.Materialized.StoreType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask.Type
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.processor.assignment.TaskAssignor.AssignmentError
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.processor.PunctuationType
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.processor.StandbyUpdateListener.SuspendReason
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.query.FailureReason
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.query.ResultOrder
+
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
values() - Static method in enum class org.apache.kafka.streams.Topology.AutoOffsetReset
+
+
Deprecated.
+
Returns an array containing the constants of this enum class, in the order they are declared.
+
+
Values - Class in org.apache.kafka.connect.data
+
+
Utility for converting from one Connect value to a different form.
+
+
Values() - Constructor for class org.apache.kafka.connect.data.Values
+
 
+
VALUES - Static variable in enum class org.apache.kafka.common.config.SslClientAuth
+
 
+
valueSchema() - Method in class org.apache.kafka.connect.connector.ConnectRecord
+
 
+
valueSchema() - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
valueSchema() - Method in interface org.apache.kafka.connect.data.Schema
+
+
Get the value schema for this map or array schema.
+
+
valueSchema() - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
valueSerde() - Method in class org.apache.kafka.streams.kstream.Joined
+
+
Deprecated.
Since 4.0 and should not be used any longer.
+
+
+
valueSerde() - Method in class org.apache.kafka.streams.processor.api.MockProcessorContext
+
 
+
valueSerde() - Method in interface org.apache.kafka.streams.processor.api.ProcessingContext
+
+
Return the default value serde.
+
+
valueSerde() - Method in class org.apache.kafka.streams.processor.MockProcessorContext
+
+
Deprecated.
+
valueSerde() - Method in interface org.apache.kafka.streams.processor.ProcessorContext
+
+
Return the default value serde.
+
+
valueSerde() - Method in interface org.apache.kafka.streams.processor.StateStoreContext
+
+
Returns the default value serde.
+
+
valueSerde() - Method in class org.apache.kafka.streams.state.StateSerdes
+
+
Return the value serde.
+
+
valueSerde(Serde<V>) - Static method in class org.apache.kafka.streams.kstream.Grouped
+
+
Create a Grouped instance with the provided valueSerde.
+
+
valueSerde(Serde<V>) - Static method in class org.apache.kafka.streams.kstream.Produced
+
+
Create a Produced instance with provided valueSerde.
+
+
valueSerde(Serde<VLeft>) - Static method in class org.apache.kafka.streams.kstream.Joined
+
+
Create an instance of Joined with a value Serde.
+
+
valueSerializer() - Method in class org.apache.kafka.streams.state.StateSerdes
+
+
Return the value serializer.
+
+
valuesWithPrefixAllOrNothing(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
+
If at least one key with prefix exists, all prefixed values will be parsed and put into map.
+
+
valuesWithPrefixOverride(String) - Method in class org.apache.kafka.common.config.AbstractConfig
+
+
Put all keys that do not start with prefix and their parsed values in the result map and then put all the remaining keys with the prefix stripped and their parsed values in the result map.
+
+
ValueTransformer<V,VR> - Interface in org.apache.kafka.streams.kstream
+
+
Deprecated.
Since 4.0. Use FixedKeyProcessor instead.
+
+
+
ValueTransformerSupplier<V,VR> - Interface in org.apache.kafka.streams.kstream
+
+
Deprecated.
Since 4.0. Use FixedKeyProcessorSupplier instead.
+
+
+
ValueTransformerWithKey<K,V,VR> - Interface in org.apache.kafka.streams.kstream
+
+
The ValueTransformerWithKey interface for stateful mapping of a value to a new value (with possible new type).
+
+
ValueTransformerWithKeySupplier<K,V,VR> - Interface in org.apache.kafka.streams.kstream
+
+
A ValueTransformerWithKeySupplier interface which can create one or more ValueTransformerWithKey instances.
+
+
VerifiableSinkConnector - Class in org.apache.kafka.connect.tools
+
+
A connector primarily intended for system tests.
+
+
VerifiableSinkConnector() - Constructor for class org.apache.kafka.connect.tools.VerifiableSinkConnector
+
 
+
VerifiableSinkTask - Class in org.apache.kafka.connect.tools
+
+
Counterpart to VerifiableSourceTask that consumes records and logs information about each to stdout.
+
+
VerifiableSinkTask() - Constructor for class org.apache.kafka.connect.tools.VerifiableSinkTask
+
 
+
VerifiableSourceConnector - Class in org.apache.kafka.connect.tools
+
+
A connector primarily intended for system tests.
+
+
VerifiableSourceConnector() - Constructor for class org.apache.kafka.connect.tools.VerifiableSourceConnector
+
 
+
VerifiableSourceTask - Class in org.apache.kafka.connect.tools
+
+
A connector primarily intended for system tests.
+
+
VerifiableSourceTask() - Constructor for class org.apache.kafka.connect.tools.VerifiableSourceTask
+
 
+
verifyTopologyOptimizationConfigs(String) - Static method in class org.apache.kafka.streams.StreamsConfig
+
 
+
version() - Method in interface org.apache.kafka.clients.consumer.ConsumerPartitionAssignor
+
+
Return the version of the assignor which indicates how the user metadata encodings and the assignment algorithm gets evolved.
+
+
version() - Method in interface org.apache.kafka.connect.components.Versioned
+
+
Get the version of this component.
+
+
version() - Method in interface org.apache.kafka.connect.connector.Task
+
+
Get the version of this task.
+
+
version() - Method in class org.apache.kafka.connect.data.ConnectSchema
+
 
+
version() - Method in interface org.apache.kafka.connect.data.Schema
+
+
Get the optional version of the schema.
+
+
version() - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
 
+
version() - Method in class org.apache.kafka.connect.storage.SimpleHeaderConverter
+
 
+
version() - Method in class org.apache.kafka.connect.storage.StringConverter
+
 
+
version() - Method in class org.apache.kafka.connect.tools.MockConnector
+
 
+
version() - Method in class org.apache.kafka.connect.tools.MockSinkConnector
+
 
+
version() - Method in class org.apache.kafka.connect.tools.MockSinkTask
+
 
+
version() - Method in class org.apache.kafka.connect.tools.MockSourceConnector
+
 
+
version() - Method in class org.apache.kafka.connect.tools.MockSourceTask
+
 
+
version() - Method in class org.apache.kafka.connect.tools.SchemaSourceConnector
+
 
+
version() - Method in class org.apache.kafka.connect.tools.SchemaSourceTask
+
 
+
version() - Method in class org.apache.kafka.connect.tools.VerifiableSinkConnector
+
 
+
version() - Method in class org.apache.kafka.connect.tools.VerifiableSinkTask
+
 
+
version() - Method in class org.apache.kafka.connect.tools.VerifiableSourceConnector
+
 
+
version() - Method in class org.apache.kafka.connect.tools.VerifiableSourceTask
+
 
+
version(Integer) - Method in class org.apache.kafka.connect.data.SchemaBuilder
+
+
Set the version of this schema.
+
+
VERSION - Static variable in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
VERSION - Static variable in class org.apache.kafka.connect.mirror.Heartbeat
+
 
+
VERSION_KEY - Static variable in class org.apache.kafka.connect.mirror.Checkpoint
+
 
+
VERSION_KEY - Static variable in class org.apache.kafka.connect.mirror.Heartbeat
+
 
+
Versioned - Interface in org.apache.kafka.connect.components
+
+
Connect requires some components implement this interface to define a version string.
+
+
VersionedBytesStore - Interface in org.apache.kafka.streams.state
+
+
A representation of a versioned key-value store as a KeyValueStore of type <Bytes, byte[]>.
+
+
VersionedBytesStoreSupplier - Interface in org.apache.kafka.streams.state
+
+
A store supplier that can be used to create one or more versioned key-value stores, specifically, VersionedBytesStore instances.
+
+
VersionedKeyQuery<K,V> - Class in org.apache.kafka.streams.query
+
+
Interactive query for retrieving a single record from a versioned state store based on its key and timestamp.
+
+
VersionedKeyValueStore<K,V> - Interface in org.apache.kafka.streams.state
+
+
A key-value store that stores multiple record versions per key, and supports timestamp-based retrieval operations to return the latest record (per key) as of a specified timestamp.
+
+
versionedKeyValueStoreBuilder(VersionedBytesStoreSupplier, Serde<K>, Serde<V>) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Creates a StoreBuilder that can be used to build a VersionedKeyValueStore.
+
+
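A minimal sketch of wiring these pieces together (not part of the generated index; the store name and retention period are placeholders, and the builder would still need to be added to a Topology):

```java
import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;
import org.apache.kafka.streams.state.VersionedKeyValueStore;

public class VersionedStoreExample {
    public static void main(String[] args) {
        // Persistent versioned store keeping 1 hour of older record versions.
        StoreBuilder<VersionedKeyValueStore<String, Long>> storeBuilder =
                Stores.versionedKeyValueStoreBuilder(
                        Stores.persistentVersionedKeyValueStore("prices-versioned", Duration.ofHours(1)),
                        Serdes.String(),
                        Serdes.Long());
        // Once attached to a topology, get(key, asOfTimestamp) returns the latest
        // VersionedRecord for that key as of the given timestamp.
    }
}
```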
VersionedRecord<V> - Class in org.apache.kafka.streams.state
+
+
Combines a value (from a key-value record) with a timestamp, for use as the return type from VersionedKeyValueStore.get(Object, long) and related methods.
+
+
VersionedRecord(V, long) - Constructor for class org.apache.kafka.streams.state.VersionedRecord
+
+
Create a new VersionedRecord instance.
+
+
VersionedRecord(V, long, long) - Constructor for class org.apache.kafka.streams.state.VersionedRecord
+
+
Create a new VersionedRecord instance.
+
+
VersionedRecordIterator<V> - Interface in org.apache.kafka.streams.state
+
+
Iterator interface of VersionedRecord.
+
+
visible() - Method in class org.apache.kafka.common.config.ConfigValue
+
 
+
visible(boolean) - Method in class org.apache.kafka.common.config.ConfigValue
+
 
+
visible(String, Map<String, Object>) - Method in interface org.apache.kafka.common.config.ConfigDef.Recommender
+
+
Set the visibility of the configuration given the current configuration values.
+
+
Void() - Static method in class org.apache.kafka.common.serialization.Serdes
+
+
A serde for Void type.
+
+
VoidDeserializer - Class in org.apache.kafka.common.serialization
+
 
+
VoidDeserializer() - Constructor for class org.apache.kafka.common.serialization.VoidDeserializer
+
 
+
VoidSerde() - Constructor for class org.apache.kafka.common.serialization.Serdes.VoidSerde
+
 
+
VoidSerializer - Class in org.apache.kafka.common.serialization
+
 
+
VoidSerializer() - Constructor for class org.apache.kafka.common.serialization.VoidSerializer
+
 
+
VoterNotFoundException - Exception in org.apache.kafka.common.errors
+
 
+
VoterNotFoundException(String) - Constructor for exception org.apache.kafka.common.errors.VoterNotFoundException
+
 
+
VoterNotFoundException(String, Throwable) - Constructor for exception org.apache.kafka.common.errors.VoterNotFoundException
+
 
+
voters() - Method in class org.apache.kafka.clients.admin.QuorumInfo
+
 
+
+

W

+
+
wakeup() - Method in interface org.apache.kafka.clients.consumer.Consumer
+
 
+
wakeup() - Method in class org.apache.kafka.clients.consumer.KafkaConsumer
+
+
Wakeup the consumer.
+
+
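A minimal sketch of the usual wakeup() shutdown pattern (not part of the generated index; broker address, group id, and topic name are placeholders):

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.StringDeserializer;

public class WakeupExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Another thread (here, a shutdown hook) interrupts the poll loop via wakeup().
        Runtime.getRuntime().addShutdownHook(new Thread(consumer::wakeup));
        try {
            consumer.subscribe(List.of("example-topic"));
            while (true) {
                consumer.poll(Duration.ofMillis(500));
            }
        } catch (WakeupException e) {
            // Expected on shutdown: wakeup() makes a blocking poll() throw this exception.
        } finally {
            consumer.close();
        }
    }
}
```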
wakeup() - Method in class org.apache.kafka.clients.consumer.KafkaShareConsumer
+
+
Wake up the consumer.
+
+
wakeup() - Method in class org.apache.kafka.clients.consumer.MockConsumer
+
 
+
wakeup() - Method in class org.apache.kafka.clients.consumer.MockShareConsumer
+
 
+
wakeup() - Method in interface org.apache.kafka.clients.consumer.ShareConsumer
+
 
+
WakeupException - Exception in org.apache.kafka.common.errors
+
+
Exception used to indicate preemption of a blocking operation by an external thread.
+
+
WakeupException() - Constructor for exception org.apache.kafka.common.errors.WakeupException
+
 
+
WALL_CLOCK_TIME - Enum constant in enum class org.apache.kafka.streams.processor.PunctuationType
+
 
+
WallclockTimestampExtractor - Class in org.apache.kafka.streams.processor
+
+
Retrieves current wall clock timestamps as System.currentTimeMillis().
+
+
WallclockTimestampExtractor() - Constructor for class org.apache.kafka.streams.processor.WallclockTimestampExtractor
+
 
+
warmupTasks() - Method in class org.apache.kafka.clients.admin.StreamsGroupMemberAssignment
+
+
Warmup tasks for this client.
+
+
WARN_LOG_LEVEL - Static variable in class org.apache.kafka.common.config.LogLevelConfig
+
+
The WARN level designates potentially harmful situations.
+
+
whenComplete(KafkaFuture.BiConsumer<? super T, ? super Throwable>) - Method in class org.apache.kafka.common.KafkaFuture
+
+
Returns a new KafkaFuture with the same result or exception as this future, that executes the given action when this future completes.
+
+
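A minimal sketch of attaching a completion action to an admin result future with whenComplete (not part of the generated index; the cluster-id lookup is only an illustrative choice of future):

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.DescribeClusterResult;

public class WhenCompleteExample {
    static void logClusterId(Admin admin) {
        DescribeClusterResult result = admin.describeCluster();
        // The action runs once the future finishes, whether it completed
        // normally (exception == null) or exceptionally.
        result.clusterId().whenComplete((clusterId, exception) -> {
            if (exception != null) {
                System.err.println("describeCluster failed: " + exception);
            } else {
                System.out.println("cluster id: " + clusterId);
            }
        });
    }
}
```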
width - Variable in class org.apache.kafka.common.config.ConfigDef.ConfigKey
+
 
+
WILDCARD_RESOURCE - Static variable in class org.apache.kafka.common.resource.ResourcePattern
+
+
A special literal resource name that corresponds to 'all resources of a certain type'.
+
+
window() - Method in class org.apache.kafka.streams.kstream.Windowed
+
+
Return the window containing the values associated with this key.
+
+
Window - Class in org.apache.kafka.streams.kstream
+
+
A single window instance, defined by its start and end timestamp.
+
+
Window(long, long) - Constructor for class org.apache.kafka.streams.kstream.Window
+
+
Create a new window for the given start and end time.
+
+
WINDOW_SIZE_MS_CONFIG - Static variable in class org.apache.kafka.streams.kstream.TimeWindowedDeserializer
+
+
Sets window size for the deserializer in order to calculate window end times.
+
+
WINDOW_SIZE_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+ +
+
WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+
windowstore.changelog.additional.retention.ms
+
+
WindowBytesStoreSupplier - Interface in org.apache.kafka.streams.state
+
+
A store supplier that can be used to create one or more WindowStore<Byte, byte[]> instances of type <Byte, byte[]>.
+
+
Windowed<K> - Class in org.apache.kafka.streams.kstream
+
+
The result key type of a windowed stream aggregation.
+
+
Windowed(K, Window) - Constructor for class org.apache.kafka.streams.kstream.Windowed
+
 
+
WINDOWED_INNER_CLASS_SERDE - Static variable in class org.apache.kafka.streams.StreamsConfig
+
+ +
+
WINDOWED_INNER_DESERIALIZER_CLASS - Static variable in class org.apache.kafka.streams.kstream.SessionWindowedDeserializer
+
+
Default deserializer for the inner deserializer class of a windowed record.
+
+
WINDOWED_INNER_DESERIALIZER_CLASS - Static variable in class org.apache.kafka.streams.kstream.TimeWindowedDeserializer
+
+
Default deserializer for the inner deserializer class of a windowed record.
+
+
WINDOWED_INNER_SERIALIZER_CLASS - Static variable in class org.apache.kafka.streams.kstream.SessionWindowedSerializer
+
+
Default serializer for the inner serializer class of a windowed record.
+
+
WINDOWED_INNER_SERIALIZER_CLASS - Static variable in class org.apache.kafka.streams.kstream.TimeWindowedSerializer
+
+
Default serializer for the inner serializer class of a windowed record.
+
+
windowedBy(SessionWindows) - Method in interface org.apache.kafka.streams.kstream.CogroupedKStream
+
+
Create a new SessionWindowedCogroupedKStream instance that can be used to perform session windowed aggregations.
+
+
windowedBy(SessionWindows) - Method in interface org.apache.kafka.streams.kstream.KGroupedStream
+
+
Create a new SessionWindowedKStream instance that can be used to perform session windowed aggregations.
+
+
windowedBy(SlidingWindows) - Method in interface org.apache.kafka.streams.kstream.CogroupedKStream
+
+
Create a new TimeWindowedCogroupedKStream instance that can be used to perform sliding windowed aggregations.
+
+
windowedBy(SlidingWindows) - Method in interface org.apache.kafka.streams.kstream.KGroupedStream
+
+
Create a new TimeWindowedKStream instance that can be used to perform sliding windowed aggregations.
+
+
windowedBy(Windows<W>) - Method in interface org.apache.kafka.streams.kstream.CogroupedKStream
+
+
Create a new TimeWindowedCogroupedKStream instance that can be used to perform windowed aggregations.
+
+
windowedBy(Windows<W>) - Method in interface org.apache.kafka.streams.kstream.KGroupedStream
+
+
Create a new TimeWindowedKStream instance that can be used to perform windowed aggregations.
+
+
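A minimal sketch of a windowed aggregation built with windowedBy (not part of the generated index; the input topic name and window size are placeholders, and default String serdes are assumed via configuration):

```java
import java.time.Duration;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;

public class WindowedCountExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        // windowedBy(Windows<W>) turns the grouped stream into a TimeWindowedKStream,
        // so count() produces one result per key per 5-minute tumbling window.
        KTable<Windowed<String>, Long> counts = builder.<String, String>stream("clicks")
                .groupByKey()
                .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(5)))
                .count();
        // The windowed KTable could then be written to a topic or queried interactively.
    }
}
```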
WindowedCount - Class in org.apache.kafka.common.metrics.stats
+
+
A SampledStat that maintains a simple count of what it has seen.
+
+
WindowedCount() - Constructor for class org.apache.kafka.common.metrics.stats.WindowedCount
+
 
+
WindowedSerdes - Class in org.apache.kafka.streams.kstream
+
 
+
WindowedSerdes() - Constructor for class org.apache.kafka.streams.kstream.WindowedSerdes
+
 
+
WindowedSerdes.SessionWindowedSerde<T> - Class in org.apache.kafka.streams.kstream
+
 
+
WindowedSerdes.TimeWindowedSerde<T> - Class in org.apache.kafka.streams.kstream
+
 
+
WindowedSum - Class in org.apache.kafka.common.metrics.stats
+
+
A SampledStat that maintains the sum of what it has seen.
+
+
WindowedSum() - Constructor for class org.apache.kafka.common.metrics.stats.WindowedSum
+
 
+
WindowKeyQuery<K,V> - Class in org.apache.kafka.streams.query
+
 
+
WindowRangeQuery<K,V> - Class in org.apache.kafka.streams.query
+
 
+
Windows<W extends Window> - Class in org.apache.kafka.streams.kstream
+
+
The window specification for fixed size windows that is used to define window boundaries and grace period.
+
+
windowsFor(long) - Method in class org.apache.kafka.streams.kstream.JoinWindows
+
+
Not supported by JoinWindows.
+
+
windowsFor(long) - Method in class org.apache.kafka.streams.kstream.TimeWindows
+
 
+
windowsFor(long) - Method in class org.apache.kafka.streams.kstream.UnlimitedWindows
+
 
+
windowsFor(long) - Method in class org.apache.kafka.streams.kstream.Windows
+
+
Create all windows that contain the provided timestamp, indexed by non-negative window start timestamps.
+
+
windowSize() - Method in class org.apache.kafka.streams.state.DslWindowParams
+
 
+
windowSize() - Method in interface org.apache.kafka.streams.state.WindowBytesStoreSupplier
+
+
The size of the windows (in milliseconds) any store created from this supplier is creating.
+
+
windowSize(MetricConfig, long) - Method in class org.apache.kafka.common.metrics.stats.Rate
+
 
+
windowSize(MetricConfig, long) - Method in class org.apache.kafka.common.metrics.stats.SimpleRate
+
 
+
windowStore() - Static method in class org.apache.kafka.streams.state.QueryableStoreTypes
+
+ +
+
windowStore(DslWindowParams) - Method in enum class org.apache.kafka.streams.kstream.Materialized.StoreType
+
 
+
windowStore(DslWindowParams) - Method in class org.apache.kafka.streams.state.BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers
+
 
+
windowStore(DslWindowParams) - Method in class org.apache.kafka.streams.state.BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers
+
 
+
windowStore(DslWindowParams) - Method in interface org.apache.kafka.streams.state.DslStoreSuppliers
+
 
+
WindowStore<K,V> - Interface in org.apache.kafka.streams.state
+
+
Interface for storing the aggregated values of fixed-size time windows.
+
+
windowStoreBuilder(WindowBytesStoreSupplier, Serde<K>, Serde<V>) - Static method in class org.apache.kafka.streams.state.Stores
+
+
Creates a StoreBuilder that can be used to build a WindowStore.
+
+
WindowStoreIterator<V> - Interface in org.apache.kafka.streams.state
+
+
Iterator interface of KeyValue with key typed Long used for WindowStore.fetch(Object, long, long) and WindowStore.fetch(Object, Instant, Instant). Users must call its close method explicitly upon completeness to release resources, or use try-with-resources statement (available since JDK7) for this Closeable class.
+
+
with(String, Serde<K>, Serde<V>) - Static method in class org.apache.kafka.streams.kstream.Grouped
+
+
Create a Grouped instance with the provided name, keySerde, and valueSerde.
+
+
with(BiConsumer<String, Object>, Supplier<String>) - Static method in class org.apache.kafka.common.config.ConfigDef.LambdaValidator
+
 
+
with(Serde<K>, Serde<V>) - Static method in class org.apache.kafka.streams.kstream.Consumed
+
+
Create an instance of Consumed with key and value Serdes.
+
+
with(Serde<K>, Serde<V>) - Static method in class org.apache.kafka.streams.kstream.Grouped
+
+
Create a Grouped instance with the provided keySerde and valueSerde.
+
+
with(Serde<K>, Serde<V>) - Static method in class org.apache.kafka.streams.kstream.Materialized
+
+
Materialize a StateStore with the provided key and value Serdes.
+
+
with(Serde<K>, Serde<V>) - Static method in class org.apache.kafka.streams.kstream.Produced
+
+
Create a Produced instance with provided keySerde and valueSerde.
+
+
with(Serde<K>, Serde<V>) - Static method in class org.apache.kafka.streams.kstream.Repartitioned
+
+
Create a Repartitioned instance with provided key serde and value serde.
+
+
with(Serde<K>, Serde<V>, StreamPartitioner<? super K, ? super V>) - Static method in class org.apache.kafka.streams.kstream.Produced
+
+
Create a Produced instance with provided keySerde, valueSerde, and partitioner.
+
+
with(Serde<K>, Serde<V>, TimestampExtractor, AutoOffsetReset) - Static method in class org.apache.kafka.streams.kstream.Consumed
+
+
Create an instance of Consumed with the supplied arguments.
+
+
with(Serde<K>, Serde<V>, TimestampExtractor, Topology.AutoOffsetReset) - Static method in class org.apache.kafka.streams.kstream.Consumed
+
+ +
+
with(Serde<K>, Serde<V1>, Serde<V2>) - Static method in class org.apache.kafka.streams.kstream.StreamJoined
+
+
Creates a StreamJoined instance with the provided serdes to configure the stores for the join.
+
+
with(Serde<K>, Serde<VLeft>, Serde<VRight>) - Static method in class org.apache.kafka.streams.kstream.Joined
+
+
Create an instance of Joined with key, value, and otherValue Serde instances.
+
+
with(Serde<K>, Serde<VLeft>, Serde<VRight>, String) - Static method in class org.apache.kafka.streams.kstream.Joined
+
+
Create an instance of Joined with key, value, and otherValue Serde instances.
+
+
with(Serde<K>, Serde<VLeft>, Serde<VRight>, String, Duration) - Static method in class org.apache.kafka.streams.kstream.Joined
+
+
Create an instance of Joined with key, value, and otherValue Serde instances.
+
+
with(Schema, Object) - Method in interface org.apache.kafka.connect.header.Header
+
+
Return a new Header object that has the same key but with the supplied value.
+
+
with(AutoOffsetReset) - Static method in class org.apache.kafka.streams.kstream.Consumed
+
+
Create an instance of Consumed with a Topology.AutoOffsetReset.
+
+
with(StreamPartitioner<K, Void>, StreamPartitioner<KO, Void>) - Static method in class org.apache.kafka.streams.kstream.TableJoined
+
+
Create an instance of TableJoined with partitioner and otherPartitioner StreamPartitioner instances.
+
+
with(TimestampExtractor) - Static method in class org.apache.kafka.streams.kstream.Consumed
+
+
Create an instance of Consumed with a TimestampExtractor.
+
+
with(DslStoreSuppliers) - Static method in class org.apache.kafka.streams.kstream.StreamJoined
+
+
Creates a StreamJoined instance with the given DslStoreSuppliers.
+
+
with(WindowBytesStoreSupplier, WindowBytesStoreSupplier) - Static method in class org.apache.kafka.streams.kstream.StreamJoined
+
+
Creates a StreamJoined instance with the provided store suppliers.
+
+
with(Topology.AutoOffsetReset) - Static method in class org.apache.kafka.streams.kstream.Consumed
+
+
Deprecated.
Since 4.0. Use Consumed.with(AutoOffsetReset) instead.
+
+
+
withAllPartitions() - Method in class org.apache.kafka.streams.query.StateQueryRequest
+
+
Specifies that the query will run against all locally available partitions.
+
+
withAscendingKeys() - Method in class org.apache.kafka.streams.query.RangeQuery
+
+
Set the query to return the serialized byte[] of the keys in ascending order.
+
+
withAscendingKeys() - Method in class org.apache.kafka.streams.query.TimestampedRangeQuery
+
+
Set the query to return the serialized byte[] of the keys in ascending order.
+
+
withAscendingTimestamps() - Method in class org.apache.kafka.streams.query.MultiVersionedKeyQuery
+
+
Specifies the order of the returned records by the query as ascending by timestamp.
+
+
withBuiltinTypes(String, Class<K>, Class<V>) - Static method in class org.apache.kafka.streams.state.StateSerdes
+
+
Create a new instance of StateSerdes for the given state name and key-/value-type classes.
+
+
withCachingDisabled() - Method in class org.apache.kafka.streams.kstream.Materialized
+
+
Disable caching for the materialized StateStore.
+
+
withCachingDisabled() - Method in interface org.apache.kafka.streams.state.StoreBuilder
+
+
Disable caching on the store.
+
+
withCachingEnabled() - Method in class org.apache.kafka.streams.kstream.Materialized
+
+
Enable caching for the materialized StateStore.
+
+
withCachingEnabled() - Method in interface org.apache.kafka.streams.state.StoreBuilder
+
+
Enable caching on the store.
+
+
withClientSaslSupport() - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Add standard SASL client configuration options.
+
+
withClientSslSupport() - Method in class org.apache.kafka.common.config.ConfigDef
+
+
Add standard SSL client configuration options.
+
+
withComponent(String, int, long) - Method in class org.apache.kafka.streams.query.Position
+
+
Augment an existing Position by setting a new offset for a topic and partition.
+
+
withConsumer(Consumer<? super KStream<K, V>>, String) - Static method in class org.apache.kafka.streams.kstream.Branched
+
+
Create an instance of Branched with provided chain consumer and branch name suffix.
+
+
withConsumer(Consumer<KStream<K, V>>) - Static method in class org.apache.kafka.streams.kstream.Branched
+
+
Create an instance of Branched with provided chain consumer.
+
+
withDescendingKeys() - Method in class org.apache.kafka.streams.query.RangeQuery
+
+
Set the query to return the serialized byte[] of the keys in descending order.
+
+
withDescendingKeys() - Method in class org.apache.kafka.streams.query.TimestampedRangeQuery
+
+
Set the query to return the serialized byte[] of the keys in descending order.
+
+
withDescendingTimestamps() - Method in class org.apache.kafka.streams.query.MultiVersionedKeyQuery
+
+
Specifies the order of the returned records by the query as descending by timestamp.
+
+
withDslStoreSuppliers(DslStoreSuppliers) - Method in class org.apache.kafka.streams.kstream.StreamJoined
+
+
Configure with the provided DslStoreSuppliers for store suppliers that are not provided.
+
+
withFollowupRebalance(Instant) - Method in class org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment
+
+
This API can be used to request that a followup rebalance be triggered by the KafkaStreams client receiving this assignment.
+
+
withFunction(Function<? super KStream<K, V>, ? extends KStream<K, V>>) - Static method in class org.apache.kafka.streams.kstream.Branched
+
+
Create an instance of Branched with provided chain function.
+
+
withFunction(Function<? super KStream<K, V>, ? extends KStream<K, V>>, String) - Static method in class org.apache.kafka.streams.kstream.Branched
+
+
Create an instance of Branched with provided chain function and branch name suffix.
+
+
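The Branched.withConsumer/withFunction/withName entries above are used together with KStream#split(). A minimal sketch of that pattern, assuming a hypothetical "orders" topic with String keys and values (topic and branch names are illustrative only):

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Branched;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Named;

public class BranchedExample {
    public static void buildTopology(StreamsBuilder builder) {
        KStream<String, String> orders =
            builder.stream("orders", Consumed.with(Serdes.String(), Serdes.String()));

        orders.split(Named.as("orders-"))
              // Terminal branch: consume the branched stream and write it to a topic.
              .branch((key, value) -> value.contains("priority"),
                      Branched.withConsumer(ks -> ks.to("priority-orders"), "priority"))
              // Chaining branch: transform the branched stream and keep it in the result map.
              .branch((key, value) -> value.contains("bulk"),
                      Branched.withFunction(ks -> ks.mapValues(String::toUpperCase), "bulk"))
              .defaultBranch(Branched.as("other"));
    }
}
```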
withGracePeriod(Duration) - Method in class org.apache.kafka.streams.kstream.Joined
+
+
Set the grace period on the stream side of the join.
+
+
withGroupMembershipOperation(CloseOptions.GroupMembershipOperation) - Method in class org.apache.kafka.clients.consumer.CloseOptions
+
+
Fluent method to set the group membership operation upon shutdown.
+
+
withHeaders(Headers) - Method in class org.apache.kafka.streams.processor.api.FixedKeyRecord
+
+
A convenient way to produce a new record if you only need to change the headers.
+
+
withHeaders(Headers) - Method in class org.apache.kafka.streams.processor.api.Record
+
+
A convenient way to produce a new record if you only need to change the headers.
+
+
withKey(K) - Static method in class org.apache.kafka.streams.query.KeyQuery
+
+
Creates a query that will retrieve the record identified by key if it exists (or null otherwise).
+
+
withKey(K) - Static method in class org.apache.kafka.streams.query.MultiVersionedKeyQuery
+
+
Creates a query that will retrieve the set of records identified by key if any exists (or null otherwise).
+
+
withKey(K) - Static method in class org.apache.kafka.streams.query.TimestampedKeyQuery
+
+
Creates a query that will retrieve the record identified by key if it exists (or null otherwise).
+
+
withKey(K) - Static method in class org.apache.kafka.streams.query.VersionedKeyQuery
+
+
Creates a query that will retrieve the record from a versioned state store identified by key if it exists (or null otherwise).
+
+
withKey(K) - Static method in class org.apache.kafka.streams.query.WindowRangeQuery
+
 
+
withKey(NewK) - Method in class org.apache.kafka.streams.processor.api.Record
+
+
A convenient way to produce a new record if you only need to change the key.
+
+
withKeyAndWindowStartRange(K, Instant, Instant) - Static method in class org.apache.kafka.streams.query.WindowKeyQuery
+
 
+
withKeySerde(Serde<K>) - Method in class org.apache.kafka.streams.kstream.Consumed
+
+
Configure the instance of Consumed with a key Serde.
+
+
withKeySerde(Serde<K>) - Method in class org.apache.kafka.streams.kstream.Grouped
+
+
Perform the grouping operation using the provided keySerde for serializing the key.
+
+
withKeySerde(Serde<K>) - Method in class org.apache.kafka.streams.kstream.Joined
+
+
Set the key Serde to be used.
+
+
withKeySerde(Serde<K>) - Method in class org.apache.kafka.streams.kstream.Materialized
+
+
Set the keySerde the materialized StateStore will use.
+
+
withKeySerde(Serde<K>) - Method in class org.apache.kafka.streams.kstream.Produced
+
+
Produce records using the provided keySerde.
+
+
withKeySerde(Serde<K>) - Method in class org.apache.kafka.streams.kstream.Repartitioned
+
+
Create a new instance of Repartitioned with the provided key serde.
+
+
withKeySerde(Serde<K>) - Method in class org.apache.kafka.streams.kstream.StreamJoined
+
+
Configure with the provided Serde for the key
+
+
withKeyValueMapper(KeyValueMapper<? super K, ? super V, String>) - Method in class org.apache.kafka.streams.kstream.Printed
+
+
Print the records of a KStream with the provided KeyValueMapper. The provided KeyValueMapper's mapped value type must be String.
+
+
withLabel(String) - Method in class org.apache.kafka.streams.kstream.Printed
+
+
Print the records of a KStream with the provided label.
+
+
withLoggingDisabled() - Method in class org.apache.kafka.streams.kstream.Materialized
+
+
Disable change logging for the materialized StateStore.
+
+
withLoggingDisabled() - Method in class org.apache.kafka.streams.kstream.StreamJoined
+
+
Disable change logging for both state stores.
+
+
withLoggingDisabled() - Method in interface org.apache.kafka.streams.kstream.Suppressed.BufferConfig
+
+
Disable the changelog for this suppression's internal buffer.
+
+
withLoggingDisabled() - Method in interface org.apache.kafka.streams.state.StoreBuilder
+
+
Disable the changelog for store built by this StoreBuilder.
+
+
withLoggingEnabled(Map<String, String>) - Method in class org.apache.kafka.streams.kstream.Materialized
+
+
Indicates that a changelog should be created for the store.
+
+
withLoggingEnabled(Map<String, String>) - Method in class org.apache.kafka.streams.kstream.StreamJoined
+
+
Configures logging for both state stores.
+
+
withLoggingEnabled(Map<String, String>) - Method in interface org.apache.kafka.streams.kstream.Suppressed.BufferConfig
+
+
Indicates that a changelog topic should be created containing the currently suppressed records.
+
+
withLoggingEnabled(Map<String, String>) - Method in interface org.apache.kafka.streams.state.StoreBuilder
+
+
Maintain a changelog for any changes made to the store.
+
+
withLowerBound(K) - Static method in class org.apache.kafka.streams.query.RangeQuery
+
+
Interactive range query using a lower bound to filter the keys returned.
+
+
withLowerBound(K) - Static method in class org.apache.kafka.streams.query.TimestampedRangeQuery
+
+
Interactive range query using a lower bound to filter the keys returned.
+
+
withMaxBytes(long) - Method in interface org.apache.kafka.streams.kstream.Suppressed.BufferConfig
+
+
Set a size constraint on the buffer, the maximum number of bytes it will use.
+
+
withMaxRecords(long) - Method in interface org.apache.kafka.streams.kstream.Suppressed.BufferConfig
+
+
Set a size constraint on the buffer in terms of the maximum number of keys it will store.
+
+
withName(String) - Static method in enum class org.apache.kafka.connect.storage.ConverterType
+
+
Find the ConverterType with the given name, using a case-insensitive match.
+
+
withName(String) - Method in class org.apache.kafka.streams.kstream.Branched
+
+
Configure the instance of Branched with a branch name suffix.
+
+
withName(String) - Method in class org.apache.kafka.streams.kstream.Consumed
+
+
Configure the instance of Consumed with a processor name.
+
+
withName(String) - Method in class org.apache.kafka.streams.kstream.Grouped
+
+
Perform the grouping operation with the name for a repartition topic if required.
+
+
withName(String) - Method in class org.apache.kafka.streams.kstream.Joined
+
+
Set the base name used for all components of the join; this may include any repartition topics created to complete the join.
+
+
withName(String) - Method in class org.apache.kafka.streams.kstream.Named
+
 
+
withName(String) - Method in class org.apache.kafka.streams.kstream.Printed
+
+
Print the records of a KStream with provided processor name.
+
+
withName(String) - Method in class org.apache.kafka.streams.kstream.Produced
+
 
+
withName(String) - Method in class org.apache.kafka.streams.kstream.Repartitioned
+
+
Create a new instance of Repartitioned with the provided name used as part of repartition topic and processor name.
+
+
withName(String) - Method in class org.apache.kafka.streams.kstream.StreamJoined
+
+
Set the name to use for the join processor and the repartition topic(s) if required.
+
+
withName(String) - Method in interface org.apache.kafka.streams.kstream.Suppressed
+
+
Use the specified name for the suppression node in the topology.
+
+
withName(String) - Method in class org.apache.kafka.streams.kstream.TableJoined
+
+
Set the base name used for all components of the join, including internal topics created to complete the join.
+
+
withNoBound() - Method in interface org.apache.kafka.streams.kstream.Suppressed.BufferConfig
+
+
Set the buffer to be unconstrained by size (either keys or bytes).
+
+
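The Suppressed.BufferConfig methods listed above (withMaxBytes, withMaxRecords, withLoggingDisabled, withNoBound) configure the in-memory buffer used by KTable#suppress(). A minimal sketch under those assumptions; the time limit and buffer sizes are chosen purely for illustration:

```java
import java.time.Duration;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Suppressed;
import org.apache.kafka.streams.kstream.Suppressed.BufferConfig;

public class SuppressionExample {
    // Hold updates for up to five minutes per key before emitting.
    // The buffer is size-constrained (10,000 keys or 5 MB), so results may be
    // flushed earlier if it fills up before the time limit.
    public static <K, V> KTable<K, V> throttle(KTable<K, V> table) {
        return table.suppress(
            Suppressed.untilTimeLimit(
                Duration.ofMinutes(5),
                BufferConfig.maxRecords(10_000)
                            .withMaxBytes(5 * 1024 * 1024)
                            .withLoggingDisabled()));
    }
}
```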
withNoBounds() - Static method in class org.apache.kafka.streams.query.RangeQuery
+
+
Interactive scan query that returns all records in the store.
+
+
withNoBounds() - Static method in class org.apache.kafka.streams.query.TimestampedRangeQuery
+
+
Interactive scan query that returns all records in the store.
+
+
withNonOverlapCostOverride(int) - Method in class org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils.RackAwareOptimizationParams
+
+
Return a new config object with the provided nonOverlapCost override applied
+
+
withNumberOfPartitions(int) - Method in class org.apache.kafka.streams.kstream.Repartitioned
+
+
Create a new instance of Repartitioned with the provided number of partitions for repartition topic.
+
+
withOffsetResetPolicy(AutoOffsetReset) - Method in class org.apache.kafka.streams.kstream.Consumed
+
+
Configure the instance of Consumed with a Topology.AutoOffsetReset.
+
+
withOffsetResetPolicy(Topology.AutoOffsetReset) - Method in class org.apache.kafka.streams.kstream.Consumed
+
+
Deprecated.
+
+
withOtherPartitioner(StreamPartitioner<KO, Void>) - Method in class org.apache.kafka.streams.kstream.TableJoined
+
+
Set the custom other StreamPartitioner to be used as part of computing the join.
+
+
withOtherStoreSupplier(WindowBytesStoreSupplier) - Method in class org.apache.kafka.streams.kstream.StreamJoined
+
+
Configure with the provided WindowBytesStoreSupplier for the other store supplier.
+
+
withOtherValueSerde(Serde<V2>) - Method in class org.apache.kafka.streams.kstream.StreamJoined
+
+
Configure with the provided Serde for the other value
+
+
withOtherValueSerde(Serde<VRight>) - Method in class org.apache.kafka.streams.kstream.Joined
+
+
Set the otherValue Serde to be used.
+
+
withPartition(Integer) - Method in class org.apache.kafka.streams.StoreQueryParameters
+
+
Set a specific partition that should be queried exclusively.
+
+
withPartitioner(StreamPartitioner<K, Void>) - Method in class org.apache.kafka.streams.kstream.TableJoined
+
+
Set the custom StreamPartitioner to be used as part of computing the join.
+
+
withPartitions(Map<TopicPartition, PartitionInfo>) - Method in class org.apache.kafka.common.Cluster
+
+
Return a copy of this cluster combined with `partitions`.
+
+
withPartitions(Set<Integer>) - Method in class org.apache.kafka.streams.query.StateQueryRequest
+
+
Specifies a set of partitions to run against.
+
+
withPluginMetrics(PluginMetrics) - Method in interface org.apache.kafka.common.metrics.Monitorable
+
+
Provides a PluginMetrics instance from the component that instantiates the plugin.
+
+
withPositionBound(PositionBound) - Method in class org.apache.kafka.streams.query.StateQueryRequest
+
+
Bounds the position of the state store against its input topics.
+
+
withProtocolTypes(Set<String>) - Method in class org.apache.kafka.clients.admin.ListGroupsOptions
+
+
If protocol types is set, only groups of these protocol types will be returned by listGroups().
+
+
withQuery(Query<R>) - Method in class org.apache.kafka.streams.query.StateQueryRequest.InStore
+
+
Specifies the query to run on the specified store.
+
+
withRange(K, K) - Static method in class org.apache.kafka.streams.query.RangeQuery
+
+
Interactive range query using a lower and upper bound to filter the keys returned.
+
+
withRange(K, K) - Static method in class org.apache.kafka.streams.query.TimestampedRangeQuery
+
+
Interactive range query using a lower and upper bound to filter the keys returned.
+
+
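The RangeQuery/TimestampedRangeQuery factories (withRange, withLowerBound, withUpperBound, withNoBounds) build interactive queries that are executed through a StateQueryRequest (see the withQuery and withPartitions entries). A minimal sketch, assuming a running KafkaStreams instance and a hypothetical key-value store named "counts-store" with String keys and Long values; by default the request runs against all locally available partitions:

```java
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.query.QueryResult;
import org.apache.kafka.streams.query.RangeQuery;
import org.apache.kafka.streams.query.StateQueryRequest;
import org.apache.kafka.streams.query.StateQueryResult;
import org.apache.kafka.streams.state.KeyValueIterator;

public class RangeQueryExample {
    public static void printRange(KafkaStreams streams) {
        // Keys between "a" and "m" (inclusive).
        RangeQuery<String, Long> query = RangeQuery.withRange("a", "m");

        StateQueryRequest<KeyValueIterator<String, Long>> request =
            StateQueryRequest.inStore("counts-store").withQuery(query);

        StateQueryResult<KeyValueIterator<String, Long>> result = streams.query(request);

        for (QueryResult<KeyValueIterator<String, Long>> partitionResult
                 : result.getPartitionResults().values()) {
            if (!partitionResult.isSuccess()) {
                continue; // skip partitions that could not be queried
            }
            try (KeyValueIterator<String, Long> iterator = partitionResult.getResult()) {
                while (iterator.hasNext()) {
                    KeyValue<String, Long> kv = iterator.next();
                    System.out.println(kv.key + " -> " + kv.value);
                }
            }
        }
    }
}
```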
withRetention(Duration) - Method in class org.apache.kafka.streams.kstream.Materialized
+
+
Configure retention period for window and session stores.
+
+
withStoreName(String) - Method in class org.apache.kafka.streams.kstream.StreamJoined
+
+
Sets the base store name to use for both sides of the join.
+
+
withStoreType(DslStoreSuppliers) - Method in class org.apache.kafka.streams.kstream.Materialized
+
+
Set the type of the materialized StateStore.
+
+
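Materialized's fluent methods (withKeySerde, withValueSerde, withCachingDisabled, withRetention, withStoreType) configure how a DSL operation materializes its state store. A minimal sketch for a windowed count; the store name, window size, and retention are illustrative assumptions:

```java
import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.state.WindowStore;

public class MaterializedExample {
    public static KTable<Windowed<String>, Long> hourlyCounts(KStream<String, String> clicks) {
        return clicks
            .groupByKey()
            .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofHours(1)))
            .count(Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("hourly-counts")
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.Long())
                .withCachingDisabled()
                // Retention must cover at least the window size plus grace period.
                .withRetention(Duration.ofDays(1)));
    }
}
```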
withStreamPartitioner(StreamPartitioner<? super K, ? super V>) - Method in class org.apache.kafka.streams.kstream.Produced
+
+
Produce records using the provided partitioner.
+
+
withStreamPartitioner(StreamPartitioner<K, V>) - Method in class org.apache.kafka.streams.kstream.Repartitioned
+
+
Create a new instance of Repartitioned with the provided partitioner.
+
+
withThisStoreSupplier(WindowBytesStoreSupplier) - Method in class org.apache.kafka.streams.kstream.StreamJoined
+
+
Configure with the provided WindowBytesStoreSupplier for this store supplier.
+
+
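The StreamJoined entries (withStoreName, withName, withKeySerde, withValueSerde, withOtherValueSerde, withThisStoreSupplier, withOtherStoreSupplier, withLoggingDisabled) configure the two window stores that back a stream-stream join. A minimal sketch with illustrative names and a five-minute join window:

```java
import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.kstream.JoinWindows;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.StreamJoined;

public class StreamJoinedExample {
    public static KStream<String, String> join(KStream<String, String> left,
                                               KStream<String, Long> right) {
        return left.join(
            right,
            (leftValue, rightValue) -> leftValue + ":" + rightValue,
            JoinWindows.ofTimeDifferenceWithNoGrace(Duration.ofMinutes(5)),
            StreamJoined.with(Serdes.String(), Serdes.String(), Serdes.Long())
                        .withName("order-enrichment")           // processor/repartition topic name
                        .withStoreName("order-enrichment-join") // base name for both join stores
                        .withLoggingDisabled());                // no changelogs for the join stores
    }
}
```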
withTimeout(Duration) - Method in class org.apache.kafka.clients.consumer.CloseOptions
+
+
Fluent method to set the timeout for the close process.
+
+
withTimestamp(long) - Method in class org.apache.kafka.streams.processor.api.FixedKeyRecord
+
+
A convenient way to produce a new record if you only need to change the timestamp.
+
+
withTimestamp(long) - Method in class org.apache.kafka.streams.processor.api.Record
+
+
A convenient way to produce a new record if you only need to change the timestamp.
+
+
withTimestamp(long) - Method in class org.apache.kafka.streams.processor.To
+
+
Set the timestamp of the output record.
+
+
withTimestampExtractor(TimestampExtractor) - Method in class org.apache.kafka.streams.kstream.Consumed
+
+
Configure the instance of Consumed with a TimestampExtractor.
+
+
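Consumed's fluent methods (withKeySerde, withValueSerde, withName, withTimestampExtractor, withOffsetResetPolicy) configure how a source topic is read. A minimal sketch, assuming a hypothetical "clicks" topic; the offset-reset call uses the 4.x AutoOffsetReset factory referenced by the deprecation notes above:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.AutoOffsetReset;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.processor.WallclockTimestampExtractor;

public class ConsumedExample {
    public static KStream<String, String> source(StreamsBuilder builder) {
        return builder.stream(
            "clicks",
            Consumed.with(Serdes.String(), Serdes.String())
                    .withName("clicks-source")
                    .withTimestampExtractor(new WallclockTimestampExtractor())
                    .withOffsetResetPolicy(AutoOffsetReset.earliest()));
    }
}
```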
withTrafficCostOverride(int) - Method in class org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils.RackAwareOptimizationParams
+
+
Return a new config object with the provided trafficCost override applied
+
+
withTypes(Set<GroupType>) - Method in class org.apache.kafka.clients.admin.ListConsumerGroupsOptions
+
+
Deprecated.
+
If types is set, only groups of these types will be returned by listConsumerGroups().
+
+
withTypes(Set<GroupType>) - Method in class org.apache.kafka.clients.admin.ListGroupsOptions
+
+
If types is set, only groups of these types will be returned by listGroups().
+
+
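ListGroupsOptions#withTypes and #withProtocolTypes filter the groups returned by Admin#listGroups(). A minimal sketch, assuming a broker reachable at localhost:9092; the filter values are illustrative:

```java
import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.GroupListing;
import org.apache.kafka.clients.admin.ListGroupsOptions;
import org.apache.kafka.common.GroupType;

public class ListGroupsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        try (Admin admin = Admin.create(props)) {
            // Only return consumer groups that use the "consumer" protocol type.
            ListGroupsOptions options = new ListGroupsOptions()
                .withTypes(Set.of(GroupType.CONSUMER))
                .withProtocolTypes(Set.of("consumer"));

            for (GroupListing listing : admin.listGroups(options).all().get()) {
                System.out.println(listing.groupId());
            }
        }
    }
}
```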
withUpperBound(K) - Static method in class org.apache.kafka.streams.query.RangeQuery
+
+
Interactive range query using an upper bound to filter the keys returned.
+
+
withUpperBound(K) - Static method in class org.apache.kafka.streams.query.TimestampedRangeQuery
+
+
Interactive range query using an upper bound to filter the keys returned.
+
+
withValue(NewV) - Method in class org.apache.kafka.streams.processor.api.FixedKeyRecord
+
+
A convenient way to produce a new record if you only need to change the value.
+
+
withValue(NewV) - Method in class org.apache.kafka.streams.processor.api.Record
+
+
A convenient way to produce a new record if you only need to change the value.
+
+
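The Record/FixedKeyRecord with* methods (withKey, withValue, withTimestamp, withHeaders) create copies of a record with a single field changed, which is the usual way to forward modified records from the Processor API. A minimal sketch, assuming String keys and values:

```java
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;

public class UppercaseProcessor implements Processor<String, String, String, String> {
    private ProcessorContext<String, String> context;

    @Override
    public void init(ProcessorContext<String, String> context) {
        this.context = context;
    }

    @Override
    public void process(Record<String, String> record) {
        // withValue() copies the record, keeping key, timestamp, and headers intact.
        context.forward(record.withValue(record.value().toUpperCase()));
    }
}
```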
withValueSerde(Serde<V>) - Method in class org.apache.kafka.streams.kstream.Consumed
+
+
Configure the instance of Consumed with a value Serde.
+
+
withValueSerde(Serde<V>) - Method in class org.apache.kafka.streams.kstream.Grouped
+
+
Perform the grouping operation using the provided valueSerde for serializing the value.
+
+
withValueSerde(Serde<V>) - Method in class org.apache.kafka.streams.kstream.Materialized
+
+
Set the valueSerde the materialized StateStore will use.
+
+
withValueSerde(Serde<V>) - Method in class org.apache.kafka.streams.kstream.Produced
+
+
Produce records using the provided valueSerde.
+
+
withValueSerde(Serde<V>) - Method in class org.apache.kafka.streams.kstream.Repartitioned
+
+
Create a new instance of Repartitioned with the provided value serde.
+
+
withValueSerde(Serde<V1>) - Method in class org.apache.kafka.streams.kstream.StreamJoined
+
+
Configure with the provided Serde for this value
+
+
withValueSerde(Serde<VLeft>) - Method in class org.apache.kafka.streams.kstream.Joined
+
+
Set the value Serde to be used.
+
+
withWindowStartRange(Instant, Instant) - Static method in class org.apache.kafka.streams.query.WindowRangeQuery
+
 
+
workerId() - Method in class org.apache.kafka.connect.health.AbstractState
+
+
The identifier of the worker associated with the connector or the task.
+
+
wrapFixedKeyProcessorSupplier(String, FixedKeyProcessorSupplier<KIn, VIn, VOut>) - Method in interface org.apache.kafka.streams.processor.api.ProcessorWrapper
+
+ +
+
WrappedFixedKeyProcessorSupplier<KIn,VIn,VOut> - Interface in org.apache.kafka.streams.processor.api
+
+
Marker interface for classes implementing FixedKeyProcessorSupplier that have been wrapped via a ProcessorWrapper.
+
+
WrappedProcessorSupplier<KIn,VIn,KOut,VOut> - Interface in org.apache.kafka.streams.processor.api
+
+
Marker interface for classes implementing ProcessorSupplier that have been wrapped via a ProcessorWrapper.
+
+
WrapperSerde(Serializer<T>, Deserializer<T>) - Constructor for class org.apache.kafka.common.serialization.Serdes.WrapperSerde
+
 
+
wrapProcessorSupplier(String, ProcessorSupplier<KIn, VIn, KOut, VOut>) - Method in interface org.apache.kafka.streams.processor.api.ProcessorWrapper
+
+
Wrap or replace the provided ProcessorSupplier and return a WrappedProcessorSupplier. To convert a ProcessorSupplier instance into a WrappedProcessorSupplier, use the ProcessorWrapper.asWrapped(ProcessorSupplier) method.
+
+
WRITE - Enum constant in enum class org.apache.kafka.common.acl.AclOperation
+
+
WRITE operation.
+
+
writeTo(ConsumerRecord<byte[], byte[]>, PrintStream) - Method in interface org.apache.kafka.common.MessageFormatter
+
+
Parses and formats a record for display
+
+
+

Z

+
+
ZERO_UUID - Static variable in class org.apache.kafka.common.Uuid
+
+
A UUID that represents a null or empty UUID.
+
+
+A B C D E F G H I J K L M N O P Q R S T U V W Z 
All Classes and Interfaces|All Packages|Constant Field Values|Serialized Form
+
+
+ + diff --git a/static/41/javadoc/index.html b/static/41/javadoc/index.html new file mode 100644 index 000000000..5767f6054 --- /dev/null +++ b/static/41/javadoc/index.html @@ -0,0 +1,277 @@ + + + + +Overview (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+
+

kafka 4.1.0 API

+
+
+
Packages
+
+
Package
+
Description
+ +
+
Provides a Kafka client for performing administrative operations (such as creating topics and configuring brokers) on a Kafka cluster.
+
+ +
+
Provides a Kafka client for consuming records from topics and/or partitions in a Kafka cluster.
+
+ +
+
Provides a Kafka client for producing records to topics and/or partitions in a Kafka cluster.
+
+ +
+
Provides shared functionality for Kafka clients and servers.
+
+ +
+
Provides classes representing Access Control Lists for authorization of clients
+
+ +
+
Provides annotations used on Kafka APIs.
+
+ +
+
Provides common mechanisms for defining, parsing, validating, and documenting user-configurable parameters.
+
+ +
+
Provides a pluggable interface and some implementations for late-binding in configuration values.
+
+ +
+
Provides common exception classes.
+
+ +
+
Provides API for application-defined metadata attached to Kafka records.
+
+ +
+
Provides the API used by Kafka clients to emit metrics which are then exposed using the MetricsReporter interface.
+
+ +
+
Provides methods of statistically aggregating metrics upon emission.
+
+ +
+
Provides mechanisms for enforcing resource quotas.
+
+ +
+
Provides client handles representing logical resources in a Kafka cluster.
+
+ +
+
Provides pluggable interfaces for implementing Kafka authentication mechanisms.
+
+ +
+
Provides a LoginModule for using OAuth Bearer Token authentication with Kafka clusters.
+
+ +
+
Provides implementation to use plaintext credentials authentication for securing Kafka clusters.
+
+ +
+
Provides adaptor to use the Salted Challenge Response Authentication Mechanism for securing Kafka clusters.
+
+ +
+
Provides mechanism for delegating authorization to a distinct Principal for securing Kafka clusters.
+
+ +
+
Provides interface and some implementations of serialization/deserialization routines for various objects.
+
+ +
+
Provides common interfaces used to describe pluggable components.
+
+ +
+
Provides interfaces for Connector and Task implementations.
+
+ +
+
Provides pluggable interfaces for policies controlling how users can configure connectors.
+
+ +
+
Provides classes for representing data and schemas handled by Connect.
+
+ +
+
Provides common exception classes for Connect, used by the framework and plugins to communicate failures.
+
+ +
+
Provides an API for application-defined metadata attached to Connect records.
+
+ +
+
Provides an API for describing the state of a running Connect cluster to ConnectRestExtension instances.
+
+ +
+
Provides APIs for the MirrorMaker connectors and utilities to manage MirrorMaker resources.
+
+ +
+
Provides a pluggable interface for altering the behavior of the Connect REST API.
+
+ +
+
Provides an API for implementing sink connectors which write Kafka records to external applications.
+
+ +
+
Provides an API for implementing source connectors which read data from external applications into Kafka.
+
+ +
+
Provides pluggable interfaces and some implementations for (de)serializing data to and from Kafka
+
+ +
+
Provides source and sink connector implementations used for testing
+
+ +
+
Provides a pluggable interface for altering data which is being moved by Connect.
+
+ +
+
Provides a pluggable interface for describing when a Transformation should be applied to a record.
+
+ +
+
Provides common utilities that can be used in component implementations.
+
+ +
+
Provides the core functionality and metadata management for consumer group partition assignment.
+
+ +
+
Provides pluggable interface for performing authorization on a Kafka server.
+
+ +
+
Provides a pluggable API for defining remote storage and retrieval of Kafka log segments.
+
+ +
+
Provides pluggable interfaces for expressing policies on topics and configs.
+
+ +
+
Provides pluggable interface for enforcing client quotas from a Kafka server.
+
+ +
+
Provides pluggable interface for capturing client telemetry metrics.
+
+ +
+
Provides the Kafka Streams library for building streaming data applications.
+
+ +
+
Provides common exception classes for Streams applications.
+
+ +
+
Provides a high-level programming model (DSL) to express a (stateful) data flow computation over input streams and tables.
+
+ +
+
Provides a low-level programming model (Processor API, aka, PAPI) to express a (stateful) data flow computation over input topics.
+
+ +
+
Provides a low-level programming model (Processor API, aka, PAPI) to express a (stateful) data flow computation over input topics.
+
+ +
+
Provides classes and interfaces used to manage and assign tasks within Kafka Streams applications.
+
+ +
+
Provides classes for assigning tasks to stream threads.
+
+ +
+
Provides a query API (aka Interactive Queries) over state stores, for extracting data from a stateful Kafka Streams application.
+
+ +
+
Provides interfaces for managing the intermediate state of a stateful streams application.
+
+ +
+
Provides classes for testing Kafka Streams applications with mocked inputs.
+
+ +
+
Provides interfaces for writing plugins of kafka tools
+
+
+
+
+
+
+ + diff --git a/static/41/javadoc/jquery-ui.overrides.css b/static/41/javadoc/jquery-ui.overrides.css new file mode 100644 index 000000000..facf852c2 --- /dev/null +++ b/static/41/javadoc/jquery-ui.overrides.css @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +.ui-state-active, +.ui-widget-content .ui-state-active, +.ui-widget-header .ui-state-active, +a.ui-button:active, +.ui-button:active, +.ui-button.ui-state-active:hover { + /* Overrides the color of selection used in jQuery UI */ + background: #F8981D; + border: 1px solid #F8981D; +} diff --git a/static/41/javadoc/legal/ADDITIONAL_LICENSE_INFO b/static/41/javadoc/legal/ADDITIONAL_LICENSE_INFO new file mode 100644 index 000000000..ff700cd09 --- /dev/null +++ b/static/41/javadoc/legal/ADDITIONAL_LICENSE_INFO @@ -0,0 +1,37 @@ + ADDITIONAL INFORMATION ABOUT LICENSING + +Certain files distributed by Oracle America, Inc. and/or its affiliates are +subject to the following clarification and special exception to the GPLv2, +based on the GNU Project exception for its Classpath libraries, known as the +GNU Classpath Exception. + +Note that Oracle includes multiple, independent programs in this software +package. Some of those programs are provided under licenses deemed +incompatible with the GPLv2 by the Free Software Foundation and others. +For example, the package includes programs licensed under the Apache +License, Version 2.0 and may include FreeType. Such programs are licensed +to you under their original licenses. + +Oracle facilitates your further distribution of this package by adding the +Classpath Exception to the necessary parts of its GPLv2 code, which permits +you to use that code in combination with other independent modules not +licensed under the GPLv2. However, note that this would not permit you to +commingle code under an incompatible license with Oracle's GPLv2 licensed +code by, for example, cutting and pasting such code into a file also +containing Oracle's GPLv2 licensed code and then distributing the result. 
+ +Additionally, if you were to remove the Classpath Exception from any of the +files to which it applies and distribute the result, you would likely be +required to license some or all of the other code in that distribution under +the GPLv2 as well, and since the GPLv2 is incompatible with the license terms +of some items included in the distribution by Oracle, removing the Classpath +Exception could therefore effectively compromise your ability to further +distribute the package. + +Failing to distribute notices associated with some files may also create +unexpected legal consequences. + +Proceed with caution and we recommend that you obtain the advice of a lawyer +skilled in open source matters before removing the Classpath Exception or +making modifications to this package which may subsequently be redistributed +and/or involve the use of third party software. diff --git a/static/41/javadoc/legal/ASSEMBLY_EXCEPTION b/static/41/javadoc/legal/ASSEMBLY_EXCEPTION new file mode 100644 index 000000000..065b8d902 --- /dev/null +++ b/static/41/javadoc/legal/ASSEMBLY_EXCEPTION @@ -0,0 +1,27 @@ + +OPENJDK ASSEMBLY EXCEPTION + +The OpenJDK source code made available by Oracle America, Inc. (Oracle) at +openjdk.java.net ("OpenJDK Code") is distributed under the terms of the GNU +General Public License version 2 +only ("GPL2"), with the following clarification and special exception. + + Linking this OpenJDK Code statically or dynamically with other code + is making a combined work based on this library. Thus, the terms + and conditions of GPL2 cover the whole combination. + + As a special exception, Oracle gives you permission to link this + OpenJDK Code with certain code licensed by Oracle as indicated at + http://openjdk.java.net/legal/exception-modules-2007-05-08.html + ("Designated Exception Modules") to produce an executable, + regardless of the license terms of the Designated Exception Modules, + and to copy and distribute the resulting executable under GPL2, + provided that the Designated Exception Modules continue to be + governed by the licenses under which they were offered by Oracle. + +As such, it allows licensees and sublicensees of Oracle's GPL2 OpenJDK Code +to build an executable that includes those portions of necessary code that +Oracle could not provide under GPL2 (or that Oracle has provided under GPL2 +with the Classpath exception). If you modify or add to the OpenJDK code, +that new GPL2 code may still be combined with Designated Exception Modules +if the new code is made subject to this exception by its copyright holder. diff --git a/static/41/javadoc/legal/LICENSE b/static/41/javadoc/legal/LICENSE new file mode 100644 index 000000000..8b400c7ab --- /dev/null +++ b/static/41/javadoc/legal/LICENSE @@ -0,0 +1,347 @@ +The GNU General Public License (GPL) + +Version 2, June 1991 + +Copyright (C) 1989, 1991 Free Software Foundation, Inc. +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Everyone is permitted to copy and distribute verbatim copies of this license +document, but changing it is not allowed. + +Preamble + +The licenses for most software are designed to take away your freedom to share +and change it. By contrast, the GNU General Public License is intended to +guarantee your freedom to share and change free software--to make sure the +software is free for all its users. This General Public License applies to +most of the Free Software Foundation's software and to any other program whose +authors commit to using it. 
(Some other Free Software Foundation software is +covered by the GNU Library General Public License instead.) You can apply it to +your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our +General Public Licenses are designed to make sure that you have the freedom to +distribute copies of free software (and charge for this service if you wish), +that you receive source code or can get it if you want it, that you can change +the software or use pieces of it in new free programs; and that you know you +can do these things. + +To protect your rights, we need to make restrictions that forbid anyone to deny +you these rights or to ask you to surrender the rights. These restrictions +translate to certain responsibilities for you if you distribute copies of the +software, or if you modify it. + +For example, if you distribute copies of such a program, whether gratis or for +a fee, you must give the recipients all the rights that you have. You must +make sure that they, too, receive or can get the source code. And you must +show them these terms so they know their rights. + +We protect your rights with two steps: (1) copyright the software, and (2) +offer you this license which gives you legal permission to copy, distribute +and/or modify the software. + +Also, for each author's protection and ours, we want to make certain that +everyone understands that there is no warranty for this free software. If the +software is modified by someone else and passed on, we want its recipients to +know that what they have is not the original, so that any problems introduced +by others will not reflect on the original authors' reputations. + +Finally, any free program is threatened constantly by software patents. We +wish to avoid the danger that redistributors of a free program will +individually obtain patent licenses, in effect making the program proprietary. +To prevent this, we have made it clear that any patent must be licensed for +everyone's free use or not licensed at all. + +The precise terms and conditions for copying, distribution and modification +follow. + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +0. This License applies to any program or other work which contains a notice +placed by the copyright holder saying it may be distributed under the terms of +this General Public License. The "Program", below, refers to any such program +or work, and a "work based on the Program" means either the Program or any +derivative work under copyright law: that is to say, a work containing the +Program or a portion of it, either verbatim or with modifications and/or +translated into another language. (Hereinafter, translation is included +without limitation in the term "modification".) Each licensee is addressed as +"you". + +Activities other than copying, distribution and modification are not covered by +this License; they are outside its scope. The act of running the Program is +not restricted, and the output from the Program is covered only if its contents +constitute a work based on the Program (independent of having been made by +running the Program). Whether that is true depends on what the Program does. + +1. 
You may copy and distribute verbatim copies of the Program's source code as +you receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice and +disclaimer of warranty; keep intact all the notices that refer to this License +and to the absence of any warranty; and give any other recipients of the +Program a copy of this License along with the Program. + +You may charge a fee for the physical act of transferring a copy, and you may +at your option offer warranty protection in exchange for a fee. + +2. You may modify your copy or copies of the Program or any portion of it, thus +forming a work based on the Program, and copy and distribute such modifications +or work under the terms of Section 1 above, provided that you also meet all of +these conditions: + + a) You must cause the modified files to carry prominent notices stating + that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in whole or + in part contains or is derived from the Program or any part thereof, to be + licensed as a whole at no charge to all third parties under the terms of + this License. + + c) If the modified program normally reads commands interactively when run, + you must cause it, when started running for such interactive use in the + most ordinary way, to print or display an announcement including an + appropriate copyright notice and a notice that there is no warranty (or + else, saying that you provide a warranty) and that users may redistribute + the program under these conditions, and telling the user how to view a copy + of this License. (Exception: if the Program itself is interactive but does + not normally print such an announcement, your work based on the Program is + not required to print an announcement.) + +These requirements apply to the modified work as a whole. If identifiable +sections of that work are not derived from the Program, and can be reasonably +considered independent and separate works in themselves, then this License, and +its terms, do not apply to those sections when you distribute them as separate +works. But when you distribute the same sections as part of a whole which is a +work based on the Program, the distribution of the whole must be on the terms +of this License, whose permissions for other licensees extend to the entire +whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest your +rights to work written entirely by you; rather, the intent is to exercise the +right to control the distribution of derivative or collective works based on +the Program. + +In addition, mere aggregation of another work not based on the Program with the +Program (or with a work based on the Program) on a volume of a storage or +distribution medium does not bring the other work under the scope of this +License. + +3. 
You may copy and distribute the Program (or a work based on it, under +Section 2) in object code or executable form under the terms of Sections 1 and +2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable source + code, which must be distributed under the terms of Sections 1 and 2 above + on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three years, to + give any third party, for a charge no more than your cost of physically + performing source distribution, a complete machine-readable copy of the + corresponding source code, to be distributed under the terms of Sections 1 + and 2 above on a medium customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer to + distribute corresponding source code. (This alternative is allowed only + for noncommercial distribution and only if you received the program in + object code or executable form with such an offer, in accord with + Subsection b above.) + +The source code for a work means the preferred form of the work for making +modifications to it. For an executable work, complete source code means all +the source code for all modules it contains, plus any associated interface +definition files, plus the scripts used to control compilation and installation +of the executable. However, as a special exception, the source code +distributed need not include anything that is normally distributed (in either +source or binary form) with the major components (compiler, kernel, and so on) +of the operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the source +code from the same place counts as distribution of the source code, even though +third parties are not compelled to copy the source along with the object code. + +4. You may not copy, modify, sublicense, or distribute the Program except as +expressly provided under this License. Any attempt otherwise to copy, modify, +sublicense or distribute the Program is void, and will automatically terminate +your rights under this License. However, parties who have received copies, or +rights, from you under this License will not have their licenses terminated so +long as such parties remain in full compliance. + +5. You are not required to accept this License, since you have not signed it. +However, nothing else grants you permission to modify or distribute the Program +or its derivative works. These actions are prohibited by law if you do not +accept this License. Therefore, by modifying or distributing the Program (or +any work based on the Program), you indicate your acceptance of this License to +do so, and all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + +6. Each time you redistribute the Program (or any work based on the Program), +the recipient automatically receives a license from the original licensor to +copy, distribute or modify the Program subject to these terms and conditions. +You may not impose any further restrictions on the recipients' exercise of the +rights granted herein. You are not responsible for enforcing compliance by +third parties to this License. + +7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), conditions +are imposed on you (whether by court order, agreement or otherwise) that +contradict the conditions of this License, they do not excuse you from the +conditions of this License. If you cannot distribute so as to satisfy +simultaneously your obligations under this License and any other pertinent +obligations, then as a consequence you may not distribute the Program at all. +For example, if a patent license would not permit royalty-free redistribution +of the Program by all those who receive copies directly or indirectly through +you, then the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply and +the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any patents or +other property right claims or to contest validity of any such claims; this +section has the sole purpose of protecting the integrity of the free software +distribution system, which is implemented by public license practices. Many +people have made generous contributions to the wide range of software +distributed through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing to +distribute software through any other system and a licensee cannot impose that +choice. + +This section is intended to make thoroughly clear what is believed to be a +consequence of the rest of this License. + +8. If the distribution and/or use of the Program is restricted in certain +countries either by patents or by copyrighted interfaces, the original +copyright holder who places the Program under this License may add an explicit +geographical distribution limitation excluding those countries, so that +distribution is permitted only in or among countries not thus excluded. In +such case, this License incorporates the limitation as if written in the body +of this License. + +9. The Free Software Foundation may publish revised and/or new versions of the +General Public License from time to time. Such new versions will be similar in +spirit to the present version, but may differ in detail to address new problems +or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any later +version", you have the option of following the terms and conditions either of +that version or of any later version published by the Free Software Foundation. +If the Program does not specify a version number of this License, you may +choose any version ever published by the Free Software Foundation. + +10. If you wish to incorporate parts of the Program into other free programs +whose distribution conditions are different, write to the author to ask for +permission. For software which is copyrighted by the Free Software Foundation, +write to the Free Software Foundation; we sometimes make exceptions for this. +Our decision will be guided by the two goals of preserving the free status of +all derivatives of our free software and of promoting the sharing and reuse of +software generally. + +NO WARRANTY + +11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR +THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE +STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE +PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND +PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, +YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL +ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE +PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR +INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA +BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER +OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +END OF TERMS AND CONDITIONS + +How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest possible +use to the public, the best way to achieve this is to make it free software +which everyone can redistribute and change under these terms. + +To do so, attach the following notices to the program. It is safest to attach +them to the start of each source file to most effectively convey the exclusion +of warranty; and each file should have at least the "copyright" line and a +pointer to where the full notice is found. + + One line to give the program's name and a brief idea of what it does. + + Copyright (C) + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2 of the License, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this when it +starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author Gnomovision comes + with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free + software, and you are welcome to redistribute it under certain conditions; + type 'show c' for details. + +The hypothetical commands 'show w' and 'show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may be +called something other than 'show w' and 'show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your school, +if any, to sign a "copyright disclaimer" for the program, if necessary. 
Here +is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + 'Gnomovision' (which makes passes at compilers) written by James Hacker. + + signature of Ty Coon, 1 April 1989 + + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General Public +License instead of this License. + + +"CLASSPATH" EXCEPTION TO THE GPL + +Certain source files distributed by Oracle America and/or its affiliates are +subject to the following clarification and special exception to the GPL, but +only where Oracle has expressly included in the particular source file's header +the words "Oracle designates this particular file as subject to the "Classpath" +exception as provided by Oracle in the LICENSE file that accompanied this code." + + Linking this library statically or dynamically with other modules is making + a combined work based on this library. Thus, the terms and conditions of + the GNU General Public License cover the whole combination. + + As a special exception, the copyright holders of this library give you + permission to link this library with independent modules to produce an + executable, regardless of the license terms of these independent modules, + and to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent module, + the terms and conditions of the license of that module. An independent + module is a module which is not derived from or based on this library. If + you modify this library, you may extend this exception to your version of + the library, but you are not obligated to do so. If you do not wish to do + so, delete this exception statement from your version. diff --git a/static/41/javadoc/legal/jquery.md b/static/41/javadoc/legal/jquery.md new file mode 100644 index 000000000..a763ec6f1 --- /dev/null +++ b/static/41/javadoc/legal/jquery.md @@ -0,0 +1,26 @@ +## jQuery v3.7.1 + +### jQuery License +``` +jQuery v 3.7.1 +Copyright OpenJS Foundation and other contributors, https://openjsf.org/ + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+``` diff --git a/static/41/javadoc/legal/jqueryUI.md b/static/41/javadoc/legal/jqueryUI.md new file mode 100644 index 000000000..8bda9d7a8 --- /dev/null +++ b/static/41/javadoc/legal/jqueryUI.md @@ -0,0 +1,49 @@ +## jQuery UI v1.13.2 + +### jQuery UI License +``` +Copyright jQuery Foundation and other contributors, https://jquery.org/ + +This software consists of voluntary contributions made by many +individuals. For exact contribution history, see the revision history +available at https://github.com/jquery/jquery-ui + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code contained within the demos directory. + +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +All files located in the node_modules and external directories are +externally maintained libraries used by this software which have their +own licenses; we recommend you read them, as their terms may differ from +the terms above. 
+ +``` diff --git a/static/41/javadoc/member-search-index.js b/static/41/javadoc/member-search-index.js new file mode 100644 index 000000000..b2f19089c --- /dev/null +++ b/static/41/javadoc/member-search-index.js @@ -0,0 +1 @@ +memberSearchIndex = [{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginModule","l":"abort()"},{"p":"org.apache.kafka.common.security.plain","c":"PlainLoginModule","l":"abort()"},{"p":"org.apache.kafka.common.security.scram","c":"ScramLoginModule","l":"abort()"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"abortTransaction()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"abortTransaction()"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"abortTransaction()"},{"p":"org.apache.kafka.connect.source","c":"TransactionContext","l":"abortTransaction()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"abortTransaction(AbortTransactionSpec)","u":"abortTransaction(org.apache.kafka.clients.admin.AbortTransactionSpec)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"abortTransaction(AbortTransactionSpec, AbortTransactionOptions)","u":"abortTransaction(org.apache.kafka.clients.admin.AbortTransactionSpec,org.apache.kafka.clients.admin.AbortTransactionOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"abortTransaction(AbortTransactionSpec, AbortTransactionOptions)","u":"abortTransaction(org.apache.kafka.clients.admin.AbortTransactionSpec,org.apache.kafka.clients.admin.AbortTransactionOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"abortTransaction(AbortTransactionSpec, AbortTransactionOptions)","u":"abortTransaction(org.apache.kafka.clients.admin.AbortTransactionSpec,org.apache.kafka.clients.admin.AbortTransactionOptions)"},{"p":"org.apache.kafka.connect.source","c":"TransactionContext","l":"abortTransaction(SourceRecord)","u":"abortTransaction(org.apache.kafka.connect.source.SourceRecord)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"abortTransactionException"},{"p":"org.apache.kafka.clients.admin","c":"AbortTransactionOptions","l":"AbortTransactionOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"AbortTransactionSpec","l":"AbortTransactionSpec(TopicPartition, long, short, int)","u":"%3Cinit%3E(org.apache.kafka.common.TopicPartition,long,short,int)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"AbstractConfig(ConfigDef, Map)","u":"%3Cinit%3E(org.apache.kafka.common.config.ConfigDef,java.util.Map)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"AbstractConfig(ConfigDef, Map, boolean)","u":"%3Cinit%3E(org.apache.kafka.common.config.ConfigDef,java.util.Map,boolean)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"AbstractConfig(ConfigDef, Map, Map, boolean)","u":"%3Cinit%3E(org.apache.kafka.common.config.ConfigDef,java.util.Map,java.util.Map,boolean)"},{"p":"org.apache.kafka.clients.admin","c":"AbstractOptions","l":"AbstractOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.health","c":"AbstractState","l":"AbstractState(String, String, String)","u":"%3Cinit%3E(java.lang.String,java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"AcknowledgeType","l":"ACCEPT"},{"p":"org.apache.kafka.common","c":"KafkaFuture.BiConsumer","l":"accept(A, 
B)","u":"accept(A,B)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"ACCEPTABLE_RECOVERY_LAG_CONFIG"},{"p":"org.apache.kafka.common.metrics","c":"Quota","l":"acceptable(double)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"AssignmentConfigs","l":"acceptableRecoveryLag()"},{"p":"org.apache.kafka.streams.state","c":"QueryableStoreType","l":"accepts(StateStore)","u":"accepts(org.apache.kafka.streams.processor.StateStore)"},{"p":"org.apache.kafka.streams.state","c":"QueryableStoreTypes.SessionStoreType","l":"accepts(StateStore)","u":"accepts(org.apache.kafka.streams.processor.StateStore)"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntry","l":"AccessControlEntry(String, String, AclOperation, AclPermissionType)","u":"%3Cinit%3E(java.lang.String,java.lang.String,org.apache.kafka.common.acl.AclOperation,org.apache.kafka.common.acl.AclPermissionType)"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntryFilter","l":"AccessControlEntryFilter(String, String, AclOperation, AclPermissionType)","u":"%3Cinit%3E(java.lang.String,java.lang.String,org.apache.kafka.common.acl.AclOperation,org.apache.kafka.common.acl.AclPermissionType)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"acknowledge(ConsumerRecord)","u":"acknowledge(org.apache.kafka.clients.consumer.ConsumerRecord)"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"acknowledge(ConsumerRecord)","u":"acknowledge(org.apache.kafka.clients.consumer.ConsumerRecord)"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"acknowledge(ConsumerRecord)","u":"acknowledge(org.apache.kafka.clients.consumer.ConsumerRecord)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"acknowledge(ConsumerRecord, AcknowledgeType)","u":"acknowledge(org.apache.kafka.clients.consumer.ConsumerRecord,org.apache.kafka.clients.consumer.AcknowledgeType)"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"acknowledge(ConsumerRecord, AcknowledgeType)","u":"acknowledge(org.apache.kafka.clients.consumer.ConsumerRecord,org.apache.kafka.clients.consumer.AcknowledgeType)"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"acknowledge(ConsumerRecord, AcknowledgeType)","u":"acknowledge(org.apache.kafka.clients.consumer.ConsumerRecord,org.apache.kafka.clients.consumer.AcknowledgeType)"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"ACKS_CONFIG"},{"p":"org.apache.kafka.server.authorizer","c":"AclDeleteResult.AclBindingDeleteResult","l":"aclBinding()"},{"p":"org.apache.kafka.common.acl","c":"AclBinding","l":"AclBinding(ResourcePattern, AccessControlEntry)","u":"%3Cinit%3E(org.apache.kafka.common.resource.ResourcePattern,org.apache.kafka.common.acl.AccessControlEntry)"},{"p":"org.apache.kafka.server.authorizer","c":"AclDeleteResult.AclBindingDeleteResult","l":"AclBindingDeleteResult(AclBinding)","u":"%3Cinit%3E(org.apache.kafka.common.acl.AclBinding)"},{"p":"org.apache.kafka.server.authorizer","c":"AclDeleteResult.AclBindingDeleteResult","l":"AclBindingDeleteResult(AclBinding, ApiException)","u":"%3Cinit%3E(org.apache.kafka.common.acl.AclBinding,org.apache.kafka.common.errors.ApiException)"},{"p":"org.apache.kafka.server.authorizer","c":"AclDeleteResult","l":"aclBindingDeleteResults()"},{"p":"org.apache.kafka.common.acl","c":"AclBindingFilter","l":"AclBindingFilter(ResourcePatternFilter, 
AccessControlEntryFilter)","u":"%3Cinit%3E(org.apache.kafka.common.resource.ResourcePatternFilter,org.apache.kafka.common.acl.AccessControlEntryFilter)"},{"p":"org.apache.kafka.server.authorizer","c":"Authorizer","l":"aclCount()"},{"p":"org.apache.kafka.server.authorizer","c":"AclCreateResult","l":"AclCreateResult(ApiException)","u":"%3Cinit%3E(org.apache.kafka.common.errors.ApiException)"},{"p":"org.apache.kafka.server.authorizer","c":"AclDeleteResult","l":"AclDeleteResult(ApiException)","u":"%3Cinit%3E(org.apache.kafka.common.errors.ApiException)"},{"p":"org.apache.kafka.server.authorizer","c":"AclDeleteResult","l":"AclDeleteResult(Collection)","u":"%3Cinit%3E(java.util.Collection)"},{"p":"org.apache.kafka.server.authorizer","c":"Authorizer","l":"acls(AclBindingFilter)","u":"acls(org.apache.kafka.common.acl.AclBindingFilter)"},{"p":"org.apache.kafka.server.authorizer","c":"Action","l":"Action(AclOperation, ResourcePattern, int, boolean, boolean)","u":"%3Cinit%3E(org.apache.kafka.common.acl.AclOperation,org.apache.kafka.common.resource.ResourcePattern,int,boolean,boolean)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment.AssignedTask.Type","l":"ACTIVE"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignor.AssignmentError","l":"ACTIVE_TASK_ASSIGNED_MULTIPLE_TIMES"},{"p":"org.apache.kafka.streams","c":"KeyQueryMetadata","l":"activeHost()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeProducersResult.PartitionProducerState","l":"activeProducers()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberAssignment","l":"activeTasks()"},{"p":"org.apache.kafka.streams","c":"ThreadMetadata","l":"activeTasks()"},{"p":"org.apache.kafka.common.metrics","c":"Sensor","l":"add(CompoundStat)","u":"add(org.apache.kafka.common.metrics.CompoundStat)"},{"p":"org.apache.kafka.common.metrics","c":"Sensor","l":"add(CompoundStat, MetricConfig)","u":"add(org.apache.kafka.common.metrics.CompoundStat,org.apache.kafka.common.metrics.MetricConfig)"},{"p":"org.apache.kafka.common.header","c":"Headers","l":"add(Header)","u":"add(org.apache.kafka.common.header.Header)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"add(Header)","u":"add(org.apache.kafka.connect.header.Header)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"add(Header)","u":"add(org.apache.kafka.connect.header.Header)"},{"p":"org.apache.kafka.common.metrics","c":"Sensor","l":"add(MetricName, MeasurableStat)","u":"add(org.apache.kafka.common.MetricName,org.apache.kafka.common.metrics.MeasurableStat)"},{"p":"org.apache.kafka.common.metrics","c":"Sensor","l":"add(MetricName, MeasurableStat, MetricConfig)","u":"add(org.apache.kafka.common.MetricName,org.apache.kafka.common.metrics.MeasurableStat,org.apache.kafka.common.metrics.MetricConfig)"},{"p":"org.apache.kafka.common.header","c":"Headers","l":"add(String, byte[])","u":"add(java.lang.String,byte[])"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"add(String, Object, Schema)","u":"add(java.lang.String,java.lang.Object,org.apache.kafka.connect.data.Schema)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"add(String, Object, Schema)","u":"add(java.lang.String,java.lang.Object,org.apache.kafka.connect.data.Schema)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"add(String, SchemaAndValue)","u":"add(java.lang.String,org.apache.kafka.connect.data.SchemaAndValue)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"add(String, 
SchemaAndValue)","u":"add(java.lang.String,org.apache.kafka.connect.data.SchemaAndValue)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addBoolean(String, boolean)","u":"addBoolean(java.lang.String,boolean)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addBoolean(String, boolean)","u":"addBoolean(java.lang.String,boolean)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addByte(String, byte)","u":"addByte(java.lang.String,byte)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addByte(String, byte)","u":"addByte(java.lang.String,byte)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addBytes(String, byte[])","u":"addBytes(java.lang.String,byte[])"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addBytes(String, byte[])","u":"addBytes(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"addClientSaslSupport(ConfigDef)","u":"addClientSaslSupport(org.apache.kafka.common.config.ConfigDef)"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"addClientSslSupport(ConfigDef)","u":"addClientSslSupport(org.apache.kafka.common.config.ConfigDef)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addDate(String, Date)","u":"addDate(java.lang.String,java.util.Date)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addDate(String, Date)","u":"addDate(java.lang.String,java.util.Date)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addDecimal(String, BigDecimal)","u":"addDecimal(java.lang.String,java.math.BigDecimal)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addDecimal(String, BigDecimal)","u":"addDecimal(java.lang.String,java.math.BigDecimal)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addDouble(String, double)","u":"addDouble(java.lang.String,double)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addDouble(String, double)","u":"addDouble(java.lang.String,double)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"addedMetrics()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"addedMetrics()"},{"p":"org.apache.kafka.common.config","c":"ConfigValue","l":"addErrorMessage(String)","u":"addErrorMessage(java.lang.String)"},{"p":"org.apache.kafka.streams.query","c":"QueryResult","l":"addExecutionInfo(String)","u":"addExecutionInfo(java.lang.String)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addFloat(String, float)","u":"addFloat(java.lang.String,float)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addFloat(String, float)","u":"addFloat(java.lang.String,float)"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"addGlobalStore(StoreBuilder, String, Consumed, ProcessorSupplier)","u":"addGlobalStore(org.apache.kafka.streams.state.StoreBuilder,java.lang.String,org.apache.kafka.streams.kstream.Consumed,org.apache.kafka.streams.processor.api.ProcessorSupplier)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addGlobalStore(StoreBuilder, String, Deserializer, Deserializer, String, String, ProcessorSupplier)","u":"addGlobalStore(org.apache.kafka.streams.state.StoreBuilder,java.lang.String,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer,java.lang.String,java.lang.String,org.apache.kafka.streams.processor.api.ProcessorSupplier)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addGlobalStore(StoreBuilder, String, TimestampExtractor, Deserializer, 
Deserializer, String, String, ProcessorSupplier)","u":"addGlobalStore(org.apache.kafka.streams.state.StoreBuilder,java.lang.String,org.apache.kafka.streams.processor.TimestampExtractor,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer,java.lang.String,java.lang.String,org.apache.kafka.streams.processor.api.ProcessorSupplier)"},{"p":"org.apache.kafka.clients.admin","c":"PartitionReassignment","l":"addingReplicas()"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addInt(String, int)","u":"addInt(java.lang.String,int)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addInt(String, int)","u":"addInt(java.lang.String,int)"},{"p":"org.apache.kafka.streams","c":"StreamsMetrics","l":"addLatencyRateTotalSensor(String, String, String, Sensor.RecordingLevel, String...)","u":"addLatencyRateTotalSensor(java.lang.String,java.lang.String,java.lang.String,org.apache.kafka.common.metrics.Sensor.RecordingLevel,java.lang.String...)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addList(String, List, Schema)","u":"addList(java.lang.String,java.util.List,org.apache.kafka.connect.data.Schema)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addList(String, List, Schema)","u":"addList(java.lang.String,java.util.List,org.apache.kafka.connect.data.Schema)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addLong(String, long)","u":"addLong(java.lang.String,long)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addLong(String, long)","u":"addLong(java.lang.String,long)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addMap(String, Map, Schema)","u":"addMap(java.lang.String,java.util.Map,org.apache.kafka.connect.data.Schema)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addMap(String, Map, Schema)","u":"addMap(java.lang.String,java.util.Map,org.apache.kafka.connect.data.Schema)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"addMetric(MetricName, Measurable)","u":"addMetric(org.apache.kafka.common.MetricName,org.apache.kafka.common.metrics.Measurable)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"addMetric(MetricName, MetricConfig, Measurable)","u":"addMetric(org.apache.kafka.common.MetricName,org.apache.kafka.common.metrics.MetricConfig,org.apache.kafka.common.metrics.Measurable)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"addMetric(MetricName, MetricConfig, MetricValueProvider)","u":"addMetric(org.apache.kafka.common.MetricName,org.apache.kafka.common.metrics.MetricConfig,org.apache.kafka.common.metrics.MetricValueProvider)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"addMetric(MetricName, MetricValueProvider)","u":"addMetric(org.apache.kafka.common.MetricName,org.apache.kafka.common.metrics.MetricValueProvider)"},{"p":"org.apache.kafka.common.metrics","c":"PluginMetrics","l":"addMetric(MetricName, MetricValueProvider)","u":"addMetric(org.apache.kafka.common.MetricName,org.apache.kafka.common.metrics.MetricValueProvider)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"addMetricIfAbsent(MetricName, MetricConfig, MetricValueProvider)","u":"addMetricIfAbsent(org.apache.kafka.common.MetricName,org.apache.kafka.common.metrics.MetricConfig,org.apache.kafka.common.metrics.MetricValueProvider)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addProcessor(String, ProcessorSupplier, 
String...)","u":"addProcessor(java.lang.String,org.apache.kafka.streams.processor.api.ProcessorSupplier,java.lang.String...)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"addRaftVoter(int, Uuid, Set)","u":"addRaftVoter(int,org.apache.kafka.common.Uuid,java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"addRaftVoter(int, Uuid, Set, AddRaftVoterOptions)","u":"addRaftVoter(int,org.apache.kafka.common.Uuid,java.util.Set,org.apache.kafka.clients.admin.AddRaftVoterOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"addRaftVoter(int, Uuid, Set, AddRaftVoterOptions)","u":"addRaftVoter(int,org.apache.kafka.common.Uuid,java.util.Set,org.apache.kafka.clients.admin.AddRaftVoterOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"addRaftVoter(int, Uuid, Set, AddRaftVoterOptions)","u":"addRaftVoter(int,org.apache.kafka.common.Uuid,java.util.Set,org.apache.kafka.clients.admin.AddRaftVoterOptions)"},{"p":"org.apache.kafka.clients.admin","c":"AddRaftVoterOptions","l":"AddRaftVoterOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams","c":"StreamsMetrics","l":"addRateTotalSensor(String, String, String, Sensor.RecordingLevel, String...)","u":"addRateTotalSensor(java.lang.String,java.lang.String,java.lang.String,org.apache.kafka.common.metrics.Sensor.RecordingLevel,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addReadOnlyStateStore(StoreBuilder, String, Deserializer, Deserializer, String, String, ProcessorSupplier)","u":"addReadOnlyStateStore(org.apache.kafka.streams.state.StoreBuilder,java.lang.String,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer,java.lang.String,java.lang.String,org.apache.kafka.streams.processor.api.ProcessorSupplier)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addReadOnlyStateStore(StoreBuilder, String, TimestampExtractor, Deserializer, Deserializer, String, String, ProcessorSupplier)","u":"addReadOnlyStateStore(org.apache.kafka.streams.state.StoreBuilder,java.lang.String,org.apache.kafka.streams.processor.TimestampExtractor,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer,java.lang.String,java.lang.String,org.apache.kafka.streams.processor.api.ProcessorSupplier)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"addRecord(ConsumerRecord)","u":"addRecord(org.apache.kafka.clients.consumer.ConsumerRecord)"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"addRecord(ConsumerRecord)","u":"addRecord(org.apache.kafka.clients.consumer.ConsumerRecord)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadataManager","l":"addRemoteLogSegmentMetadata(RemoteLogSegmentMetadata)","u":"addRemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"addReporter(MetricsReporter)","u":"addReporter(org.apache.kafka.common.metrics.MetricsReporter)"},{"p":"org.apache.kafka.streams.query","c":"StateQueryResult","l":"addResult(int, QueryResult)","u":"addResult(int,org.apache.kafka.streams.query.QueryResult)"},{"p":"org.apache.kafka.common.metrics","c":"PluginMetrics","l":"addSensor(String)","u":"addSensor(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"StreamsMetrics","l":"addSensor(String, 
Sensor.RecordingLevel)","u":"addSensor(java.lang.String,org.apache.kafka.common.metrics.Sensor.RecordingLevel)"},{"p":"org.apache.kafka.streams","c":"StreamsMetrics","l":"addSensor(String, Sensor.RecordingLevel, Sensor...)","u":"addSensor(java.lang.String,org.apache.kafka.common.metrics.Sensor.RecordingLevel,org.apache.kafka.common.metrics.Sensor...)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addShort(String, short)","u":"addShort(java.lang.String,short)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addShort(String, short)","u":"addShort(java.lang.String,short)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSink(String, String, Serializer, Serializer, StreamPartitioner, String...)","u":"addSink(java.lang.String,java.lang.String,org.apache.kafka.common.serialization.Serializer,org.apache.kafka.common.serialization.Serializer,org.apache.kafka.streams.processor.StreamPartitioner,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSink(String, String, Serializer, Serializer, String...)","u":"addSink(java.lang.String,java.lang.String,org.apache.kafka.common.serialization.Serializer,org.apache.kafka.common.serialization.Serializer,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSink(String, String, StreamPartitioner, String...)","u":"addSink(java.lang.String,java.lang.String,org.apache.kafka.streams.processor.StreamPartitioner,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSink(String, String, String...)","u":"addSink(java.lang.String,java.lang.String,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSink(String, TopicNameExtractor, Serializer, Serializer, StreamPartitioner, String...)","u":"addSink(java.lang.String,org.apache.kafka.streams.processor.TopicNameExtractor,org.apache.kafka.common.serialization.Serializer,org.apache.kafka.common.serialization.Serializer,org.apache.kafka.streams.processor.StreamPartitioner,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSink(String, TopicNameExtractor, Serializer, Serializer, String...)","u":"addSink(java.lang.String,org.apache.kafka.streams.processor.TopicNameExtractor,org.apache.kafka.common.serialization.Serializer,org.apache.kafka.common.serialization.Serializer,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSink(String, TopicNameExtractor, StreamPartitioner, String...)","u":"addSink(java.lang.String,org.apache.kafka.streams.processor.TopicNameExtractor,org.apache.kafka.streams.processor.StreamPartitioner,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSink(String, TopicNameExtractor, String...)","u":"addSink(java.lang.String,org.apache.kafka.streams.processor.TopicNameExtractor,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(AutoOffsetReset, String, Deserializer, Deserializer, Pattern)","u":"addSource(org.apache.kafka.streams.AutoOffsetReset,java.lang.String,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer,java.util.regex.Pattern)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(AutoOffsetReset, String, Deserializer, Deserializer, 
String...)","u":"addSource(org.apache.kafka.streams.AutoOffsetReset,java.lang.String,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(AutoOffsetReset, String, Pattern)","u":"addSource(org.apache.kafka.streams.AutoOffsetReset,java.lang.String,java.util.regex.Pattern)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(AutoOffsetReset, String, String...)","u":"addSource(org.apache.kafka.streams.AutoOffsetReset,java.lang.String,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(AutoOffsetReset, String, TimestampExtractor, Deserializer, Deserializer, Pattern)","u":"addSource(org.apache.kafka.streams.AutoOffsetReset,java.lang.String,org.apache.kafka.streams.processor.TimestampExtractor,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer,java.util.regex.Pattern)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(AutoOffsetReset, String, TimestampExtractor, Deserializer, Deserializer, String...)","u":"addSource(org.apache.kafka.streams.AutoOffsetReset,java.lang.String,org.apache.kafka.streams.processor.TimestampExtractor,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(AutoOffsetReset, TimestampExtractor, String, Pattern)","u":"addSource(org.apache.kafka.streams.AutoOffsetReset,org.apache.kafka.streams.processor.TimestampExtractor,java.lang.String,java.util.regex.Pattern)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(AutoOffsetReset, TimestampExtractor, String, String...)","u":"addSource(org.apache.kafka.streams.AutoOffsetReset,org.apache.kafka.streams.processor.TimestampExtractor,java.lang.String,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(String, Deserializer, Deserializer, Pattern)","u":"addSource(java.lang.String,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer,java.util.regex.Pattern)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(String, Deserializer, Deserializer, String...)","u":"addSource(java.lang.String,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(String, Pattern)","u":"addSource(java.lang.String,java.util.regex.Pattern)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(String, String...)","u":"addSource(java.lang.String,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(TimestampExtractor, String, Pattern)","u":"addSource(org.apache.kafka.streams.processor.TimestampExtractor,java.lang.String,java.util.regex.Pattern)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(TimestampExtractor, String, String...)","u":"addSource(org.apache.kafka.streams.processor.TimestampExtractor,java.lang.String,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(Topology.AutoOffsetReset, String, Deserializer, Deserializer, 
Pattern)","u":"addSource(org.apache.kafka.streams.Topology.AutoOffsetReset,java.lang.String,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer,java.util.regex.Pattern)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(Topology.AutoOffsetReset, String, Deserializer, Deserializer, String...)","u":"addSource(org.apache.kafka.streams.Topology.AutoOffsetReset,java.lang.String,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(Topology.AutoOffsetReset, String, Pattern)","u":"addSource(org.apache.kafka.streams.Topology.AutoOffsetReset,java.lang.String,java.util.regex.Pattern)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(Topology.AutoOffsetReset, String, String...)","u":"addSource(org.apache.kafka.streams.Topology.AutoOffsetReset,java.lang.String,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(Topology.AutoOffsetReset, String, TimestampExtractor, Deserializer, Deserializer, Pattern)","u":"addSource(org.apache.kafka.streams.Topology.AutoOffsetReset,java.lang.String,org.apache.kafka.streams.processor.TimestampExtractor,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer,java.util.regex.Pattern)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(Topology.AutoOffsetReset, String, TimestampExtractor, Deserializer, Deserializer, String...)","u":"addSource(org.apache.kafka.streams.Topology.AutoOffsetReset,java.lang.String,org.apache.kafka.streams.processor.TimestampExtractor,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(Topology.AutoOffsetReset, TimestampExtractor, String, Pattern)","u":"addSource(org.apache.kafka.streams.Topology.AutoOffsetReset,org.apache.kafka.streams.processor.TimestampExtractor,java.lang.String,java.util.regex.Pattern)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addSource(Topology.AutoOffsetReset, TimestampExtractor, String, String...)","u":"addSource(org.apache.kafka.streams.Topology.AutoOffsetReset,org.apache.kafka.streams.processor.TimestampExtractor,java.lang.String,java.lang.String...)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"addStateStore(S)"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"addStateStore(StoreBuilder)","u":"addStateStore(org.apache.kafka.streams.state.StoreBuilder)"},{"p":"org.apache.kafka.streams","c":"Topology","l":"addStateStore(StoreBuilder, String...)","u":"addStateStore(org.apache.kafka.streams.state.StoreBuilder,java.lang.String...)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"addStreamThread()"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addString(String, String)","u":"addString(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addString(String, String)","u":"addString(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addStruct(String, Struct)","u":"addStruct(java.lang.String,org.apache.kafka.connect.data.Struct)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addStruct(String, 
Struct)","u":"addStruct(java.lang.String,org.apache.kafka.connect.data.Struct)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addTime(String, Date)","u":"addTime(java.lang.String,java.util.Date)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addTime(String, Date)","u":"addTime(java.lang.String,java.util.Date)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"addTimestamp(String, Date)","u":"addTimestamp(java.lang.String,java.util.Date)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"addTimestamp(String, Date)","u":"addTimestamp(java.lang.String,java.util.Date)"},{"p":"org.apache.kafka.connect.connector.policy","c":"ConnectorClientConfigRequest.ClientType","l":"ADMIN"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"ADMIN_CLIENT_PREFIX"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"ADMIN_CLIENT_PREFIX"},{"p":"org.apache.kafka.clients.admin","c":"AdminClient","l":"AdminClient()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"AdminClientConfig(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.streams","c":"ThreadMetadata","l":"adminClientId()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"adminClientPrefix(String)","u":"adminClientPrefix(java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"adminConfig()"},{"p":"org.apache.kafka.streams","c":"ClientInstanceIds","l":"adminInstanceId()"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindows","l":"advanceBy(Duration)","u":"advanceBy(java.time.Duration)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindows","l":"advanceMs"},{"p":"org.apache.kafka.streams","c":"TestInputTopic","l":"advanceTime(Duration)","u":"advanceTime(java.time.Duration)"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"advanceWallClockTime(Duration)","u":"advanceWallClockTime(java.time.Duration)"},{"p":"org.apache.kafka.streams.kstream","c":"JoinWindows","l":"after(Duration)","u":"after(java.time.Duration)"},{"p":"org.apache.kafka.streams.kstream","c":"JoinWindows","l":"afterMs"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedCogroupedKStream","l":"aggregate(Initializer)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedCogroupedKStream","l":"aggregate(Initializer, Materialized>)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedCogroupedKStream","l":"aggregate(Initializer, Merger)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Merger)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedCogroupedKStream","l":"aggregate(Initializer, Merger, Materialized>)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Merger,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedCogroupedKStream","l":"aggregate(Initializer, Merger, Named)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Merger,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedCogroupedKStream","l":"aggregate(Initializer, Merger, Named, 
Materialized>)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Merger,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedCogroupedKStream","l":"aggregate(Initializer, Named)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedCogroupedKStream","l":"aggregate(Initializer, Named, Materialized>)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"CogroupedKStream","l":"aggregate(Initializer)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedStream","l":"aggregate(Initializer, Aggregator)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedKStream","l":"aggregate(Initializer, Aggregator)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedStream","l":"aggregate(Initializer, Aggregator, Materialized>)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedKStream","l":"aggregate(Initializer, Aggregator, Materialized>)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedKStream","l":"aggregate(Initializer, Aggregator, Merger)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Merger)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedKStream","l":"aggregate(Initializer, Aggregator, Merger, Materialized>)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Merger,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedKStream","l":"aggregate(Initializer, Aggregator, Merger, Named)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Merger,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedKStream","l":"aggregate(Initializer, Aggregator, Merger, Named, Materialized>)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Merger,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedKStream","l":"aggregate(Initializer, Aggregator, Named)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedStream","l":"aggregate(Initializer, Aggregator, Named, 
Materialized>)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedKStream","l":"aggregate(Initializer, Aggregator, Named, Materialized>)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"CogroupedKStream","l":"aggregate(Initializer, Materialized>)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"CogroupedKStream","l":"aggregate(Initializer, Named)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"CogroupedKStream","l":"aggregate(Initializer, Named, Materialized>)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedTable","l":"aggregate(Initializer, Aggregator, Aggregator)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Aggregator)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedTable","l":"aggregate(Initializer, Aggregator, Aggregator, Materialized>)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedTable","l":"aggregate(Initializer, Aggregator, Aggregator, Named)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedTable","l":"aggregate(Initializer, Aggregator, Aggregator, Named, 
Materialized>)","u":"aggregate(org.apache.kafka.streams.kstream.Initializer,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Aggregator,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"ALL"},{"p":"org.apache.kafka.clients.admin","c":"AbortTransactionResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"AddRaftVoterResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"AlterClientQuotasResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"AlterConsumerGroupOffsetsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"AlterPartitionReassignmentsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"AlterReplicaLogDirsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"AlterShareGroupOffsetsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"AlterStreamsGroupOffsetsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"AlterUserScramCredentialsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"CreateAclsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"CreatePartitionsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteAclsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteConsumerGroupOffsetsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteConsumerGroupsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteRecordsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteShareGroupOffsetsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteShareGroupsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteStreamsGroupOffsetsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteStreamsGroupsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteTopicsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClassicGroupsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeConfigsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeConsumerGroupsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeProducersResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeReplicaLogDirsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeShareGroupsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeStreamsGroupsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeTransactionsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeUserScramCredentialsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"ElectLeadersResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"FenceProducersResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"ListClientMetricsResourcesResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"ListConfigResourcesResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupOffsetsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"ListGroupsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"ListOffsetsResult","l":"all()"},{"p":"org.apache.kafk
a.clients.admin","c":"ListShareGroupOffsetsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"ListStreamsGroupOffsetsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"RemoveMembersFromConsumerGroupResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"RemoveRaftVoterResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"UnregisterBrokerResult","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"UpdateFeaturesResult","l":"all()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilter","l":"all()"},{"p":"org.apache.kafka.streams.processor","c":"To","l":"all()"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyKeyValueStore","l":"all()"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyWindowStore","l":"all()"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsResult","l":"allByBrokerId()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeLogDirsResult","l":"allDescriptions()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"allLocalStorePartitionLags()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"allMetrics()"},{"p":"org.apache.kafka.common","c":"KafkaFuture","l":"allOf(KafkaFuture...)","u":"allOf(org.apache.kafka.common.KafkaFuture...)"},{"p":"org.apache.kafka.common.acl","c":"AclPermissionType","l":"ALLOW"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"ALLOW_AUTO_CREATE_TOPICS_CONFIG"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizationResult","l":"ALLOWED"},{"p":"org.apache.kafka.common.config.provider","c":"DirectoryConfigProvider","l":"ALLOWED_PATHS_CONFIG"},{"p":"org.apache.kafka.common.config.provider","c":"FileConfigProvider","l":"ALLOWED_PATHS_CONFIG"},{"p":"org.apache.kafka.common.config.provider","c":"DirectoryConfigProvider","l":"ALLOWED_PATHS_DOC"},{"p":"org.apache.kafka.common.config.provider","c":"FileConfigProvider","l":"ALLOWED_PATHS_DOC"},{"p":"org.apache.kafka.common.config.provider","c":"EnvVarConfigProvider","l":"ALLOWLIST_PATTERN_CONFIG"},{"p":"org.apache.kafka.common.config.provider","c":"EnvVarConfigProvider","l":"ALLOWLIST_PATTERN_CONFIG_DOC"},{"p":"org.apache.kafka.clients.admin","c":"AlterPartitionReassignmentsOptions","l":"allowReplicationFactorChange()"},{"p":"org.apache.kafka.clients.admin","c":"AlterPartitionReassignmentsOptions","l":"allowReplicationFactorChange(boolean)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"ApplicationState","l":"allTasks()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeTopicsResult","l":"allTopicIds()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeTopicsResult","l":"allTopicNames()"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"allWithName(String)","u":"allWithName(java.lang.String)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"allWithName(String)","u":"allWithName(java.lang.String)"},{"p":"org.apache.kafka.connect.errors","c":"AlreadyExistsException","l":"AlreadyExistsException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.errors","c":"AlreadyExistsException","l":"AlreadyExistsException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.connect.errors","c":"AlreadyExistsException","l":"AlreadyExistsException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"ALTER"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"ALTER_CONFIGS"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"alterClientQuotas(Collection)","u":"alterClientQuotas(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"alterClientQuotas(Collection, AlterClientQuotasOptions)","u":"alterClientQuotas(java.util.Collection,org.apache.kafka.clients.admin.AlterClientQuotasOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"alterClientQuotas(Collection, AlterClientQuotasOptions)","u":"alterClientQuotas(java.util.Collection,org.apache.kafka.clients.admin.AlterClientQuotasOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"alterClientQuotas(Collection, AlterClientQuotasOptions)","u":"alterClientQuotas(java.util.Collection,org.apache.kafka.clients.admin.AlterClientQuotasOptions)"},{"p":"org.apache.kafka.clients.admin","c":"AlterClientQuotasOptions","l":"AlterClientQuotasOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"AlterClientQuotasResult","l":"AlterClientQuotasResult(Map>)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigOp","l":"AlterConfigOp(ConfigEntry, AlterConfigOp.OpType)","u":"%3Cinit%3E(org.apache.kafka.clients.admin.ConfigEntry,org.apache.kafka.clients.admin.AlterConfigOp.OpType)"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigsOptions","l":"AlterConfigsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"alterConsumerGroupOffsets(String, Map)","u":"alterConsumerGroupOffsets(java.lang.String,java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"alterConsumerGroupOffsets(String, Map, AlterConsumerGroupOffsetsOptions)","u":"alterConsumerGroupOffsets(java.lang.String,java.util.Map,org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"alterConsumerGroupOffsets(String, Map, AlterConsumerGroupOffsetsOptions)","u":"alterConsumerGroupOffsets(java.lang.String,java.util.Map,org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"alterConsumerGroupOffsets(String, Map, AlterConsumerGroupOffsetsOptions)","u":"alterConsumerGroupOffsets(java.lang.String,java.util.Map,org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"AlterConsumerGroupOffsetsOptions","l":"AlterConsumerGroupOffsetsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"alternativeString"},{"p":"org.apache.kafka.connect.source","c":"SourceConnector","l":"alterOffsets(Map, Map, Map>)","u":"alterOffsets(java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.connect.sink","c":"SinkConnector","l":"alterOffsets(Map, Map)","u":"alterOffsets(java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"alterPartitionReassignments(Map>)","u":"alterPartitionReassignments(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"alterPartitionReassignments(Map>, 
AlterPartitionReassignmentsOptions)","u":"alterPartitionReassignments(java.util.Map,org.apache.kafka.clients.admin.AlterPartitionReassignmentsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"alterPartitionReassignments(Map>, AlterPartitionReassignmentsOptions)","u":"alterPartitionReassignments(java.util.Map,org.apache.kafka.clients.admin.AlterPartitionReassignmentsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"alterPartitionReassignments(Map>, AlterPartitionReassignmentsOptions)","u":"alterPartitionReassignments(java.util.Map,org.apache.kafka.clients.admin.AlterPartitionReassignmentsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"AlterPartitionReassignmentsOptions","l":"AlterPartitionReassignmentsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"alterReplicaLogDirs(Map)","u":"alterReplicaLogDirs(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"alterReplicaLogDirs(Map, AlterReplicaLogDirsOptions)","u":"alterReplicaLogDirs(java.util.Map,org.apache.kafka.clients.admin.AlterReplicaLogDirsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"alterReplicaLogDirs(Map, AlterReplicaLogDirsOptions)","u":"alterReplicaLogDirs(java.util.Map,org.apache.kafka.clients.admin.AlterReplicaLogDirsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"alterReplicaLogDirs(Map, AlterReplicaLogDirsOptions)","u":"alterReplicaLogDirs(java.util.Map,org.apache.kafka.clients.admin.AlterReplicaLogDirsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"AlterReplicaLogDirsOptions","l":"AlterReplicaLogDirsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"alterShareGroupOffsets(String, Map)","u":"alterShareGroupOffsets(java.lang.String,java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"alterShareGroupOffsets(String, Map, AlterShareGroupOffsetsOptions)","u":"alterShareGroupOffsets(java.lang.String,java.util.Map,org.apache.kafka.clients.admin.AlterShareGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"alterShareGroupOffsets(String, Map, AlterShareGroupOffsetsOptions)","u":"alterShareGroupOffsets(java.lang.String,java.util.Map,org.apache.kafka.clients.admin.AlterShareGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"alterShareGroupOffsets(String, Map, AlterShareGroupOffsetsOptions)","u":"alterShareGroupOffsets(java.lang.String,java.util.Map,org.apache.kafka.clients.admin.AlterShareGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"AlterShareGroupOffsetsOptions","l":"AlterShareGroupOffsetsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"alterStreamsGroupOffsets(String, Map)","u":"alterStreamsGroupOffsets(java.lang.String,java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"alterStreamsGroupOffsets(String, Map, AlterStreamsGroupOffsetsOptions)","u":"alterStreamsGroupOffsets(java.lang.String,java.util.Map,org.apache.kafka.clients.admin.AlterStreamsGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"alterStreamsGroupOffsets(String, Map, AlterStreamsGroupOffsetsOptions)","u":"alterStreamsGroupOffsets(java.lang.String,java.util.Map,org.apache.kafka.clients.admin.AlterStreamsGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"alterStreamsGroupOffsets(String, Map, 
AlterStreamsGroupOffsetsOptions)","u":"alterStreamsGroupOffsets(java.lang.String,java.util.Map,org.apache.kafka.clients.admin.AlterStreamsGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"AlterStreamsGroupOffsetsOptions","l":"AlterStreamsGroupOffsetsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"alterUserScramCredentials(List)","u":"alterUserScramCredentials(java.util.List)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"alterUserScramCredentials(List, AlterUserScramCredentialsOptions)","u":"alterUserScramCredentials(java.util.List,org.apache.kafka.clients.admin.AlterUserScramCredentialsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"alterUserScramCredentials(List, AlterUserScramCredentialsOptions)","u":"alterUserScramCredentials(java.util.List,org.apache.kafka.clients.admin.AlterUserScramCredentialsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"alterUserScramCredentials(List, AlterUserScramCredentialsOptions)","u":"alterUserScramCredentials(java.util.List,org.apache.kafka.clients.admin.AlterUserScramCredentialsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"AlterUserScramCredentialsOptions","l":"AlterUserScramCredentialsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"AlterUserScramCredentialsResult","l":"AlterUserScramCredentialsResult(Map>)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.common.security.auth","c":"KafkaPrincipal","l":"ANONYMOUS"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"ANY"},{"p":"org.apache.kafka.common.acl","c":"AclPermissionType","l":"ANY"},{"p":"org.apache.kafka.common.resource","c":"PatternType","l":"ANY"},{"p":"org.apache.kafka.common.resource","c":"ResourceType","l":"ANY"},{"p":"org.apache.kafka.streams.query","c":"ResultOrder","l":"ANY"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntryFilter","l":"ANY"},{"p":"org.apache.kafka.common.acl","c":"AclBindingFilter","l":"ANY"},{"p":"org.apache.kafka.common.resource","c":"ResourcePatternFilter","l":"ANY"},{"p":"org.apache.kafka.common.errors","c":"ApiException","l":"ApiException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"ApiException","l":"ApiException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"ApiException","l":"ApiException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"ApiException","l":"ApiException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"appConfigs()"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessingContext","l":"appConfigs()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"appConfigs()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"appConfigs()"},{"p":"org.apache.kafka.streams.processor","c":"StateStoreContext","l":"appConfigs()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"appConfigsWithPrefix(String)","u":"appConfigsWithPrefix(java.lang.String)"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessingContext","l":"appConfigsWithPrefix(String)","u":"appConfigsWithPrefix(java.lang.String)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"appConfigsWithPrefix(String)","u":"appConfigsWithPrefix(java.lang.String)"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"appConfigsWithPrefix(String)","u":"appConfigsWithPrefix(java.lang.String)"},{"p":"org.apache.kafka.streams.processor","c":"StateStoreContext","l":"appConfigsWithPrefix(String)","u":"appConfigsWithPrefix(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigOp.OpType","l":"APPEND"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"appendDeserializerToConfig(Map, Deserializer, Deserializer)","u":"appendDeserializerToConfig(java.util.Map,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"APPLICATION_ID_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"APPLICATION_SERVER_CONFIG"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"applicationConfigs"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"applicationId()"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessingContext","l":"applicationId()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"applicationId()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"applicationId()"},{"p":"org.apache.kafka.streams.processor","c":"StateStoreContext","l":"applicationId()"},{"p":"org.apache.kafka.common.errors","c":"ApplicationRecoverableException","l":"ApplicationRecoverableException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"ApplicationRecoverableException","l":"ApplicationRecoverableException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"ApplicationRecoverableException","l":"ApplicationRecoverableException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"ApplicationRecoverableException","l":"ApplicationRecoverableException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.streams.kstream","c":"Initializer","l":"apply()"},{"p":"org.apache.kafka.common","c":"KafkaFuture.BaseFunction","l":"apply(A)"},{"p":"org.apache.kafka.connect.header","c":"Headers.HeaderTransform","l":"apply(Header)","u":"apply(org.apache.kafka.connect.header.Header)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"apply(Headers.HeaderTransform)","u":"apply(org.apache.kafka.connect.header.Headers.HeaderTransform)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"apply(Headers.HeaderTransform)","u":"apply(org.apache.kafka.connect.header.Headers.HeaderTransform)"},{"p":"org.apache.kafka.streams.kstream","c":"ForeachAction","l":"apply(K, V)","u":"apply(K,V)"},{"p":"org.apache.kafka.streams.kstream","c":"KeyValueMapper","l":"apply(K, V)","u":"apply(K,V)"},{"p":"org.apache.kafka.streams.kstream","c":"ValueMapperWithKey","l":"apply(K, V)","u":"apply(K,V)"},{"p":"org.apache.kafka.streams.kstream","c":"Merger","l":"apply(K, V, V)","u":"apply(K,V,V)"},{"p":"org.apache.kafka.streams.kstream","c":"Aggregator","l":"apply(K, V, VAgg)","u":"apply(K,V,VAgg)"},{"p":"org.apache.kafka.streams.kstream","c":"ValueJoinerWithKey","l":"apply(K1, V1, V2)","u":"apply(K1,V1,V2)"},{"p":"org.apache.kafka.connect.transforms","c":"Transformation","l":"apply(R)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"apply(String, Headers.HeaderTransform)","u":"apply(java.lang.String,org.apache.kafka.connect.header.Headers.HeaderTransform)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"apply(String, Headers.HeaderTransform)","u":"apply(java.lang.String,org.apache.kafka.connect.header.Headers.HeaderTransform)"},{"p":"org.apache.kafka.streams.kstream","c":"ValueMapper","l":"apply(V)"},{"p":"org.apache.kafka.streams.kstream","c":"Reducer","l":"apply(V, V)","u":"apply(V,V)"},{"p":"org.apache.kafka.streams.kstream","c":"ValueJoiner","l":"apply(V1, 
V2)","u":"apply(V1,V2)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyKeyValueStore","l":"approximateNumEntries()"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"ARRAY"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"array(Schema)","u":"array(org.apache.kafka.connect.data.Schema)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized","l":"as(DslStoreSuppliers)","u":"as(org.apache.kafka.streams.state.DslStoreSuppliers)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized","l":"as(KeyValueBytesStoreSupplier)","u":"as(org.apache.kafka.streams.state.KeyValueBytesStoreSupplier)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized","l":"as(SessionBytesStoreSupplier)","u":"as(org.apache.kafka.streams.state.SessionBytesStoreSupplier)"},{"p":"org.apache.kafka.streams.kstream","c":"Branched","l":"as(String)","u":"as(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"as(String)","u":"as(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Grouped","l":"as(String)","u":"as(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"as(String)","u":"as(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized","l":"as(String)","u":"as(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Named","l":"as(String)","u":"as(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Produced","l":"as(String)","u":"as(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Repartitioned","l":"as(String)","u":"as(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"as(String)","u":"as(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"TableJoined","l":"as(String)","u":"as(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized","l":"as(WindowBytesStoreSupplier)","u":"as(org.apache.kafka.streams.state.WindowBytesStoreSupplier)"},{"p":"org.apache.kafka.streams.query","c":"ResultOrder","l":"ASCENDING"},{"p":"org.apache.kafka.streams.query","c":"VersionedKeyQuery","l":"asOf(Instant)","u":"asOf(java.time.Instant)"},{"p":"org.apache.kafka.streams.query","c":"VersionedKeyQuery","l":"asOfTimestamp()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"ASSIGN_FROM_SUBSCRIBED_ASSIGNORS"},{"p":"org.apache.kafka.streams.processor.assignment.assignors","c":"StickyTaskAssignor","l":"assign(ApplicationState)","u":"assign(org.apache.kafka.streams.processor.assignment.ApplicationState)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignor","l":"assign(ApplicationState)","u":"assign(org.apache.kafka.streams.processor.assignment.ApplicationState)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor","l":"assign(Cluster, ConsumerPartitionAssignor.GroupSubscription)","u":"assign(org.apache.kafka.common.Cluster,org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"assign(Collection)","u":"assign(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"assign(Collection)","u":"assign(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"assign(Collection)","u":"assign(java.util.Collection)"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"PartitionAssignor","l":"assign(GroupSpec, 
SubscribedTopicDescriber)","u":"assign(org.apache.kafka.coordinator.group.api.assignor.GroupSpec,org.apache.kafka.coordinator.group.api.assignor.SubscribedTopicDescriber)"},{"p":"org.apache.kafka.clients.consumer","c":"RangeAssignor","l":"assign(Map, Map)","u":"assign(java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"RoundRobinAssignor","l":"assign(Map, Map)","u":"assign(java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment.AssignedTask","l":"AssignedTask(TaskId, KafkaStreamsAssignment.AssignedTask.Type)","u":"%3Cinit%3E(org.apache.kafka.streams.processor.TaskId,org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask.Type)"},{"p":"org.apache.kafka.common","c":"ConsumerGroupState","l":"ASSIGNING"},{"p":"org.apache.kafka.common","c":"GroupState","l":"ASSIGNING"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"ASSIGNMENT_LISTENER"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"assignment()"},{"p":"org.apache.kafka.clients.admin","c":"ShareMemberDescription","l":"assignment()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"assignment()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"assignment()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"assignment()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"assignment()"},{"p":"org.apache.kafka.connect.sink","c":"SinkTaskContext","l":"assignment()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignor.TaskAssignment","l":"assignment()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Assignment","l":"Assignment(List)","u":"%3Cinit%3E(java.util.List)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Assignment","l":"Assignment(List, ByteBuffer)","u":"%3Cinit%3E(java.util.List,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"ApplicationState","l":"assignmentConfigs()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"AssignmentConfigs","l":"AssignmentConfigs(long, int, int, long, List, int, int, String)","u":"%3Cinit%3E(long,int,int,long,java.util.List,int,int,java.lang.String)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"AssignmentConfigs","l":"AssignmentConfigs(long, int, int, long, List, OptionalInt, OptionalInt, String)","u":"%3Cinit%3E(long,int,int,long,java.util.List,java.util.OptionalInt,java.util.OptionalInt,java.lang.String)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"AssignmentConfigs","l":"AssignmentConfigs(Long, Integer, Integer, Long, List)","u":"%3Cinit%3E(java.lang.Long,java.lang.Integer,java.lang.Integer,java.lang.Long,java.util.List)"},{"p":"org.apache.kafka.clients.admin","c":"NewPartitions","l":"assignments()"},{"p":"org.apache.kafka.clients.consumer","c":"CooperativeStickyAssignor","l":"assignPartitions(Map>, Map)","u":"assignPartitions(java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"RangeAssignor","l":"assignPartitions(Map>, 
Map)","u":"assignPartitions(java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment","l":"assignTask(KafkaStreamsAssignment.AssignedTask)","u":"assignTask(org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask)"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessorWrapper","l":"asWrapped(ProcessorSupplier)","u":"asWrapped(org.apache.kafka.streams.processor.api.ProcessorSupplier)"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessorWrapper","l":"asWrappedFixedKey(FixedKeyProcessorSupplier)","u":"asWrappedFixedKey(org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"AT_LEAST_ONCE"},{"p":"org.apache.kafka.streams.query","c":"PositionBound","l":"at(Position)","u":"at(org.apache.kafka.streams.query.Position)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Range","l":"atLeast(Number)","u":"atLeast(java.lang.Number)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ListSize","l":"atMostOfSize(int)"},{"p":"org.apache.kafka.common.security.plain","c":"PlainAuthenticateCallback","l":"authenticated()"},{"p":"org.apache.kafka.common.security.plain","c":"PlainAuthenticateCallback","l":"authenticated(boolean)"},{"p":"org.apache.kafka.common.errors","c":"AuthenticationException","l":"AuthenticationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"AuthenticationException","l":"AuthenticationException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"AuthenticationException","l":"AuthenticationException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"AuthorizationException","l":"AuthorizationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"AuthorizationException","l":"AuthorizationException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.server.authorizer","c":"Authorizer","l":"authorize(AuthorizableRequestContext, List)","u":"authorize(org.apache.kafka.server.authorizer.AuthorizableRequestContext,java.util.List)"},{"p":"org.apache.kafka.server.authorizer","c":"Authorizer","l":"authorizeByResourceType(AuthorizableRequestContext, AclOperation, 
ResourceType)","u":"authorizeByResourceType(org.apache.kafka.server.authorizer.AuthorizableRequestContext,org.apache.kafka.common.acl.AclOperation,org.apache.kafka.common.resource.ResourceType)"},{"p":"org.apache.kafka.clients.admin","c":"ClassicGroupDescription","l":"authorizedOperations()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"authorizedOperations()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClusterResult","l":"authorizedOperations()"},{"p":"org.apache.kafka.clients.admin","c":"ShareGroupDescription","l":"authorizedOperations()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupDescription","l":"authorizedOperations()"},{"p":"org.apache.kafka.clients.admin","c":"TopicDescription","l":"authorizedOperations()"},{"p":"org.apache.kafka.common.errors","c":"AuthorizerNotReadyException","l":"AuthorizerNotReadyException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"AUTO_COMMIT_INTERVAL_MS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"AUTO_OFFSET_RESET_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"AUTO_OFFSET_RESET_DOC"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"AUTOMATIC_CONFIG_PROVIDERS_PROPERTY"},{"p":"org.apache.kafka.common","c":"Cluster","l":"availablePartitionsForTopic(String)","u":"availablePartitionsForTopic(java.lang.String)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Avg","l":"Avg()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyWindowStore","l":"backwardAll()"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlySessionStore","l":"backwardFetch(K)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyWindowStore","l":"backwardFetch(K, Instant, Instant)","u":"backwardFetch(K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"WindowStore","l":"backwardFetch(K, Instant, Instant)","u":"backwardFetch(K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlySessionStore","l":"backwardFetch(K, K)","u":"backwardFetch(K,K)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyWindowStore","l":"backwardFetch(K, K, Instant, Instant)","u":"backwardFetch(K,K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"WindowStore","l":"backwardFetch(K, K, Instant, Instant)","u":"backwardFetch(K,K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"WindowStore","l":"backwardFetch(K, K, long, long)","u":"backwardFetch(K,K,long,long)"},{"p":"org.apache.kafka.streams.state","c":"WindowStore","l":"backwardFetch(K, long, long)","u":"backwardFetch(K,long,long)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyWindowStore","l":"backwardFetchAll(Instant, Instant)","u":"backwardFetchAll(java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"WindowStore","l":"backwardFetchAll(Instant, Instant)","u":"backwardFetchAll(java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"WindowStore","l":"backwardFetchAll(long, long)","u":"backwardFetchAll(long,long)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlySessionStore","l":"backwardFindSessions(K, Instant, Instant)","u":"backwardFindSessions(K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"SessionStore","l":"backwardFindSessions(K, Instant, 
Instant)","u":"backwardFindSessions(K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlySessionStore","l":"backwardFindSessions(K, K, Instant, Instant)","u":"backwardFindSessions(K,K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"SessionStore","l":"backwardFindSessions(K, K, Instant, Instant)","u":"backwardFindSessions(K,K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlySessionStore","l":"backwardFindSessions(K, K, long, long)","u":"backwardFindSessions(K,K,long,long)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlySessionStore","l":"backwardFindSessions(K, long, long)","u":"backwardFindSessions(K,long,long)"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"BATCH_SIZE_CONFIG"},{"p":"org.apache.kafka.streams.kstream","c":"JoinWindows","l":"before(Duration)","u":"before(java.time.Duration)"},{"p":"org.apache.kafka.streams.kstream","c":"JoinWindows","l":"beforeMs"},{"p":"org.apache.kafka.clients.admin","c":"RecordsToDelete","l":"beforeOffset()"},{"p":"org.apache.kafka.clients.admin","c":"RecordsToDelete","l":"beforeOffset(long)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"beginningOffsets(Collection)","u":"beginningOffsets(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"beginningOffsets(Collection)","u":"beginningOffsets(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"beginningOffsets(Collection)","u":"beginningOffsets(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"beginningOffsets(Collection, Duration)","u":"beginningOffsets(java.util.Collection,java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"beginningOffsets(Collection, Duration)","u":"beginningOffsets(java.util.Collection,java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"beginningOffsets(Collection, Duration)","u":"beginningOffsets(java.util.Collection,java.time.Duration)"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"beginTransaction()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"beginTransaction()"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"beginTransaction()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"beginTransactionException"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Range","l":"between(Number, 
Number)","u":"between(java.lang.Number,java.lang.Number)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteAclsResult.FilterResult","l":"binding()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram.BinScheme","l":"bins()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram.ConstantBinScheme","l":"bins()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram.LinearBinScheme","l":"bins()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"bool()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigType","l":"BOOLEAN"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Type","l":"BOOLEAN"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"BOOLEAN"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"BOOLEAN_SCHEMA"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"BOOLEAN_SIZE"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"Boolean()"},{"p":"org.apache.kafka.common.serialization","c":"BooleanDeserializer","l":"BooleanDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.BooleanSerde","l":"BooleanSerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"BooleanSerializer","l":"BooleanSerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"BOOTSTRAP_CONTROLLERS_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"BOOTSTRAP_CONTROLLERS_DOC"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"BOOTSTRAP_SERVERS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"BOOTSTRAP_SERVERS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"BOOTSTRAP_SERVERS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"BOOTSTRAP_SERVERS_CONFIG"},{"p":"org.apache.kafka.common","c":"Cluster","l":"bootstrap(List)","u":"bootstrap(java.util.List)"},{"p":"org.apache.kafka.common.metrics","c":"Quota","l":"bound()"},{"p":"org.apache.kafka.common.metrics","c":"QuotaViolationException","l":"bound()"},{"p":"org.apache.kafka.streams.kstream","c":"BranchedKStream","l":"branch(Predicate)","u":"branch(org.apache.kafka.streams.kstream.Predicate)"},{"p":"org.apache.kafka.streams.kstream","c":"BranchedKStream","l":"branch(Predicate, Branched)","u":"branch(org.apache.kafka.streams.kstream.Predicate,org.apache.kafka.streams.kstream.Branched)"},{"p":"org.apache.kafka.clients.admin","c":"EndpointType","l":"BROKER"},{"p":"org.apache.kafka.common.config","c":"ConfigResource.Type","l":"BROKER"},{"p":"org.apache.kafka.common.config","c":"ConfigResource.Type","l":"BROKER_LOGGER"},{"p":"org.apache.kafka.clients.admin","c":"DescribeProducersOptions","l":"brokerId()"},{"p":"org.apache.kafka.common","c":"TopicPartitionReplica","l":"brokerId()"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizerServerInfo","l":"brokerId()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadata","l":"brokerId()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeProducersOptions","l":"brokerId(int)"},{"p":"org.apache.kafka.common.errors","c":"BrokerIdNotRegisteredException","l":"BrokerIdNotRegisteredException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"BrokerIdNotRegisteredException","l":"BrokerIdNotRegisteredException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"BrokerJwtValidator","l":"BrokerJwtValidator()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"BrokerNotAvailableException","l":"BrokerNotAvailableException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"BrokerNotAvailableException","l":"BrokerNotAvailableException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"BrokerNotFoundException","l":"BrokerNotFoundException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"BrokerNotFoundException","l":"BrokerNotFoundException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"BrokerNotFoundException","l":"BrokerNotFoundException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"brokerTopicStatsMetrics()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"BUFFER_MEMORY_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"BUFFERED_RECORDS_PER_PARTITION_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"BUFFERED_RECORDS_PER_PARTITION_DOC"},{"p":"org.apache.kafka.clients.producer","c":"BufferExhaustedException","l":"BufferExhaustedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"BUILD_REMOTE_LOG_AUX_STATE_REQUESTS_PER_SEC_METRIC"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"build()"},{"p":"org.apache.kafka.streams.state","c":"StoreBuilder","l":"build()"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"build()"},{"p":"org.apache.kafka.common.security.auth","c":"KafkaPrincipalBuilder","l":"build(AuthenticationContext)","u":"build(org.apache.kafka.common.security.auth.AuthenticationContext)"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"build(Properties)","u":"build(java.util.Properties)"},{"p":"org.apache.kafka.connect.data","c":"Date","l":"builder()"},{"p":"org.apache.kafka.connect.data","c":"Time","l":"builder()"},{"p":"org.apache.kafka.connect.data","c":"Timestamp","l":"builder()"},{"p":"org.apache.kafka.connect.data","c":"Decimal","l":"builder(int)"},{"p":"org.apache.kafka.streams.state","c":"HostInfo","l":"buildFromEndpoint(String)","u":"buildFromEndpoint(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"BUILT_IN_METRICS_VERSION_CONFIG"},{"p":"org.apache.kafka.streams.state","c":"BuiltInDslStoreSuppliers","l":"BuiltInDslStoreSuppliers()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsResult","l":"byBrokerId()"},{"p":"org.apache.kafka.streams","c":"AutoOffsetReset","l":"byDuration(Duration)","u":"byDuration(java.time.Duration)"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"ByteArray()"},{"p":"org.apache.kafka.common.serialization","c":"ByteArrayDeserializer","l":"ByteArrayDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.ByteArraySerde","l":"ByteArraySerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"ByteArraySerializer","l":"ByteArraySerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"ByteBuffer()"},{"p":"org.apache.kafka.common.serialization","c":"ByteBu
fferDeserializer","l":"ByteBufferDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.ByteBufferSerde","l":"ByteBufferSerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"ByteBufferSerializer","l":"ByteBufferSerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"BYTES"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"BYTES_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"bytes()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"Bytes()"},{"p":"org.apache.kafka.common.serialization","c":"BytesDeserializer","l":"BytesDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.BytesSerde","l":"BytesSerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"BytesSerializer","l":"BytesSerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"CACHE_MAX_BYTES_BUFFERING_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"CACHE_MAX_BYTES_BUFFERING_DOC"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"cacheSize"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext.CapturedPunctuator","l":"cancel()"},{"p":"org.apache.kafka.streams.processor","c":"Cancellable","l":"cancel()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext.CapturedPunctuator","l":"cancel()"},{"p":"org.apache.kafka.common","c":"KafkaFuture","l":"cancel(boolean)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext.CapturedPunctuator","l":"cancelled()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext.CapturedPunctuator","l":"cancelled()"},{"p":"org.apache.kafka.connect.source","c":"SourceConnector","l":"canDefineTransactionBoundaries(Map)","u":"canDefineTransactionBoundaries(java.util.Map)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignmentUtils.MoveStandbyTaskPredicate","l":"canMoveStandbyTask(KafkaStreamsState, KafkaStreamsState, TaskId, Map)","u":"canMoveStandbyTask(org.apache.kafka.streams.processor.assignment.KafkaStreamsState,org.apache.kafka.streams.processor.assignment.KafkaStreamsState,org.apache.kafka.streams.processor.TaskId,java.util.Map)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext.CapturedForward","l":"CapturedForward(Record)","u":"%3Cinit%3E(org.apache.kafka.streams.processor.api.Record)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext.CapturedForward","l":"CapturedForward(Record, Optional)","u":"%3Cinit%3E(org.apache.kafka.streams.processor.api.Record,java.util.Optional)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Frequency","l":"centerValue()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"CHECK_CRCS_CONFIG"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"Checkpoint(String, TopicPartition, long, long, 
String)","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.common.TopicPartition,long,long,java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"DefaultReplicationPolicy","l":"checkpointsTopic(String)","u":"checkpointsTopic(java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"ReplicationPolicy","l":"checkpointsTopic(String)","u":"checkpointsTopic(java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClient","l":"checkpointTopics()"},{"p":"org.apache.kafka.connect.mirror","c":"RemoteClusterUtils","l":"checkpointTopics(Map)","u":"checkpointTopics(java.util.Map)"},{"p":"org.apache.kafka.common.metrics","c":"Sensor","l":"checkQuotas()"},{"p":"org.apache.kafka.common.metrics","c":"Sensor","l":"checkQuotas(long)"},{"p":"org.apache.kafka.streams.processor","c":"To","l":"child(String)","u":"child(java.lang.String)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext.CapturedForward","l":"childName()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext.CapturedForward","l":"childName()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigType","l":"CLASS"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Type","l":"CLASS"},{"p":"org.apache.kafka.clients.consumer","c":"GroupProtocol","l":"CLASSIC"},{"p":"org.apache.kafka.common","c":"GroupType","l":"CLASSIC"},{"p":"org.apache.kafka.streams","c":"GroupProtocol","l":"CLASSIC"},{"p":"org.apache.kafka.clients.admin","c":"ClassicGroupDescription","l":"ClassicGroupDescription(String, String, String, Collection, ClassicGroupState, Node)","u":"%3Cinit%3E(java.lang.String,java.lang.String,java.lang.String,java.util.Collection,org.apache.kafka.common.ClassicGroupState,org.apache.kafka.common.Node)"},{"p":"org.apache.kafka.clients.admin","c":"ClassicGroupDescription","l":"ClassicGroupDescription(String, String, String, Collection, ClassicGroupState, Node, 
Set)","u":"%3Cinit%3E(java.lang.String,java.lang.String,java.lang.String,java.util.Collection,org.apache.kafka.common.ClassicGroupState,org.apache.kafka.common.Node,java.util.Set)"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"CLEANUP_POLICY_COMPACT"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"CLEANUP_POLICY_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"CLEANUP_POLICY_DELETE"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"CLEANUP_POLICY_DOC"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"cleanUp()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"clear()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram","l":"clear()"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"clear()"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"clear()"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"CLIENT_DNS_LOOKUP_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"CLIENT_DNS_LOOKUP_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"CLIENT_DNS_LOOKUP_CONFIG"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaEntity.ConfigEntityType","l":"CLIENT_ID"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaEntity","l":"CLIENT_ID"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"CLIENT_ID_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"CLIENT_ID_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"CLIENT_ID_CONFIG"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginCallbackHandler","l":"CLIENT_ID_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"CLIENT_ID_CONFIG"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginCallbackHandler","l":"CLIENT_ID_DOC"},{"p":"org.apache.kafka.common.config","c":"ConfigResource.Type","l":"CLIENT_METRICS"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"CLIENT_RACK_CONFIG"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginCallbackHandler","l":"CLIENT_SECRET_CONFIG"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginCallbackHandler","l":"CLIENT_SECRET_DOC"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"CLIENT_TAG_PREFIX"},{"p":"org.apache.kafka.common.security.auth","c":"AuthenticationContext","l":"clientAddress()"},{"p":"org.apache.kafka.common.security.auth","c":"PlaintextAuthenticationContext","l":"clientAddress()"},{"p":"org.apache.kafka.common.security.auth","c":"SaslAuthenticationContext","l":"clientAddress()"},{"p":"org.apache.kafka.common.security.auth","c":"SslAuthenticationContext","l":"clientAddress()"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizableRequestContext","l":"clientAddress()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"ClientCredentialsJwtRetriever","l":"ClientCredentialsJwtRetriever()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"clientHost()"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"clientId()"},{"p":"org.apache.kafka.clients.admin","c":"ShareMemberDescription","l":"clientId()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"clientId()"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizableRequestContext","l":"clientId()"},{"p":"org.apache.kafka.server.telemetry","c":"ClientTelemetryPayload","l":"clientInstan
ceId()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"clientInstanceId(Duration)","u":"clientInstanceId(java.time.Duration)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"clientInstanceId(Duration)","u":"clientInstanceId(java.time.Duration)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"clientInstanceId(Duration)","u":"clientInstanceId(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"clientInstanceId(Duration)","u":"clientInstanceId(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"clientInstanceId(Duration)","u":"clientInstanceId(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"clientInstanceId(Duration)","u":"clientInstanceId(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"clientInstanceId(Duration)","u":"clientInstanceId(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"clientInstanceId(Duration)","u":"clientInstanceId(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"clientInstanceId(Duration)","u":"clientInstanceId(java.time.Duration)"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"clientInstanceId(Duration)","u":"clientInstanceId(java.time.Duration)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"clientInstanceId(Duration)","u":"clientInstanceId(java.time.Duration)"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"clientInstanceId(Duration)","u":"clientInstanceId(java.time.Duration)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"clientInstanceIds(Duration)","u":"clientInstanceIds(java.time.Duration)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"ClientJwtValidator","l":"ClientJwtValidator()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"ClientMetricsResourceListing","l":"ClientMetricsResourceListing(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.connector.policy","c":"ConnectorClientConfigRequest","l":"clientProps()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaAlteration","l":"ClientQuotaAlteration(ClientQuotaEntity, 
Collection)","u":"%3Cinit%3E(org.apache.kafka.common.quota.ClientQuotaEntity,java.util.Collection)"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaEntity","l":"ClientQuotaEntity(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.server.telemetry","c":"ClientTelemetry","l":"clientReceiver()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"clientTagPrefix(String)","u":"clientTagPrefix(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"clientTags()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsState","l":"clientTags()"},{"p":"org.apache.kafka.connect.connector.policy","c":"ConnectorClientConfigRequest","l":"clientType()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"close()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"close()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerInterceptor","l":"close()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"close()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"close()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"close()"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"close()"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"close()"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"close()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"close()"},{"p":"org.apache.kafka.clients.producer","c":"Partitioner","l":"close()"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"close()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerInterceptor","l":"close()"},{"p":"org.apache.kafka.clients.producer","c":"RoundRobinPartitioner","l":"close()"},{"p":"org.apache.kafka.common.config.provider","c":"DirectoryConfigProvider","l":"close()"},{"p":"org.apache.kafka.common.config.provider","c":"EnvVarConfigProvider","l":"close()"},{"p":"org.apache.kafka.common.config.provider","c":"FileConfigProvider","l":"close()"},{"p":"org.apache.kafka.common","c":"MessageFormatter","l":"close()"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"close()"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"close()"},{"p":"org.apache.kafka.common.metrics","c":"MetricsReporter","l":"close()"},{"p":"org.apache.kafka.common.security.auth","c":"AuthenticateCallbackHandler","l":"close()"},{"p":"org.apache.kafka.common.security.auth","c":"Login","l":"close()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"ClientCredentialsJwtRetriever","l":"close()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"DefaultJwtRetriever","l":"close()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"DefaultJwtValidator","l":"close()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"JwtBearerJwtRetriever","l":"close()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginCallbackHandler","l":"close()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerValidatorCallbackHandler","l":"close()"},{"p":"org.apache.kafka.common.serialization","c":"Deserializer","l":"close()"},{"p":"org.apache.kafka.common.serialization","c":"ListDeserializer","l":"close()"},{"p":"org.apache.kafka.common.serialization","c":"ListSerializer","l":"close()"},{"p":"org.apache.kafka.common.serialization","c":"Serde","l":"close()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.WrapperSerde","l":"close()"},{"p":"org.apache.kafka.common.serializat
ion","c":"Serializer","l":"close()"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClient","l":"close()"},{"p":"org.apache.kafka.connect.storage","c":"Converter","l":"close()"},{"p":"org.apache.kafka.connect.storage","c":"SimpleHeaderConverter","l":"close()"},{"p":"org.apache.kafka.connect.storage","c":"StringConverter","l":"close()"},{"p":"org.apache.kafka.connect.transforms.predicates","c":"Predicate","l":"close()"},{"p":"org.apache.kafka.connect.transforms","c":"Transformation","l":"close()"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaCallback","l":"close()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"close()"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedDeserializer","l":"close()"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedSerializer","l":"close()"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedDeserializer","l":"close()"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedSerializer","l":"close()"},{"p":"org.apache.kafka.streams.kstream","c":"Transformer","l":"close()"},{"p":"org.apache.kafka.streams.kstream","c":"ValueTransformer","l":"close()"},{"p":"org.apache.kafka.streams.kstream","c":"ValueTransformerWithKey","l":"close()"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyProcessor","l":"close()"},{"p":"org.apache.kafka.streams.processor.api","c":"Processor","l":"close()"},{"p":"org.apache.kafka.streams.processor","c":"StateStore","l":"close()"},{"p":"org.apache.kafka.streams.state","c":"KeyValueIterator","l":"close()"},{"p":"org.apache.kafka.streams.state","c":"VersionedRecordIterator","l":"close()"},{"p":"org.apache.kafka.streams.state","c":"WindowStoreIterator","l":"close()"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"close()"},{"p":"org.apache.kafka.tools.api","c":"RecordReader","l":"close()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"close(CloseOptions)","u":"close(org.apache.kafka.clients.consumer.CloseOptions)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"close(CloseOptions)","u":"close(org.apache.kafka.clients.consumer.CloseOptions)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"close(CloseOptions)","u":"close(org.apache.kafka.clients.consumer.CloseOptions)"},{"p":"org.apache.kafka.connect.sink","c":"SinkTask","l":"close(Collection)","u":"close(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"close(Duration)","u":"close(java.time.Duration)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"close(Duration)","u":"close(java.time.Duration)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"close(Duration)","u":"close(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"close(Duration)","u":"close(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"close(Duration)","u":"close(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"close(Duration)","u":"close(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"close(Duration)","u":"close(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"close(Duration)","u":"close(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"close(Duration)","u":"close(java.time.Duration)"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"close(Duration)","u":"close(java.time.Duration)"},{"p":"org.
apache.kafka.clients.producer","c":"MockProducer","l":"close(Duration)","u":"close(java.time.Duration)"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"close(Duration)","u":"close(java.time.Duration)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"close(Duration)","u":"close(java.time.Duration)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"close(KafkaStreams.CloseOptions)","u":"close(org.apache.kafka.streams.KafkaStreams.CloseOptions)"},{"p":"org.apache.kafka.streams.state","c":"RocksDBConfigSetter","l":"close(String, Options)","u":"close(java.lang.String,org.rocksdb.Options)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"closed()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"closed()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"closeException"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.CloseOptions","l":"CloseOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.resource","c":"ResourceType","l":"CLUSTER"},{"p":"org.apache.kafka.common.resource","c":"Resource","l":"CLUSTER"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"CLUSTER_ACTION"},{"p":"org.apache.kafka.common.resource","c":"Resource","l":"CLUSTER_NAME"},{"p":"org.apache.kafka.common","c":"Cluster","l":"Cluster(String, Collection, Collection, Set, Set)","u":"%3Cinit%3E(java.lang.String,java.util.Collection,java.util.Collection,java.util.Set,java.util.Set)"},{"p":"org.apache.kafka.common","c":"Cluster","l":"Cluster(String, Collection, Collection, Set, Set, Node)","u":"%3Cinit%3E(java.lang.String,java.util.Collection,java.util.Collection,java.util.Set,java.util.Set,org.apache.kafka.common.Node)"},{"p":"org.apache.kafka.common","c":"Cluster","l":"Cluster(String, Collection, Collection, Set, Set, Set, Node)","u":"%3Cinit%3E(java.lang.String,java.util.Collection,java.util.Collection,java.util.Set,java.util.Set,java.util.Set,org.apache.kafka.common.Node)"},{"p":"org.apache.kafka.common","c":"Cluster","l":"Cluster(String, Collection, Collection, Set, Set, Set, Node, Map)","u":"%3Cinit%3E(java.lang.String,java.util.Collection,java.util.Collection,java.util.Set,java.util.Set,java.util.Set,org.apache.kafka.common.Node,java.util.Map)"},{"p":"org.apache.kafka.common.errors","c":"ClusterAuthorizationException","l":"ClusterAuthorizationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"ClusterAuthorizationException","l":"ClusterAuthorizationException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.connect.health","c":"ConnectClusterState","l":"clusterDetails()"},{"p":"org.apache.kafka.clients.admin","c":"AddRaftVoterOptions","l":"clusterId()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClusterResult","l":"clusterId()"},{"p":"org.apache.kafka.clients.admin","c":"RemoveRaftVoterOptions","l":"clusterId()"},{"p":"org.apache.kafka.common","c":"ClusterResource","l":"clusterId()"},{"p":"org.apache.kafka.common","c":"Cluster","l":"clusterResource()"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizerServerInfo","l":"clusterResource()"},{"p":"org.apache.kafka.common","c":"ClusterResource","l":"ClusterResource(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.rest","c":"ConnectRestExtensionContext","l":"clusterState()"},{"p":"org.apache.kafka.clients.admin","c":"FeatureUpdate.UpgradeType","l":"code()"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"code()"},{"p":"org.apache.kafka.common.acl","c":"AclPermissionType","l":"code()"},{"p":"org.apache.kafka.common.resource","c":"PatternType","l":"code()"},{"p":"org.apache.kafka.common.resource","c":"ResourceType","l":"code()"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedStream","l":"cogroup(Aggregator)","u":"cogroup(org.apache.kafka.streams.kstream.Aggregator)"},{"p":"org.apache.kafka.streams.kstream","c":"CogroupedKStream","l":"cogroup(KGroupedStream, Aggregator)","u":"cogroup(org.apache.kafka.streams.kstream.KGroupedStream,org.apache.kafka.streams.kstream.Aggregator)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Avg","l":"combine(List, MetricConfig, long)","u":"combine(java.util.List,org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Frequencies","l":"combine(List, MetricConfig, long)","u":"combine(java.util.List,org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Max","l":"combine(List, MetricConfig, long)","u":"combine(java.util.List,org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Min","l":"combine(List, MetricConfig, long)","u":"combine(java.util.List,org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Percentiles","l":"combine(List, MetricConfig, long)","u":"combine(java.util.List,org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"SampledStat","l":"combine(List, MetricConfig, long)","u":"combine(java.util.List,org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"WindowedSum","l":"combine(List, MetricConfig, 
long)","u":"combine(java.util.List,org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"COMMIT_INTERVAL_MS_CONFIG"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginModule","l":"commit()"},{"p":"org.apache.kafka.common.security.plain","c":"PlainLoginModule","l":"commit()"},{"p":"org.apache.kafka.common.security.scram","c":"ScramLoginModule","l":"commit()"},{"p":"org.apache.kafka.connect.source","c":"SourceTask","l":"commit()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"commit()"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessingContext","l":"commit()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"commit()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"commit()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"commitAsync()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"commitAsync()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"commitAsync()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"commitAsync()"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"commitAsync()"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"commitAsync()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"commitAsync(Map, OffsetCommitCallback)","u":"commitAsync(java.util.Map,org.apache.kafka.clients.consumer.OffsetCommitCallback)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"commitAsync(Map, OffsetCommitCallback)","u":"commitAsync(java.util.Map,org.apache.kafka.clients.consumer.OffsetCommitCallback)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"commitAsync(Map, OffsetCommitCallback)","u":"commitAsync(java.util.Map,org.apache.kafka.clients.consumer.OffsetCommitCallback)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"commitAsync(OffsetCommitCallback)","u":"commitAsync(org.apache.kafka.clients.consumer.OffsetCommitCallback)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"commitAsync(OffsetCommitCallback)","u":"commitAsync(org.apache.kafka.clients.consumer.OffsetCommitCallback)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"commitAsync(OffsetCommitCallback)","u":"commitAsync(org.apache.kafka.clients.consumer.OffsetCommitCallback)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"commitCount()"},{"p":"org.apache.kafka.clients.consumer","c":"CommitFailedException","l":"CommitFailedException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.consumer","c":"CommitFailedException","l":"CommitFailedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.source","c":"SourceTask","l":"commitRecord(SourceRecord, RecordMetadata)","u":"commitRecord(org.apache.kafka.connect.source.SourceRecord,org.apache.kafka.clients.producer.RecordMetadata)"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceTask","l":"commitRecord(SourceRecord, 
RecordMetadata)","u":"commitRecord(org.apache.kafka.connect.source.SourceRecord,org.apache.kafka.clients.producer.RecordMetadata)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"commitSync()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"commitSync()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"commitSync()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"commitSync()"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"commitSync()"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"commitSync()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"commitSync(Duration)","u":"commitSync(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"commitSync(Duration)","u":"commitSync(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"commitSync(Duration)","u":"commitSync(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"commitSync(Duration)","u":"commitSync(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"commitSync(Duration)","u":"commitSync(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"commitSync(Duration)","u":"commitSync(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"commitSync(Map)","u":"commitSync(java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"commitSync(Map)","u":"commitSync(java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"commitSync(Map)","u":"commitSync(java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"commitSync(Map, Duration)","u":"commitSync(java.util.Map,java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"commitSync(Map, Duration)","u":"commitSync(java.util.Map,java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"commitSync(Map, Duration)","u":"commitSync(java.util.Map,java.time.Duration)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"committed()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"committed()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"committed(Set)","u":"committed(java.util.Set)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"committed(Set)","u":"committed(java.util.Set)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"committed(Set)","u":"committed(java.util.Set)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"committed(Set, Duration)","u":"committed(java.util.Set,java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"committed(Set, Duration)","u":"committed(java.util.Set,java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"committed(Set, 
Duration)","u":"committed(java.util.Set,java.time.Duration)"},{"p":"org.apache.kafka.streams","c":"TaskMetadata","l":"committedOffsets()"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"commitTransaction()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"commitTransaction()"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"commitTransaction()"},{"p":"org.apache.kafka.connect.source","c":"TransactionContext","l":"commitTransaction()"},{"p":"org.apache.kafka.connect.source","c":"TransactionContext","l":"commitTransaction(SourceRecord)","u":"commitTransaction(org.apache.kafka.connect.source.SourceRecord)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"commitTransactionException"},{"p":"org.apache.kafka.streams.processor.assignment","c":"ProcessId","l":"compareTo(ProcessId)","u":"compareTo(org.apache.kafka.streams.processor.assignment.ProcessId)"},{"p":"org.apache.kafka.streams.processor","c":"TaskId","l":"compareTo(TaskId)","u":"compareTo(org.apache.kafka.streams.processor.TaskId)"},{"p":"org.apache.kafka.common","c":"Uuid","l":"compareTo(Uuid)","u":"compareTo(org.apache.kafka.common.Uuid)"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"compilePredicate(Map)","u":"compilePredicate(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"TransactionState","l":"COMPLETE_ABORT"},{"p":"org.apache.kafka.clients.admin","c":"TransactionState","l":"COMPLETE_COMMIT"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceTask","l":"COMPLETE_RECORD_DATA_CONFIG"},{"p":"org.apache.kafka.common","c":"KafkaFuture","l":"completedFuture(U)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"completeNext()"},{"p":"org.apache.kafka.common","c":"ClassicGroupState","l":"COMPLETING_REBALANCE"},{"p":"org.apache.kafka.common","c":"ConsumerGroupState","l":"COMPLETING_REBALANCE"},{"p":"org.apache.kafka.common","c":"GroupState","l":"COMPLETING_REBALANCE"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilter","l":"components()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"COMPRESSION_GZIP_LEVEL_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"COMPRESSION_GZIP_LEVEL_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"COMPRESSION_GZIP_LEVEL_DOC"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"COMPRESSION_LZ4_LEVEL_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"COMPRESSION_LZ4_LEVEL_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"COMPRESSION_LZ4_LEVEL_DOC"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"COMPRESSION_TYPE_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"COMPRESSION_TYPE_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"COMPRESSION_TYPE_DOC"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"COMPRESSION_ZSTD_LEVEL_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"COMPRESSION_ZSTD_LEVEL_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"COMPRESSION_ZSTD_LEVEL_DOC"},{"p":"org.apache.kafka.common.errors","c":"ConcurrentTransactionsException","l":"ConcurrentTransactionsException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"CONFIG_PROVIDERS_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsResult.TopicMetadataAndConfig","l":"config()"},{"p":"org.apache.kafka.common.metrics","c":"KafkaMe
tric","l":"config()"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"config()"},{"p":"org.apache.kafka.connect.connector","c":"Connector","l":"config()"},{"p":"org.apache.kafka.connect.storage","c":"Converter","l":"config()"},{"p":"org.apache.kafka.connect.storage","c":"HeaderConverter","l":"config()"},{"p":"org.apache.kafka.connect.storage","c":"SimpleHeaderConverter","l":"config()"},{"p":"org.apache.kafka.connect.storage","c":"StringConverter","l":"config()"},{"p":"org.apache.kafka.connect.tools","c":"MockConnector","l":"config()"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkConnector","l":"config()"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceConnector","l":"config()"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceConnector","l":"config()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkConnector","l":"config()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceConnector","l":"config()"},{"p":"org.apache.kafka.connect.transforms.predicates","c":"Predicate","l":"config()"},{"p":"org.apache.kafka.connect.transforms","c":"Transformation","l":"config()"},{"p":"org.apache.kafka.clients.admin","c":"Config","l":"Config(Collection)","u":"%3Cinit%3E(java.util.Collection)"},{"p":"org.apache.kafka.common.config","c":"Config","l":"Config(List)","u":"%3Cinit%3E(java.util.List)"},{"p":"org.apache.kafka.common.metrics","c":"KafkaMetric","l":"config(MetricConfig)","u":"config(org.apache.kafka.common.metrics.MetricConfig)"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsResult","l":"config(String)","u":"config(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigData","l":"ConfigData(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.common.config","c":"ConfigData","l":"ConfigData(Map, Long)","u":"%3Cinit%3E(java.util.Map,java.lang.Long)"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"configDef()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"configDef()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"configDef()"},{"p":"org.apache.kafka.connect.storage","c":"StringConverterConfig","l":"configDef()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"configDef()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"ConfigDef()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"ConfigDef(ConfigDef)","u":"%3Cinit%3E(org.apache.kafka.common.config.ConfigDef)"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaEntity","l":"configEntities()"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigOp","l":"configEntry()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry","l":"ConfigEntry(String, String)","u":"%3Cinit%3E(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry","l":"ConfigEntry(String, String, ConfigEntry.ConfigSource, boolean, boolean, List, ConfigEntry.ConfigType, String)","u":"%3Cinit%3E(java.lang.String,java.lang.String,org.apache.kafka.clients.admin.ConfigEntry.ConfigSource,boolean,boolean,java.util.List,org.apache.kafka.clients.admin.ConfigEntry.ConfigType,java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigException","l":"ConfigException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigException","l":"ConfigException(String, Object)","u":"%3Cinit%3E(java.lang.String,java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigException","l":"ConfigException(String, Object, 
String)","u":"%3Cinit%3E(java.lang.String,java.lang.Object,java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"ConfigKey(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List, ConfigDef.Recommender, boolean)","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Validator,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef.Width,java.lang.String,java.util.List,org.apache.kafka.common.config.ConfigDef.Recommender,boolean)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"configKeys()"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"configNames()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"configNames()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"configNames()"},{"p":"org.apache.kafka.common.config","c":"ConfigResource","l":"ConfigResource(ConfigResource.Type, String)","u":"%3Cinit%3E(org.apache.kafka.common.config.ConfigResource.Type,java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"NewTopic","l":"configs()"},{"p":"org.apache.kafka.connect.sink","c":"SinkTaskContext","l":"configs()"},{"p":"org.apache.kafka.connect.source","c":"SourceTaskContext","l":"configs()"},{"p":"org.apache.kafka.server.policy","c":"AlterConfigPolicy.RequestMetadata","l":"configs()"},{"p":"org.apache.kafka.server.policy","c":"CreateTopicPolicy.RequestMetadata","l":"configs()"},{"p":"org.apache.kafka.clients.admin","c":"NewTopic","l":"configs(Map)","u":"configs(java.util.Map)"},{"p":"org.apache.kafka.common.config","c":"ConfigTransformer","l":"ConfigTransformer(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.common.config","c":"ConfigTransformerResult","l":"ConfigTransformerResult(Map, 
Map)","u":"%3Cinit%3E(java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.connect.rest","c":"ConnectRestExtensionContext","l":"configurable()"},{"p":"org.apache.kafka.clients.producer","c":"RoundRobinPartitioner","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.common.config.provider","c":"DirectoryConfigProvider","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.common.config.provider","c":"EnvVarConfigProvider","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.common.config.provider","c":"FileConfigProvider","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.common","c":"Configurable","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.common","c":"MessageFormatter","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.common.security.auth","c":"SecurityProviderCreator","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.connect.mirror","c":"DefaultReplicationPolicy","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.connect.mirror","c":"IdentityReplicationPolicy","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.connect.storage","c":"SimpleHeaderConverter","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.connect.storage","c":"StringConverter","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.streams.errors","c":"DefaultProductionExceptionHandler","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.streams.errors","c":"LogAndContinueExceptionHandler","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.streams.errors","c":"LogAndContinueProcessingExceptionHandler","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.streams.errors","c":"LogAndFailExceptionHandler","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.streams.errors","c":"LogAndFailProcessingExceptionHandler","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized.StoreType","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessorWrapper","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignor","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.streams.state","c":"DslStoreSuppliers","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.tools.api","c":"RecordReader","l":"configure(Map)","u":"configure(java.util.Map)"},{"p":"org.apache.kafka.common.serialization","c":"Deserializer","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.common.serialization","c":"ListDeserializer","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.common.serialization","c":"ListSerializer","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.common.serialization","c":"Serde","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.WrapperSerde","l":"configure(Map, 
boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.common.serialization","c":"Serializer","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.common.serialization","c":"StringDeserializer","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.common.serialization","c":"StringSerializer","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.common.serialization","c":"UUIDDeserializer","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.common.serialization","c":"UUIDSerializer","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.connect.storage","c":"Converter","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.connect.storage","c":"StringConverter","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedDeserializer","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedSerializer","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedDeserializer","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedSerializer","l":"configure(Map, boolean)","u":"configure(java.util.Map,boolean)"},{"p":"org.apache.kafka.common.security.auth","c":"Login","l":"configure(Map, String, Configuration, AuthenticateCallbackHandler)","u":"configure(java.util.Map,java.lang.String,javax.security.auth.login.Configuration,org.apache.kafka.common.security.auth.AuthenticateCallbackHandler)"},{"p":"org.apache.kafka.common.security.auth","c":"AuthenticateCallbackHandler","l":"configure(Map, String, List)","u":"configure(java.util.Map,java.lang.String,java.util.List)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"BrokerJwtValidator","l":"configure(Map, String, List)","u":"configure(java.util.Map,java.lang.String,java.util.List)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"ClientCredentialsJwtRetriever","l":"configure(Map, String, List)","u":"configure(java.util.Map,java.lang.String,java.util.List)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"ClientJwtValidator","l":"configure(Map, String, List)","u":"configure(java.util.Map,java.lang.String,java.util.List)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"DefaultJwtRetriever","l":"configure(Map, String, List)","u":"configure(java.util.Map,java.lang.String,java.util.List)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"DefaultJwtValidator","l":"configure(Map, String, List)","u":"configure(java.util.Map,java.lang.String,java.util.List)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"FileJwtRetriever","l":"configure(Map, String, List)","u":"configure(java.util.Map,java.lang.String,java.util.List)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"JwtBearerJwtRetriever","l":"configure(Map, String, List)","u":"configure(java.util.Map,java.lang.String,java.util.List)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginCallbackHandler","l":"configure(Map, String, 
List)","u":"configure(java.util.Map,java.lang.String,java.util.List)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerValidatorCallbackHandler","l":"configure(Map, String, List)","u":"configure(java.util.Map,java.lang.String,java.util.List)"},{"p":"org.apache.kafka.common.config","c":"ConfigValue","l":"ConfigValue(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigValue","l":"ConfigValue(String, Object, List, List)","u":"%3Cinit%3E(java.lang.String,java.lang.Object,java.util.List,java.util.List)"},{"p":"org.apache.kafka.common.config","c":"Config","l":"configValues()"},{"p":"org.apache.kafka.connect.errors","c":"ConnectException","l":"ConnectException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.errors","c":"ConnectException","l":"ConnectException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.connect.errors","c":"ConnectException","l":"ConnectException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"ConnectHeaders()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"ConnectHeaders(Iterable
)","u":"%3Cinit%3E(java.lang.Iterable)"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"CONNECTIONS_MAX_IDLE_MS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"CONNECTIONS_MAX_IDLE_MS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"CONNECTIONS_MAX_IDLE_MS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"CONNECTIONS_MAX_IDLE_MS_CONFIG"},{"p":"org.apache.kafka.connect.source","c":"SourceTask.TransactionBoundary","l":"CONNECTOR"},{"p":"org.apache.kafka.connect.tools","c":"MockConnector","l":"CONNECTOR_FAILURE"},{"p":"org.apache.kafka.connect.connector","c":"Connector","l":"Connector()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.connector.policy","c":"ConnectorClientConfigRequest","l":"connectorClass()"},{"p":"org.apache.kafka.connect.connector.policy","c":"ConnectorClientConfigRequest","l":"ConnectorClientConfigRequest(String, ConnectorType, Class, Map, ConnectorClientConfigRequest.ClientType)","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.connect.health.ConnectorType,java.lang.Class,java.util.Map,org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest.ClientType)"},{"p":"org.apache.kafka.connect.health","c":"ConnectClusterState","l":"connectorConfig(String)","u":"connectorConfig(java.lang.String)"},{"p":"org.apache.kafka.connect.health","c":"ConnectClusterState","l":"connectorHealth(String)","u":"connectorHealth(java.lang.String)"},{"p":"org.apache.kafka.connect.health","c":"ConnectorHealth","l":"ConnectorHealth(String, ConnectorState, Map, ConnectorType)","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.connect.health.ConnectorState,java.util.Map,org.apache.kafka.connect.health.ConnectorType)"},{"p":"org.apache.kafka.connect.connector.policy","c":"ConnectorClientConfigRequest","l":"connectorName()"},{"p":"org.apache.kafka.connect.health","c":"ConnectClusterState","l":"connectors()"},{"p":"org.apache.kafka.connect.health","c":"ConnectorHealth","l":"connectorState()"},{"p":"org.apache.kafka.connect.health","c":"ConnectorState","l":"ConnectorState(String, String, String)","u":"%3Cinit%3E(java.lang.String,java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.connect.connector.policy","c":"ConnectorClientConfigRequest","l":"connectorType()"},{"p":"org.apache.kafka.connect.util","c":"ConnectorUtils","l":"ConnectorUtils()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams","c":"Topology","l":"connectProcessorAndStateStores(String, String...)","u":"connectProcessorAndStateStores(java.lang.String,java.lang.String...)"},{"p":"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"ConnectRecord(String, Integer, Schema, Object, Schema, Object, Long)","u":"%3Cinit%3E(java.lang.String,java.lang.Integer,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object,java.lang.Long)"},{"p":"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"ConnectRecord(String, Integer, Schema, Object, Schema, Object, Long, Iterable
)","u":"%3Cinit%3E(java.lang.String,java.lang.Integer,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object,java.lang.Long,java.lang.Iterable)"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"ConnectSchema(Schema.Type)","u":"%3Cinit%3E(org.apache.kafka.connect.data.Schema.Type)"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"ConnectSchema(Schema.Type, boolean, Object, String, Integer, String)","u":"%3Cinit%3E(org.apache.kafka.connect.data.Schema.Type,boolean,java.lang.Object,java.lang.String,java.lang.Integer,java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"ConnectSchema(Schema.Type, boolean, Object, String, Integer, String, Map, List, Schema, Schema)","u":"%3Cinit%3E(org.apache.kafka.connect.data.Schema.Type,boolean,java.lang.Object,java.lang.String,java.lang.Integer,java.lang.String,java.util.Map,java.util.List,org.apache.kafka.connect.data.Schema,org.apache.kafka.connect.data.Schema)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Percentiles.BucketSizing","l":"CONSTANT"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram.ConstantBinScheme","l":"ConstantBinScheme(int, double, double)","u":"%3Cinit%3E(int,double,double)"},{"p":"org.apache.kafka.clients.consumer","c":"GroupProtocol","l":"CONSUMER"},{"p":"org.apache.kafka.common","c":"GroupType","l":"CONSUMER"},{"p":"org.apache.kafka.connect.connector.policy","c":"ConnectorClientConfigRequest.ClientType","l":"CONSUMER"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"CONSUMER_CLIENT_PREFIX"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"CONSUMER_GROUP_ID_KEY"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"CONSUMER_PREFIX"},{"p":"org.apache.kafka.streams","c":"ThreadMetadata","l":"consumerClientId()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsState","l":"consumerClientIds()"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"consumerConfig()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"ConsumerConfig(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"ConsumerConfig(Properties)","u":"%3Cinit%3E(java.util.Properties)"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"ConsumerGroupDescription(String, boolean, Collection, String, ConsumerGroupState, Node)","u":"%3Cinit%3E(java.lang.String,boolean,java.util.Collection,java.lang.String,org.apache.kafka.common.ConsumerGroupState,org.apache.kafka.common.Node)"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"ConsumerGroupDescription(String, boolean, Collection, String, ConsumerGroupState, Node, Set)","u":"%3Cinit%3E(java.lang.String,boolean,java.util.Collection,java.lang.String,org.apache.kafka.common.ConsumerGroupState,org.apache.kafka.common.Node,java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"ConsumerGroupDescription(String, boolean, Collection, String, GroupType, ConsumerGroupState, Node, Set)","u":"%3Cinit%3E(java.lang.String,boolean,java.util.Collection,java.lang.String,org.apache.kafka.common.GroupType,org.apache.kafka.common.ConsumerGroupState,org.apache.kafka.common.Node,java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"ConsumerGroupDescription(String, boolean, Collection, String, GroupType, GroupState, Node, Set, Optional, 
Optional)","u":"%3Cinit%3E(java.lang.String,boolean,java.util.Collection,java.lang.String,org.apache.kafka.common.GroupType,org.apache.kafka.common.GroupState,org.apache.kafka.common.Node,java.util.Set,java.util.Optional,java.util.Optional)"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"consumerGroupId()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupListing","l":"ConsumerGroupListing(String, boolean)","u":"%3Cinit%3E(java.lang.String,boolean)"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupListing","l":"ConsumerGroupListing(String, boolean, Optional)","u":"%3Cinit%3E(java.lang.String,boolean,java.util.Optional)"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupListing","l":"ConsumerGroupListing(String, boolean, Optional, Optional)","u":"%3Cinit%3E(java.lang.String,boolean,java.util.Optional,java.util.Optional)"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupListing","l":"ConsumerGroupListing(String, Optional, boolean)","u":"%3Cinit%3E(java.lang.String,java.util.Optional,boolean)"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupListing","l":"ConsumerGroupListing(String, Optional, Optional, boolean)","u":"%3Cinit%3E(java.lang.String,java.util.Optional,java.util.Optional,boolean)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerGroupMetadata","l":"ConsumerGroupMetadata(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerGroupMetadata","l":"ConsumerGroupMetadata(String, int, String, Optional)","u":"%3Cinit%3E(java.lang.String,int,java.lang.String,java.util.Optional)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"consumerGroupOffsetsHistory()"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"consumerId()"},{"p":"org.apache.kafka.clients.admin","c":"ShareMemberDescription","l":"consumerId()"},{"p":"org.apache.kafka.streams","c":"ClientInstanceIds","l":"consumerInstanceIds()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"consumerPrefix(String)","u":"consumerPrefix(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"ConsumerRecord(String, int, long, K, V)","u":"%3Cinit%3E(java.lang.String,int,long,K,V)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"ConsumerRecord(String, int, long, long, TimestampType, int, int, K, V, Headers, Optional)","u":"%3Cinit%3E(java.lang.String,int,long,long,org.apache.kafka.common.record.TimestampType,int,int,K,V,org.apache.kafka.common.header.Headers,java.util.Optional)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"ConsumerRecord(String, int, long, long, TimestampType, int, int, K, V, Headers, Optional, Optional)","u":"%3Cinit%3E(java.lang.String,int,long,long,org.apache.kafka.common.record.TimestampType,int,int,K,V,org.apache.kafka.common.header.Headers,java.util.Optional,java.util.Optional)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecords","l":"ConsumerRecords(Map>>)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecords","l":"ConsumerRecords(Map>>, 
Map)","u":"%3Cinit%3E(java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilter","l":"contains(Collection)","u":"contains(java.util.Collection)"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"containsMbean(String)","u":"containsMbean(java.lang.String)"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilter","l":"containsOnly(Collection)","u":"containsOnly(java.util.Collection)"},{"p":"org.apache.kafka.server.telemetry","c":"ClientTelemetryPayload","l":"contentType()"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"contextChange(MetricsContext)","u":"contextChange(org.apache.kafka.common.metrics.MetricsContext)"},{"p":"org.apache.kafka.common.metrics","c":"MetricsReporter","l":"contextChange(MetricsContext)","u":"contextChange(org.apache.kafka.common.metrics.MetricsContext)"},{"p":"org.apache.kafka.common.metrics","c":"KafkaMetricsContext","l":"contextLabels()"},{"p":"org.apache.kafka.common.metrics","c":"MetricsContext","l":"contextLabels()"},{"p":"org.apache.kafka.streams.errors","c":"DeserializationExceptionHandler.DeserializationHandlerResponse","l":"CONTINUE"},{"p":"org.apache.kafka.streams.errors","c":"ProcessingExceptionHandler.ProcessingHandlerResponse","l":"CONTINUE"},{"p":"org.apache.kafka.streams.errors","c":"ProductionExceptionHandler.ProductionExceptionHandlerResponse","l":"CONTINUE"},{"p":"org.apache.kafka.clients.admin","c":"EndpointType","l":"CONTROLLER"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaType","l":"CONTROLLER_MUTATION"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClusterResult","l":"controller()"},{"p":"org.apache.kafka.common","c":"Cluster","l":"controller()"},{"p":"org.apache.kafka.common.errors","c":"ControllerMovedException","l":"ControllerMovedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"ControllerMovedException","l":"ControllerMovedException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToBoolean(Schema, Object)","u":"convertToBoolean(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToByte(Schema, Object)","u":"convertToByte(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToDate(Schema, Object)","u":"convertToDate(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToDecimal(Schema, Object, int)","u":"convertToDecimal(org.apache.kafka.connect.data.Schema,java.lang.Object,int)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToDouble(Schema, Object)","u":"convertToDouble(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToFloat(Schema, Object)","u":"convertToFloat(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToInteger(Schema, Object)","u":"convertToInteger(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToList(Schema, Object)","u":"convertToList(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToLong(Schema, 
Object)","u":"convertToLong(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToMap(Schema, Object)","u":"convertToMap(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToShort(Schema, Object)","u":"convertToShort(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"convertToString(Object, ConfigDef.Type)","u":"convertToString(java.lang.Object,org.apache.kafka.common.config.ConfigDef.Type)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToString(Schema, Object)","u":"convertToString(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"convertToStringMapWithPasswordValues(Map)","u":"convertToStringMapWithPasswordValues(java.util.Map)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToStruct(Schema, Object)","u":"convertToStruct(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToTime(Schema, Object)","u":"convertToTime(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"convertToTimestamp(Schema, Object)","u":"convertToTimestamp(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.streams.state","c":"TimestampedBytesStore","l":"convertToTimestampedFormat(byte[])"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.RebalanceProtocol","l":"COOPERATIVE"},{"p":"org.apache.kafka.clients.consumer","c":"CooperativeStickyAssignor","l":"COOPERATIVE_STICKY_ASSIGNOR_NAME"},{"p":"org.apache.kafka.clients.consumer","c":"CooperativeStickyAssignor","l":"CooperativeStickyAssignor()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"ClassicGroupDescription","l":"coordinator()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"coordinator()"},{"p":"org.apache.kafka.clients.admin","c":"ShareGroupDescription","l":"coordinator()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupDescription","l":"coordinator()"},{"p":"org.apache.kafka.clients.admin","c":"AbortTransactionSpec","l":"coordinatorEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"ProducerState","l":"coordinatorEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionDescription","l":"coordinatorId()"},{"p":"org.apache.kafka.common.errors","c":"CoordinatorLoadInProgressException","l":"CoordinatorLoadInProgressException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"CoordinatorLoadInProgressException","l":"CoordinatorLoadInProgressException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"CoordinatorNotAvailableException","l":"CoordinatorNotAvailableException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"CoordinatorNotAvailableException","l":"CoordinatorNotAvailableException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentState","l":"COPY_SEGMENT_FINISHED"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentState","l":"COPY_SEGMENT_STARTED"},{"p":"org.apache.kafka.streams.query","c":"Position","l":"copy()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageManager","l":"copyLogSegmentData(RemoteLogSegmentMetadata, LogSegmentData)","u":"copyLogSegmentData(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata,org.apache.kafka.server.log.remote.storage.LogSegmentData)"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizableRequestContext","l":"correlationId()"},{"p":"org.apache.kafka.streams.errors","c":"TaskCorruptedException","l":"corruptedTasks()"},{"p":"org.apache.kafka.common.errors","c":"CorruptRecordException","l":"CorruptRecordException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"CorruptRecordException","l":"CorruptRecordException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"CorruptRecordException","l":"CorruptRecordException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"CorruptRecordException","l":"CorruptRecordException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecords","l":"count()"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedStream","l":"count()"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedTable","l":"count()"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedKStream","l":"count()"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedKStream","l":"count()"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedStream","l":"count(Materialized>)","u":"count(org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedTable","l":"count(Materialized>)","u":"count(org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedKStream","l":"count(Materialized>)","u":"count(org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedKStream","l":"count(Materialized>)","u":"count(org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedStream","l":"count(Named)","u":"count(org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedTable","l":"count(Named)","u":"count(org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedKStream","l":"count(Named)","u":"count(org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedKStream","l":"count(Named)","u":"count(org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedStream","l":"count(Named, Materialized>)","u":"count(org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedTable","l":"count(Named, Materialized>)","u":"count(org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedKStream","l":"count(Named, 
Materialized>)","u":"count(org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedKStream","l":"count(Named, Materialized>)","u":"count(org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram","l":"counts()"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"CREATE"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"CREATE_TOKENS"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"create(Map)","u":"create(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"AdminClient","l":"create(Map)","u":"create(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"create(Properties)","u":"create(java.util.Properties)"},{"p":"org.apache.kafka.clients.admin","c":"AdminClient","l":"create(Properties)","u":"create(java.util.Properties)"},{"p":"org.apache.kafka.streams.processor.api","c":"InternalFixedKeyRecordFactory","l":"create(Record)","u":"create(org.apache.kafka.streams.processor.api.Record)"},{"p":"org.apache.kafka.streams.state","c":"QueryableStoreType","l":"create(StateStoreProvider, String)","u":"create(org.apache.kafka.streams.state.internals.StateStoreProvider,java.lang.String)"},{"p":"org.apache.kafka.streams.state","c":"QueryableStoreTypes.KeyValueStoreType","l":"create(StateStoreProvider, String)","u":"create(org.apache.kafka.streams.state.internals.StateStoreProvider,java.lang.String)"},{"p":"org.apache.kafka.streams.state","c":"QueryableStoreTypes.SessionStoreType","l":"create(StateStoreProvider, String)","u":"create(org.apache.kafka.streams.state.internals.StateStoreProvider,java.lang.String)"},{"p":"org.apache.kafka.streams.state","c":"QueryableStoreTypes.WindowStoreType","l":"create(StateStoreProvider, String)","u":"create(org.apache.kafka.streams.state.internals.StateStoreProvider,java.lang.String)"},{"p":"org.apache.kafka.server.authorizer","c":"Authorizer","l":"createAcls(AuthorizableRequestContext, List)","u":"createAcls(org.apache.kafka.server.authorizer.AuthorizableRequestContext,java.util.List)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"createAcls(Collection)","u":"createAcls(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"createAcls(Collection, CreateAclsOptions)","u":"createAcls(java.util.Collection,org.apache.kafka.clients.admin.CreateAclsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"createAcls(Collection, CreateAclsOptions)","u":"createAcls(java.util.Collection,org.apache.kafka.clients.admin.CreateAclsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"createAcls(Collection, CreateAclsOptions)","u":"createAcls(java.util.Collection,org.apache.kafka.clients.admin.CreateAclsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"CreateAclsOptions","l":"CreateAclsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.auth","c":"SslEngineFactory","l":"createClientSslEngine(String, int, 
String)","u":"createClientSslEngine(java.lang.String,int,java.lang.String)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"CREATED"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"createDelegationToken()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"createDelegationToken(CreateDelegationTokenOptions)","u":"createDelegationToken(org.apache.kafka.clients.admin.CreateDelegationTokenOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"createDelegationToken(CreateDelegationTokenOptions)","u":"createDelegationToken(org.apache.kafka.clients.admin.CreateDelegationTokenOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"createDelegationToken(CreateDelegationTokenOptions)","u":"createDelegationToken(org.apache.kafka.clients.admin.CreateDelegationTokenOptions)"},{"p":"org.apache.kafka.clients.admin","c":"CreateDelegationTokenOptions","l":"CreateDelegationTokenOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"createInputTopic(String, Serializer, Serializer)","u":"createInputTopic(java.lang.String,org.apache.kafka.common.serialization.Serializer,org.apache.kafka.common.serialization.Serializer)"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"createInputTopic(String, Serializer, Serializer, Instant, Duration)","u":"createInputTopic(java.lang.String,org.apache.kafka.common.serialization.Serializer,org.apache.kafka.common.serialization.Serializer,java.time.Instant,java.time.Duration)"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"createOutputTopic(String, Deserializer, Deserializer)","u":"createOutputTopic(java.lang.String,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"createPartitions(Map)","u":"createPartitions(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"createPartitions(Map, CreatePartitionsOptions)","u":"createPartitions(java.util.Map,org.apache.kafka.clients.admin.CreatePartitionsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"createPartitions(Map, CreatePartitionsOptions)","u":"createPartitions(java.util.Map,org.apache.kafka.clients.admin.CreatePartitionsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"createPartitions(Map, CreatePartitionsOptions)","u":"createPartitions(java.util.Map,org.apache.kafka.clients.admin.CreatePartitionsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"CreatePartitionsOptions","l":"CreatePartitionsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.auth","c":"SslEngineFactory","l":"createServerSslEngine(String, int)","u":"createServerSslEngine(java.lang.String,int)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"createTopics(Collection)","u":"createTopics(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"createTopics(Collection, CreateTopicsOptions)","u":"createTopics(java.util.Collection,org.apache.kafka.clients.admin.CreateTopicsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"createTopics(Collection, CreateTopicsOptions)","u":"createTopics(java.util.Collection,org.apache.kafka.clients.admin.CreateTopicsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"createTopics(Collection, 
CreateTopicsOptions)","u":"createTopics(java.util.Collection,org.apache.kafka.clients.admin.CreateTopicsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsOptions","l":"CreateTopicsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"createWithUpdates(RemoteLogSegmentMetadataUpdate)","u":"createWithUpdates(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate)"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialUpsertion","l":"credentialInfo()"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialsDescription","l":"credentialInfos()"},{"p":"org.apache.kafka.common.metrics.stats","c":"CumulativeCount","l":"CumulativeCount()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.metrics.stats","c":"CumulativeSum","l":"CumulativeSum()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.metrics.stats","c":"CumulativeSum","l":"CumulativeSum(double)","u":"%3Cinit%3E(double)"},{"p":"org.apache.kafka.common.metrics.stats","c":"SampledStat","l":"current(long)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"currentLag(TopicPartition)","u":"currentLag(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"currentLag(TopicPartition)","u":"currentLag(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"currentLag(TopicPartition)","u":"currentLag(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.streams","c":"LagInfo","l":"currentOffsetPosition()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"currentStreamTimeMs()"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessingContext","l":"currentStreamTimeMs()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"currentStreamTimeMs()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"currentStreamTimeMs()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"currentSystemTimeMs()"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessingContext","l":"currentSystemTimeMs()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"currentSystemTimeMs()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"currentSystemTimeMs()"},{"p":"org.apache.kafka.clients.admin","c":"ProducerState","l":"currentTransactionStartOffset()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"customMetadata()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadataUpdate","l":"customMetadata()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata.CustomMetadata","l":"CustomMetadata(byte[])","u":"%3Cinit%3E(byte[])"},{"p":"org.apache.kafka.common.config","c":"ConfigData","l":"data()"},{"p":"org.apache.kafka.common.config","c":"ConfigTransformerResult","l":"data()"},{"p":"org.apache.kafka.server.telemetry","c":"ClientTelemetryPayload","l":"data()"},{"p":"org.apache.kafka.connect.errors","c":"DataException","l":"DataException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.errors","c":"DataException","l":"DataException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.connect.errors","c":"DataException","l":"DataException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.connect.data","c":"Date","l":"Date()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"dateFormatFor(Date)","u":"dateFormatFor(java.util.Date)"},{"p":"org.apache.kafka.common","c":"ClassicGroupState","l":"DEAD"},{"p":"org.apache.kafka.common","c":"ConsumerGroupState","l":"DEAD"},{"p":"org.apache.kafka.common","c":"GroupState","l":"DEAD"},{"p":"org.apache.kafka.common.metrics","c":"Sensor.RecordingLevel","l":"DEBUG"},{"p":"org.apache.kafka.common.config","c":"LogLevelConfig","l":"DEBUG_LOG_LEVEL"},{"p":"org.apache.kafka.connect.data","c":"Decimal","l":"Decimal()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.consumer","c":"CloseOptions.GroupMembershipOperation","l":"DEFAULT"},{"p":"org.apache.kafka.connect.source","c":"SourceTask.TransactionBoundary","l":"DEFAULT"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"DEFAULT_ALLOW_AUTO_CREATE_TOPICS"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"DEFAULT_API_TIMEOUT_MS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"DEFAULT_API_TIMEOUT_MS_CONFIG"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaEntity.ConfigEntityType","l":"DEFAULT_CLIENT_ID"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"DEFAULT_CLIENT_RACK"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DEFAULT_CLIENT_SUPPLIER_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DEFAULT_CLIENT_SUPPLIER_DOC"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSource","l":"DEFAULT_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_DOC"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DEFAULT_DSL_STORE"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DEFAULT_DSL_STORE_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DEFAULT_DSL_STORE_DOC"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"DEFAULT_EXCLUDE"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"DEFAULT_EXCLUDE_INTERNAL_TOPICS"},{"p":"org.apache.kafka.connect.tools","c":"MockConnector","l":"DEFAULT_FAILURE_DELAY_MS"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"DEFAULT_FETCH_MAX_BYTES"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"DEFAULT_FETCH_MAX_WAIT_MS"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"DEFAULT_FETCH_MIN_BYTES"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"DEFAULT_GROUP_PROTOCOL"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DEFAULT_GROUP_PROTOCOL"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"DEFAULT_GROUP_REMOTE_ASSIGNOR"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"DEFAULT_INCLUDE"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"DEFAULT_ISOLATION_LEVEL"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_KERBEROS_KINIT_CMD"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_KERBEROS_MIN_TIME_BEFORE_RELOGIN"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_KERBEROS_TICKET_RENEW_JITTER"},{"p":"org.
apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_KERBEROS_TICKET_RENEW_WINDOW_FACTOR"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DEFAULT_KEY_SERDE_CLASS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_LOGIN_REFRESH_BUFFER_SECONDS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_LOGIN_REFRESH_MIN_PERIOD_SECONDS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_LOGIN_REFRESH_WINDOW_FACTOR"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_LOGIN_REFRESH_WINDOW_JITTER"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"DEFAULT_MAX_PARTITION_FETCH_BYTES"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"DEFAULT_MAX_POLL_RECORDS"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"DEFAULT_METADATA_RECOVERY_STRATEGY"},{"p":"org.apache.kafka.common.metrics","c":"MetricConfig","l":"DEFAULT_NUM_SAMPLES"},{"p":"org.apache.kafka.common.config","c":"ConfigTransformer","l":"DEFAULT_PATTERN"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MAX_MS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_MECHANISM"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"DEFAULT_SECURITY_PROTOCOL"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"DEFAULT_SSL_ENABLED_PROTOCOLS"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"DEFAULT_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"DEFAULT_SSL_KEYMANGER_ALGORITHM"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"DEFAULT_SSL_KEYSTORE_TYPE"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","
l":"DEFAULT_SSL_PROTOCOL"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"DEFAULT_SSL_TRUSTMANAGER_ALGORITHM"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"DEFAULT_SSL_TRUSTSTORE_TYPE"},{"p":"org.apache.kafka.streams.processor.assignment.assignors","c":"StickyTaskAssignor","l":"DEFAULT_STICKY_NON_OVERLAP_COST"},{"p":"org.apache.kafka.streams.processor.assignment.assignors","c":"StickyTaskAssignor","l":"DEFAULT_STICKY_TRAFFIC_COST"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_DOC"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaEntity.ConfigEntityType","l":"DEFAULT_USER"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DEFAULT_VALUE_SERDE_CLASS_CONFIG"},{"p":"org.apache.kafka.streams.kstream","c":"BranchedKStream","l":"defaultBranch()"},{"p":"org.apache.kafka.streams.kstream","c":"BranchedKStream","l":"defaultBranch(Branched)","u":"defaultBranch(org.apache.kafka.streams.kstream.Branched)"},{"p":"org.apache.kafka.tools.api","c":"DefaultDecoder","l":"DefaultDecoder()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"defaultDeserializationExceptionHandler()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"DefaultJwtRetriever","l":"DefaultJwtRetriever()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"DefaultJwtValidator","l":"DefaultJwtValidator()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"DefaultJwtValidator","l":"DefaultJwtValidator(CloseableVerificationKeyResolver)","u":"%3Cinit%3E(org.apache.kafka.common.security.oauthbearer.internals.secured.CloseableVerificationKeyResolver)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"defaultKeySerde()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"defaultProductionExceptionHandler()"},{"p":"org.apache.kafka.streams.errors","c":"DefaultProductionExceptionHandler","l":"DefaultProductionExceptionHandler()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.mirror","c":"DefaultReplicationPolicy","l":"DefaultReplicationPolicy()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignmentUtils","l":"defaultStandbyTaskAssignment(ApplicationState, Map)","u":"defaultStandbyTaskAssignment(org.apache.kafka.streams.processor.assignment.ApplicationState,java.util.Map)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"defaultTimestampExtractor()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"defaultValue"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"defaultValue()"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"defaultValue()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"defaultValue()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"defaultValue(Object)","u":"defaultValue(java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"defaultValues()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"defaultValueSerde()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(ConfigDef.ConfigKey)","u":"define(org.apache.kafka.common.config.ConfigDef.ConfigKey)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, ConfigDef.Importance, 
String)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, ConfigDef.Importance, String, String, int, ConfigDef.Width, String)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef.Width,java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, ConfigDef.Recommender)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef.Width,java.lang.String,org.apache.kafka.common.config.ConfigDef.Recommender)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef.Width,java.lang.String,java.util.List)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List, ConfigDef.Recommender)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef.Width,java.lang.String,java.util.List,org.apache.kafka.common.config.ConfigDef.Recommender)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, Object, ConfigDef.Importance, String)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, Object, ConfigDef.Importance, String, String)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, Object, ConfigDef.Importance, String, String, int, ConfigDef.Width, String)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef.Width,java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, Object, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, ConfigDef.Recommender)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef.Width,java.lang.String,org.apache.kafka.common.config.ConfigDef.Recommender)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, Object, ConfigDef.Importance, 
String, String, int, ConfigDef.Width, String, List)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef.Width,java.lang.String,java.util.List)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, Object, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List, ConfigDef.Recommender)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef.Width,java.lang.String,java.util.List,org.apache.kafka.common.config.ConfigDef.Recommender)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Validator,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String, String, int, ConfigDef.Width, String)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Validator,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef.Width,java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, ConfigDef.Recommender)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Validator,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef.Width,java.lang.String,org.apache.kafka.common.config.ConfigDef.Recommender)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Validator,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef.Width,java.lang.String,java.util.List)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List, ConfigDef.Recommender)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Validator,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef.Width,java.lang.String,java.util.List,org.apache.kafka.common.config.ConfigDef.Recommender)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"define(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String, String, int, ConfigDef.Width, String, List, 
ConfigDef.Recommender, String)","u":"define(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Validator,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef.Width,java.lang.String,java.util.List,org.apache.kafka.common.config.ConfigDef.Recommender,java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"defineInternal(String, ConfigDef.Type, Object, ConfigDef.Importance)","u":"defineInternal(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Importance)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"defineInternal(String, ConfigDef.Type, Object, ConfigDef.Validator, ConfigDef.Importance, String)","u":"defineInternal(java.lang.String,org.apache.kafka.common.config.ConfigDef.Type,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Validator,org.apache.kafka.common.config.ConfigDef.Importance,java.lang.String)"},{"p":"org.apache.kafka.connect.tools","c":"MockConnector","l":"DELAY_MS_KEY"},{"p":"org.apache.kafka.common.resource","c":"ResourceType","l":"DELEGATION_TOKEN"},{"p":"org.apache.kafka.clients.admin","c":"CreateDelegationTokenResult","l":"delegationToken()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"DelegationToken","l":"DelegationToken(TokenInformation, byte[])","u":"%3Cinit%3E(org.apache.kafka.common.security.token.delegation.TokenInformation,byte[])"},{"p":"org.apache.kafka.common.errors","c":"DelegationTokenAuthorizationException","l":"DelegationTokenAuthorizationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"DelegationTokenAuthorizationException","l":"DelegationTokenAuthorizationException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"DelegationTokenDisabledException","l":"DelegationTokenDisabledException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"DelegationTokenDisabledException","l":"DelegationTokenDisabledException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"DelegationTokenExpiredException","l":"DelegationTokenExpiredException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"DelegationTokenExpiredException","l":"DelegationTokenExpiredException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"DelegationTokenNotFoundException","l":"DelegationTokenNotFoundException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"DelegationTokenNotFoundException","l":"DelegationTokenNotFoundException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"DelegationTokenOwnerMismatchException","l":"DelegationTokenOwnerMismatchException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"DelegationTokenOwnerMismatchException","l":"DelegationTokenOwnerMismatchException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeDelegationTokenResult","l":"delegationTokens()"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigOp.OpType","l":"DELETE"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"DELETE"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemotePartitionDeleteState","l":"DELETE_PARTITION_FINISHED"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemotePartitionDeleteState","l":"DELETE_PARTITION_MARKED"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemotePartitionDeleteState","l":"DELETE_PARTITION_STARTED"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"DELETE_RETENTION_MS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"DELETE_RETENTION_MS_DOC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentState","l":"DELETE_SEGMENT_FINISHED"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentState","l":"DELETE_SEGMENT_STARTED"},{"p":"org.apache.kafka.streams.state","c":"VersionedBytesStore","l":"delete(Bytes, long)","u":"delete(org.apache.kafka.common.utils.Bytes,long)"},{"p":"org.apache.kafka.streams.state","c":"KeyValueStore","l":"delete(K)"},{"p":"org.apache.kafka.streams.state","c":"VersionedKeyValueStore","l":"delete(K, long)","u":"delete(K,long)"},{"p":"org.apache.kafka.server.authorizer","c":"Authorizer","l":"deleteAcls(AuthorizableRequestContext, List)","u":"deleteAcls(org.apache.kafka.server.authorizer.AuthorizableRequestContext,java.util.List)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteAcls(Collection)","u":"deleteAcls(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteAcls(Collection, DeleteAclsOptions)","u":"deleteAcls(java.util.Collection,org.apache.kafka.clients.admin.DeleteAclsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"deleteAcls(Collection, DeleteAclsOptions)","u":"deleteAcls(java.util.Collection,org.apache.kafka.clients.admin.DeleteAclsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"deleteAcls(Collection, DeleteAclsOptions)","u":"deleteAcls(java.util.Collection,org.apache.kafka.clients.admin.DeleteAclsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteAclsOptions","l":"DeleteAclsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteConsumerGroupOffsets(String, Set)","u":"deleteConsumerGroupOffsets(java.lang.String,java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteConsumerGroupOffsets(String, Set, DeleteConsumerGroupOffsetsOptions)","u":"deleteConsumerGroupOffsets(java.lang.String,java.util.Set,org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"deleteConsumerGroupOffsets(String, Set, DeleteConsumerGroupOffsetsOptions)","u":"deleteConsumerGroupOffsets(java.lang.String,java.util.Set,org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"deleteConsumerGroupOffsets(String, Set, 
DeleteConsumerGroupOffsetsOptions)","u":"deleteConsumerGroupOffsets(java.lang.String,java.util.Set,org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteConsumerGroupOffsetsOptions","l":"DeleteConsumerGroupOffsetsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteConsumerGroups(Collection)","u":"deleteConsumerGroups(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteConsumerGroups(Collection, DeleteConsumerGroupsOptions)","u":"deleteConsumerGroups(java.util.Collection,org.apache.kafka.clients.admin.DeleteConsumerGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"deleteConsumerGroups(Collection, DeleteConsumerGroupsOptions)","u":"deleteConsumerGroups(java.util.Collection,org.apache.kafka.clients.admin.DeleteConsumerGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"deleteConsumerGroups(Collection, DeleteConsumerGroupsOptions)","u":"deleteConsumerGroups(java.util.Collection,org.apache.kafka.clients.admin.DeleteConsumerGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteConsumerGroupsOptions","l":"DeleteConsumerGroupsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteConsumerGroupsResult","l":"deletedGroups()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteShareGroupsResult","l":"deletedGroups()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteStreamsGroupsResult","l":"deletedGroups()"},{"p":"org.apache.kafka.clients.admin","c":"DeletedRecords","l":"DeletedRecords(long)","u":"%3Cinit%3E(long)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageManager","l":"deleteLogSegmentData(RemoteLogSegmentMetadata)","u":"deleteLogSegmentData(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteRecords(Map)","u":"deleteRecords(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteRecords(Map, DeleteRecordsOptions)","u":"deleteRecords(java.util.Map,org.apache.kafka.clients.admin.DeleteRecordsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"deleteRecords(Map, DeleteRecordsOptions)","u":"deleteRecords(java.util.Map,org.apache.kafka.clients.admin.DeleteRecordsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"deleteRecords(Map, DeleteRecordsOptions)","u":"deleteRecords(java.util.Map,org.apache.kafka.clients.admin.DeleteRecordsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteRecordsOptions","l":"DeleteRecordsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteRecordsResult","l":"DeleteRecordsResult(Map>)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteShareGroupOffsets(String, Set)","u":"deleteShareGroupOffsets(java.lang.String,java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteShareGroupOffsets(String, Set, DeleteShareGroupOffsetsOptions)","u":"deleteShareGroupOffsets(java.lang.String,java.util.Set,org.apache.kafka.clients.admin.DeleteShareGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"deleteShareGroupOffsets(String, Set, 
DeleteShareGroupOffsetsOptions)","u":"deleteShareGroupOffsets(java.lang.String,java.util.Set,org.apache.kafka.clients.admin.DeleteShareGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"deleteShareGroupOffsets(String, Set, DeleteShareGroupOffsetsOptions)","u":"deleteShareGroupOffsets(java.lang.String,java.util.Set,org.apache.kafka.clients.admin.DeleteShareGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteShareGroupOffsetsOptions","l":"DeleteShareGroupOffsetsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteShareGroups(Collection)","u":"deleteShareGroups(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteShareGroups(Collection, DeleteShareGroupsOptions)","u":"deleteShareGroups(java.util.Collection,org.apache.kafka.clients.admin.DeleteShareGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"deleteShareGroups(Collection, DeleteShareGroupsOptions)","u":"deleteShareGroups(java.util.Collection,org.apache.kafka.clients.admin.DeleteShareGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"deleteShareGroups(Collection, DeleteShareGroupsOptions)","u":"deleteShareGroups(java.util.Collection,org.apache.kafka.clients.admin.DeleteShareGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteShareGroupsOptions","l":"DeleteShareGroupsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteStreamsGroupOffsets(String, Set)","u":"deleteStreamsGroupOffsets(java.lang.String,java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteStreamsGroupOffsets(String, Set, DeleteStreamsGroupOffsetsOptions)","u":"deleteStreamsGroupOffsets(java.lang.String,java.util.Set,org.apache.kafka.clients.admin.DeleteStreamsGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"deleteStreamsGroupOffsets(String, Set, DeleteStreamsGroupOffsetsOptions)","u":"deleteStreamsGroupOffsets(java.lang.String,java.util.Set,org.apache.kafka.clients.admin.DeleteStreamsGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"deleteStreamsGroupOffsets(String, Set, DeleteStreamsGroupOffsetsOptions)","u":"deleteStreamsGroupOffsets(java.lang.String,java.util.Set,org.apache.kafka.clients.admin.DeleteStreamsGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteStreamsGroupOffsetsOptions","l":"DeleteStreamsGroupOffsetsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteStreamsGroups(Collection)","u":"deleteStreamsGroups(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteStreamsGroups(Collection, DeleteStreamsGroupsOptions)","u":"deleteStreamsGroups(java.util.Collection,org.apache.kafka.clients.admin.DeleteStreamsGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"deleteStreamsGroups(Collection, DeleteStreamsGroupsOptions)","u":"deleteStreamsGroups(java.util.Collection,org.apache.kafka.clients.admin.DeleteStreamsGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"deleteStreamsGroups(Collection, 
DeleteStreamsGroupsOptions)","u":"deleteStreamsGroups(java.util.Collection,org.apache.kafka.clients.admin.DeleteStreamsGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteStreamsGroupsOptions","l":"DeleteStreamsGroupsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteTopics(Collection)","u":"deleteTopics(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteTopics(Collection, DeleteTopicsOptions)","u":"deleteTopics(java.util.Collection,org.apache.kafka.clients.admin.DeleteTopicsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteTopics(TopicCollection)","u":"deleteTopics(org.apache.kafka.common.TopicCollection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"deleteTopics(TopicCollection, DeleteTopicsOptions)","u":"deleteTopics(org.apache.kafka.common.TopicCollection,org.apache.kafka.clients.admin.DeleteTopicsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"deleteTopics(TopicCollection, DeleteTopicsOptions)","u":"deleteTopics(org.apache.kafka.common.TopicCollection,org.apache.kafka.clients.admin.DeleteTopicsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"deleteTopics(TopicCollection, DeleteTopicsOptions)","u":"deleteTopics(org.apache.kafka.common.TopicCollection,org.apache.kafka.clients.admin.DeleteTopicsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteTopicsOptions","l":"DeleteTopicsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"DELIVERY_TIMEOUT_MS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"deliveryCount()"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizationResult","l":"DENIED"},{"p":"org.apache.kafka.common.acl","c":"AclPermissionType","l":"DENY"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"dependents"},{"p":"org.apache.kafka.streams.query","c":"ResultOrder","l":"DESCENDING"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"DESCRIBE"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"DESCRIBE_CONFIGS"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"DESCRIBE_TOKENS"},{"p":"org.apache.kafka.streams","c":"Topology","l":"describe()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeAcls(AclBindingFilter)","u":"describeAcls(org.apache.kafka.common.acl.AclBindingFilter)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeAcls(AclBindingFilter, DescribeAclsOptions)","u":"describeAcls(org.apache.kafka.common.acl.AclBindingFilter,org.apache.kafka.clients.admin.DescribeAclsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeAcls(AclBindingFilter, DescribeAclsOptions)","u":"describeAcls(org.apache.kafka.common.acl.AclBindingFilter,org.apache.kafka.clients.admin.DescribeAclsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeAcls(AclBindingFilter, DescribeAclsOptions)","u":"describeAcls(org.apache.kafka.common.acl.AclBindingFilter,org.apache.kafka.clients.admin.DescribeAclsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeAclsOptions","l":"DescribeAclsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeClassicGroups(Collection)","u":"describeClassicGroups(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeClassicGroups(Collection, 
DescribeClassicGroupsOptions)","u":"describeClassicGroups(java.util.Collection,org.apache.kafka.clients.admin.DescribeClassicGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeClassicGroups(Collection, DescribeClassicGroupsOptions)","u":"describeClassicGroups(java.util.Collection,org.apache.kafka.clients.admin.DescribeClassicGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeClassicGroups(Collection, DescribeClassicGroupsOptions)","u":"describeClassicGroups(java.util.Collection,org.apache.kafka.clients.admin.DescribeClassicGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClassicGroupsOptions","l":"DescribeClassicGroupsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClassicGroupsResult","l":"DescribeClassicGroupsResult(Map>)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeClientQuotas(ClientQuotaFilter)","u":"describeClientQuotas(org.apache.kafka.common.quota.ClientQuotaFilter)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeClientQuotas(ClientQuotaFilter, DescribeClientQuotasOptions)","u":"describeClientQuotas(org.apache.kafka.common.quota.ClientQuotaFilter,org.apache.kafka.clients.admin.DescribeClientQuotasOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeClientQuotas(ClientQuotaFilter, DescribeClientQuotasOptions)","u":"describeClientQuotas(org.apache.kafka.common.quota.ClientQuotaFilter,org.apache.kafka.clients.admin.DescribeClientQuotasOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeClientQuotas(ClientQuotaFilter, DescribeClientQuotasOptions)","u":"describeClientQuotas(org.apache.kafka.common.quota.ClientQuotaFilter,org.apache.kafka.clients.admin.DescribeClientQuotasOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClientQuotasOptions","l":"DescribeClientQuotasOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClientQuotasResult","l":"DescribeClientQuotasResult(KafkaFuture>>)","u":"%3Cinit%3E(org.apache.kafka.common.KafkaFuture)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeCluster()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeCluster(DescribeClusterOptions)","u":"describeCluster(org.apache.kafka.clients.admin.DescribeClusterOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeCluster(DescribeClusterOptions)","u":"describeCluster(org.apache.kafka.clients.admin.DescribeClusterOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeCluster(DescribeClusterOptions)","u":"describeCluster(org.apache.kafka.clients.admin.DescribeClusterOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClusterOptions","l":"DescribeClusterOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeConfigs(Collection)","u":"describeConfigs(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeConfigs(Collection, DescribeConfigsOptions)","u":"describeConfigs(java.util.Collection,org.apache.kafka.clients.admin.DescribeConfigsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeConfigs(Collection, 
DescribeConfigsOptions)","u":"describeConfigs(java.util.Collection,org.apache.kafka.clients.admin.DescribeConfigsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeConfigs(Collection, DescribeConfigsOptions)","u":"describeConfigs(java.util.Collection,org.apache.kafka.clients.admin.DescribeConfigsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeConfigsOptions","l":"DescribeConfigsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeConsumerGroups(Collection)","u":"describeConsumerGroups(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeConsumerGroups(Collection, DescribeConsumerGroupsOptions)","u":"describeConsumerGroups(java.util.Collection,org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeConsumerGroups(Collection, DescribeConsumerGroupsOptions)","u":"describeConsumerGroups(java.util.Collection,org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeConsumerGroups(Collection, DescribeConsumerGroupsOptions)","u":"describeConsumerGroups(java.util.Collection,org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeConsumerGroupsOptions","l":"DescribeConsumerGroupsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeConsumerGroupsResult","l":"DescribeConsumerGroupsResult(Map>)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeDelegationToken()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeDelegationToken(DescribeDelegationTokenOptions)","u":"describeDelegationToken(org.apache.kafka.clients.admin.DescribeDelegationTokenOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeDelegationToken(DescribeDelegationTokenOptions)","u":"describeDelegationToken(org.apache.kafka.clients.admin.DescribeDelegationTokenOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeDelegationToken(DescribeDelegationTokenOptions)","u":"describeDelegationToken(org.apache.kafka.clients.admin.DescribeDelegationTokenOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeDelegationTokenOptions","l":"DescribeDelegationTokenOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClassicGroupsResult","l":"describedGroups()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeConsumerGroupsResult","l":"describedGroups()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeShareGroupsResult","l":"describedGroups()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeStreamsGroupsResult","l":"describedGroups()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeFeatures()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeFeatures(DescribeFeaturesOptions)","u":"describeFeatures(org.apache.kafka.clients.admin.DescribeFeaturesOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeFeatures(DescribeFeaturesOptions)","u":"describeFeatures(org.apache.kafka.clients.admin.DescribeFeaturesOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeFeatures(DescribeFeaturesOptions)","u":"describeFeatures(org.apache.kafka.clients.admin.DescribeFeaturesOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeFeaturesOptions","l":"
DescribeFeaturesOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeLogDirs(Collection)","u":"describeLogDirs(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeLogDirs(Collection, DescribeLogDirsOptions)","u":"describeLogDirs(java.util.Collection,org.apache.kafka.clients.admin.DescribeLogDirsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeLogDirs(Collection, DescribeLogDirsOptions)","u":"describeLogDirs(java.util.Collection,org.apache.kafka.clients.admin.DescribeLogDirsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeLogDirs(Collection, DescribeLogDirsOptions)","u":"describeLogDirs(java.util.Collection,org.apache.kafka.clients.admin.DescribeLogDirsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeLogDirsOptions","l":"DescribeLogDirsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeMetadataQuorum()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeMetadataQuorum(DescribeMetadataQuorumOptions)","u":"describeMetadataQuorum(org.apache.kafka.clients.admin.DescribeMetadataQuorumOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeMetadataQuorum(DescribeMetadataQuorumOptions)","u":"describeMetadataQuorum(org.apache.kafka.clients.admin.DescribeMetadataQuorumOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeMetadataQuorum(DescribeMetadataQuorumOptions)","u":"describeMetadataQuorum(org.apache.kafka.clients.admin.DescribeMetadataQuorumOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeMetadataQuorumOptions","l":"DescribeMetadataQuorumOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeProducers(Collection)","u":"describeProducers(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeProducers(Collection, DescribeProducersOptions)","u":"describeProducers(java.util.Collection,org.apache.kafka.clients.admin.DescribeProducersOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeProducers(Collection, DescribeProducersOptions)","u":"describeProducers(java.util.Collection,org.apache.kafka.clients.admin.DescribeProducersOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeProducers(Collection, DescribeProducersOptions)","u":"describeProducers(java.util.Collection,org.apache.kafka.clients.admin.DescribeProducersOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeProducersOptions","l":"DescribeProducersOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeReplicaLogDirs(Collection)","u":"describeReplicaLogDirs(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeReplicaLogDirs(Collection, DescribeReplicaLogDirsOptions)","u":"describeReplicaLogDirs(java.util.Collection,org.apache.kafka.clients.admin.DescribeReplicaLogDirsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeReplicaLogDirs(Collection, DescribeReplicaLogDirsOptions)","u":"describeReplicaLogDirs(java.util.Collection,org.apache.kafka.clients.admin.DescribeReplicaLogDirsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeReplicaLogDirs(Collection, 
DescribeReplicaLogDirsOptions)","u":"describeReplicaLogDirs(java.util.Collection,org.apache.kafka.clients.admin.DescribeReplicaLogDirsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeReplicaLogDirsOptions","l":"DescribeReplicaLogDirsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeShareGroups(Collection)","u":"describeShareGroups(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeShareGroups(Collection, DescribeShareGroupsOptions)","u":"describeShareGroups(java.util.Collection,org.apache.kafka.clients.admin.DescribeShareGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeShareGroups(Collection, DescribeShareGroupsOptions)","u":"describeShareGroups(java.util.Collection,org.apache.kafka.clients.admin.DescribeShareGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeShareGroups(Collection, DescribeShareGroupsOptions)","u":"describeShareGroups(java.util.Collection,org.apache.kafka.clients.admin.DescribeShareGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeShareGroupsOptions","l":"DescribeShareGroupsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeShareGroupsResult","l":"DescribeShareGroupsResult(Map>)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeStreamsGroups(Collection)","u":"describeStreamsGroups(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeStreamsGroups(Collection, DescribeStreamsGroupsOptions)","u":"describeStreamsGroups(java.util.Collection,org.apache.kafka.clients.admin.DescribeStreamsGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeStreamsGroups(Collection, DescribeStreamsGroupsOptions)","u":"describeStreamsGroups(java.util.Collection,org.apache.kafka.clients.admin.DescribeStreamsGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeStreamsGroups(Collection, DescribeStreamsGroupsOptions)","u":"describeStreamsGroups(java.util.Collection,org.apache.kafka.clients.admin.DescribeStreamsGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeStreamsGroupsOptions","l":"DescribeStreamsGroupsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeStreamsGroupsResult","l":"DescribeStreamsGroupsResult(Map>)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeTopics(Collection)","u":"describeTopics(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeTopics(Collection, DescribeTopicsOptions)","u":"describeTopics(java.util.Collection,org.apache.kafka.clients.admin.DescribeTopicsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeTopics(TopicCollection)","u":"describeTopics(org.apache.kafka.common.TopicCollection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeTopics(TopicCollection, DescribeTopicsOptions)","u":"describeTopics(org.apache.kafka.common.TopicCollection,org.apache.kafka.clients.admin.DescribeTopicsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeTopics(TopicCollection, DescribeTopicsOptions)","u":"describeTopics(org.apache.kafka.common.TopicCollection,org.apache.kafka.clients.admin.DescribeTopicsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeTopics(TopicCollection, 
DescribeTopicsOptions)","u":"describeTopics(org.apache.kafka.common.TopicCollection,org.apache.kafka.clients.admin.DescribeTopicsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeTopicsOptions","l":"DescribeTopicsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeTransactions(Collection)","u":"describeTransactions(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeTransactions(Collection, DescribeTransactionsOptions)","u":"describeTransactions(java.util.Collection,org.apache.kafka.clients.admin.DescribeTransactionsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeTransactions(Collection, DescribeTransactionsOptions)","u":"describeTransactions(java.util.Collection,org.apache.kafka.clients.admin.DescribeTransactionsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeTransactions(Collection, DescribeTransactionsOptions)","u":"describeTransactions(java.util.Collection,org.apache.kafka.clients.admin.DescribeTransactionsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeTransactionsOptions","l":"DescribeTransactionsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeUserScramCredentials()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeUserScramCredentials(List)","u":"describeUserScramCredentials(java.util.List)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"describeUserScramCredentials(List, DescribeUserScramCredentialsOptions)","u":"describeUserScramCredentials(java.util.List,org.apache.kafka.clients.admin.DescribeUserScramCredentialsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"describeUserScramCredentials(List, DescribeUserScramCredentialsOptions)","u":"describeUserScramCredentials(java.util.List,org.apache.kafka.clients.admin.DescribeUserScramCredentialsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"describeUserScramCredentials(List, DescribeUserScramCredentialsOptions)","u":"describeUserScramCredentials(java.util.List,org.apache.kafka.clients.admin.DescribeUserScramCredentialsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeUserScramCredentialsOptions","l":"DescribeUserScramCredentialsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common","c":"MetricName","l":"description()"},{"p":"org.apache.kafka.common","c":"MetricNameTemplate","l":"description()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeTransactionsResult","l":"description(String)","u":"description(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeUserScramCredentialsResult","l":"description(String)","u":"description(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeLogDirsResult","l":"descriptions()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG"},{"p":"org.apache.kafka.streams","c":"TopologyConfig.TaskConfig","l":"deserializationExceptionHandler"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"deserializationExceptionHandler()"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"deserializationExceptionHandlerSupplier"},{"p":"org.apache.kafka.common.security.auth","c":"KafkaPrincipalSerde","l":"deserialize(byte[])"},{"p":"org.apache.kafka.common.serialization","c":"BooleanDeserializer","l":"deserialize(String, 
byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"ByteArrayDeserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"ByteBufferDeserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"BytesDeserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"Deserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"DoubleDeserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"FloatDeserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"IntegerDeserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"ListDeserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"LongDeserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"ShortDeserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"StringDeserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"UUIDDeserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"VoidDeserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedDeserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedDeserializer","l":"deserialize(String, byte[])","u":"deserialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"Deserializer","l":"deserialize(String, Headers, byte[])","u":"deserialize(java.lang.String,org.apache.kafka.common.header.Headers,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"BooleanDeserializer","l":"deserialize(String, Headers, ByteBuffer)","u":"deserialize(java.lang.String,org.apache.kafka.common.header.Headers,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.common.serialization","c":"ByteBufferDeserializer","l":"deserialize(String, Headers, ByteBuffer)","u":"deserialize(java.lang.String,org.apache.kafka.common.header.Headers,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.common.serialization","c":"Deserializer","l":"deserialize(String, Headers, ByteBuffer)","u":"deserialize(java.lang.String,org.apache.kafka.common.header.Headers,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.common.serialization","c":"DoubleDeserializer","l":"deserialize(String, Headers, ByteBuffer)","u":"deserialize(java.lang.String,org.apache.kafka.common.header.Headers,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.common.serialization","c":"FloatDeserializer","l":"deserialize(String, Headers, 
ByteBuffer)","u":"deserialize(java.lang.String,org.apache.kafka.common.header.Headers,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.common.serialization","c":"IntegerDeserializer","l":"deserialize(String, Headers, ByteBuffer)","u":"deserialize(java.lang.String,org.apache.kafka.common.header.Headers,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.common.serialization","c":"LongDeserializer","l":"deserialize(String, Headers, ByteBuffer)","u":"deserialize(java.lang.String,org.apache.kafka.common.header.Headers,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.common.serialization","c":"ShortDeserializer","l":"deserialize(String, Headers, ByteBuffer)","u":"deserialize(java.lang.String,org.apache.kafka.common.header.Headers,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.common.serialization","c":"StringDeserializer","l":"deserialize(String, Headers, ByteBuffer)","u":"deserialize(java.lang.String,org.apache.kafka.common.header.Headers,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.common.serialization","c":"UUIDDeserializer","l":"deserialize(String, Headers, ByteBuffer)","u":"deserialize(java.lang.String,org.apache.kafka.common.header.Headers,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.common.serialization","c":"VoidDeserializer","l":"deserialize(String, Headers, ByteBuffer)","u":"deserialize(java.lang.String,org.apache.kafka.common.header.Headers,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.common.serialization","c":"Serde","l":"deserializer()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.WrapperSerde","l":"deserializer()"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"deserializeRecord(ConsumerRecord)","u":"deserializeRecord(org.apache.kafka.clients.consumer.ConsumerRecord)"},{"p":"org.apache.kafka.connect.mirror","c":"Heartbeat","l":"deserializeRecord(ConsumerRecord)","u":"deserializeRecord(org.apache.kafka.clients.consumer.ConsumerRecord)"},{"p":"org.apache.kafka.common.config.provider","c":"DirectoryConfigProvider","l":"DirectoryConfigProvider()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"disableTelemetry()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"disableTelemetry()"},{"p":"org.apache.kafka.common.errors","c":"DisconnectException","l":"DisconnectException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"DisconnectException","l":"DisconnectException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"DisconnectException","l":"DisconnectException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"DisconnectException","l":"DisconnectException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"displayName"},{"p":"org.apache.kafka.clients.consumer","c":"LogTruncationException","l":"divergentOffsets()"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"doc()"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"doc()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"doc()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"doc(String)","u":"doc(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"documentation"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry","l":"documentation()"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"documentationOf(String)","u":"documentationOf(java.lang.String)"},{"p":"org.apache.kafka.streams.query","c":"FailureReason","l":"DOES_NOT_EXIST"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigType","l":"DOUBLE"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Type","l":"DOUBLE"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"Double()"},{"p":"org.apache.kafka.common.serialization","c":"DoubleDeserializer","l":"DoubleDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.DoubleSerde","l":"DoubleSerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"DoubleSerializer","l":"DoubleSerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"DOWNSTREAM_OFFSET_KEY"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"downstreamOffset()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DSL_STORE_SUPPLIERS_CLASS_CONFIG"},{"p":"org.apache.kafka.streams.state","c":"DslKeyValueParams","l":"DslKeyValueParams(String, boolean)","u":"%3Cinit%3E(java.lang.String,boolean)"},{"p":"org.apache.kafka.streams.state","c":"DslSessionParams","l":"DslSessionParams(String, Duration, EmitStrategy)","u":"%3Cinit%3E(java.lang.String,java.time.Duration,org.apache.kafka.streams.kstream.EmitStrategy)"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"dslStoreSuppliers"},{"p":"org.apache.kafka.streams.state","c":"DslWindowParams","l":"DslWindowParams(String, Duration, Duration, boolean, EmitStrategy, boolean, boolean)","u":"%3Cinit%3E(java.lang.String,java.time.Duration,java.time.Duration,boolean,org.apache.kafka.streams.kstream.EmitStrategy,boolean,boolean)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"DUMMY_THREAD_INDEX"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"duplicate()"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"duplicate()"},{"p":"org.apache.kafka.common.errors","c":"DuplicateBrokerRegistrationException","l":"DuplicateBrokerRegistrationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"DuplicateBrokerRegistrationException","l":"DuplicateBrokerRegistrationException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"DuplicateResourceException","l":"DuplicateResourceException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"DuplicateResourceException","l":"DuplicateResourceException(String, 
String)","u":"%3Cinit%3E(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"DuplicateResourceException","l":"DuplicateResourceException(String, String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"DuplicateResourceException","l":"DuplicateResourceException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"DuplicateSequenceException","l":"DuplicateSequenceException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"DuplicateVoterException","l":"DuplicateVoterException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"DuplicateVoterException","l":"DuplicateVoterException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSource","l":"DYNAMIC_BROKER_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSource","l":"DYNAMIC_BROKER_LOGGER_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSource","l":"DYNAMIC_CLIENT_METRICS_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSource","l":"DYNAMIC_DEFAULT_BROKER_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSource","l":"DYNAMIC_GROUP_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSource","l":"DYNAMIC_TOPIC_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.RebalanceProtocol","l":"EAGER"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetResetStrategy","l":"EARLIEST"},{"p":"org.apache.kafka.streams","c":"Topology.AutoOffsetReset","l":"EARLIEST"},{"p":"org.apache.kafka.clients.admin","c":"OffsetSpec","l":"earliest()"},{"p":"org.apache.kafka.streams","c":"AutoOffsetReset","l":"earliest()"},{"p":"org.apache.kafka.clients.admin","c":"OffsetSpec","l":"earliestLocal()"},{"p":"org.apache.kafka.clients.admin","c":"OffsetSpec.EarliestLocalSpec","l":"EarliestLocalSpec()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"OffsetSpec.EarliestSpec","l":"EarliestSpec()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizerServerInfo","l":"earlyStartListeners()"},{"p":"org.apache.kafka.common.errors","c":"ElectionNotNeededException","l":"ElectionNotNeededException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"ElectionNotNeededException","l":"ElectionNotNeededException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"electLeaders(ElectionType, Set)","u":"electLeaders(org.apache.kafka.common.ElectionType,java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"electLeaders(ElectionType, Set, ElectLeadersOptions)","u":"electLeaders(org.apache.kafka.common.ElectionType,java.util.Set,org.apache.kafka.clients.admin.ElectLeadersOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"electLeaders(ElectionType, Set, ElectLeadersOptions)","u":"electLeaders(org.apache.kafka.common.ElectionType,java.util.Set,org.apache.kafka.clients.admin.ElectLeadersOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"electLeaders(ElectionType, Set, 
ElectLeadersOptions)","u":"electLeaders(org.apache.kafka.common.ElectionType,java.util.Set,org.apache.kafka.clients.admin.ElectLeadersOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ElectLeadersOptions","l":"ElectLeadersOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"EligibleLeadersNotAvailableException","l":"EligibleLeadersNotAvailableException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"EligibleLeadersNotAvailableException","l":"EligibleLeadersNotAvailableException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common","c":"TopicPartitionInfo","l":"elr()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"embed(String, String, int, ConfigDef)","u":"embed(java.lang.String,java.lang.String,int,org.apache.kafka.common.config.ConfigDef)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"EMIT_INTERVAL_MS_KSTREAMS_OUTER_JOIN_SPURIOUS_RESULTS_FIX"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION"},{"p":"org.apache.kafka.streams.kstream","c":"Suppressed.BufferConfig","l":"emitEarlyWhenFull()"},{"p":"org.apache.kafka.streams.state","c":"DslSessionParams","l":"emitStrategy()"},{"p":"org.apache.kafka.streams.state","c":"DslWindowParams","l":"emitStrategy()"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedKStream","l":"emitStrategy(EmitStrategy)","u":"emitStrategy(org.apache.kafka.streams.kstream.EmitStrategy)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedKStream","l":"emitStrategy(EmitStrategy)","u":"emitStrategy(org.apache.kafka.streams.kstream.EmitStrategy)"},{"p":"org.apache.kafka.clients.admin","c":"TransactionState","l":"EMPTY"},{"p":"org.apache.kafka.common","c":"ClassicGroupState","l":"EMPTY"},{"p":"org.apache.kafka.common","c":"ConsumerGroupState","l":"EMPTY"},{"p":"org.apache.kafka.common","c":"GroupState","l":"EMPTY"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecords","l":"EMPTY"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecords","l":"empty()"},{"p":"org.apache.kafka.common","c":"Cluster","l":"empty()"},{"p":"org.apache.kafka.common.security.auth","c":"SaslExtensions","l":"empty()"},{"p":"org.apache.kafka.streams.query","c":"Position","l":"emptyPosition()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"ENABLE_AUTO_COMMIT_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"ENABLE_IDEMPOTENCE_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"ENABLE_IDEMPOTENCE_DOC"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"ENABLE_METRICS_PUSH_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"ENABLE_METRICS_PUSH_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"ENABLE_METRICS_PUSH_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"ENABLE_METRICS_PUSH_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"ENABLE_METRICS_PUSH_DOC"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"ENABLE_METRICS_PUSH_DOC"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"ENABLE_METRICS_PUSH_DOC"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"ENABLE_METRICS_PUSH_DOC"},{"p":"org.apache.kafka.streams.query","c":"StateQueryRequest","l":"enableExecutionInfo()"},{"p":"org.apache.kafka.streams","c":"StoreQueryParameters","l":"enabl
eStaleStores()"},{"p":"org.apache.kafka.connect.storage","c":"StringConverterConfig","l":"ENCODING_CONFIG"},{"p":"org.apache.kafka.connect.storage","c":"StringConverterConfig","l":"ENCODING_DEFAULT"},{"p":"org.apache.kafka.connect.storage","c":"StringConverterConfig","l":"encoding()"},{"p":"org.apache.kafka.streams.kstream","c":"Window","l":"end()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"endOffset()"},{"p":"org.apache.kafka.streams","c":"LagInfo","l":"endOffsetPosition()"},{"p":"org.apache.kafka.streams","c":"TaskMetadata","l":"endOffsets()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"endOffsets(Collection)","u":"endOffsets(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"endOffsets(Collection)","u":"endOffsets(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"endOffsets(Collection)","u":"endOffsets(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"endOffsets(Collection, Duration)","u":"endOffsets(java.util.Collection,java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"endOffsets(Collection, Duration)","u":"endOffsets(java.util.Collection,java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"endOffsets(Collection, Duration)","u":"endOffsets(java.util.Collection,java.time.Duration)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription.Endpoint","l":"Endpoint(String, int)","u":"%3Cinit%3E(java.lang.String,int)"},{"p":"org.apache.kafka.common","c":"Endpoint","l":"Endpoint(String, SecurityProtocol, String, int)","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.common.security.auth.SecurityProtocol,java.lang.String,int)"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo.Node","l":"endpoints()"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizerServerInfo","l":"endpoints()"},{"p":"org.apache.kafka.streams.kstream","c":"Window","l":"endTime()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"enforceRebalance()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"enforceRebalance()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"enforceRebalance()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"enforceRebalance(String)","u":"enforceRebalance(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"enforceRebalance(String)","u":"enforceRebalance(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"enforceRebalance(String)","u":"enforceRebalance(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"ENSURE_EXPLICIT_INTERNAL_RESOURCE_NAMING_CONFIG"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"ensureExplicitInternalResourceNaming"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.CaseInsensitiveValidString","l":"ensureValid(String, Object)","u":"ensureValid(java.lang.String,java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.CompositeValidator","l":"ensureValid(String, Object)","u":"ensureValid(java.lang.String,java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.LambdaValidator","l":"ensureValid(String, Object)","u":"ensureValid(java.lang.String,java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ListSize","l":"ensureValid(String, 
Object)","u":"ensureValid(java.lang.String,java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.NonEmptyString","l":"ensureValid(String, Object)","u":"ensureValid(java.lang.String,java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.NonEmptyStringWithoutControlChars","l":"ensureValid(String, Object)","u":"ensureValid(java.lang.String,java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.NonNullValidator","l":"ensureValid(String, Object)","u":"ensureValid(java.lang.String,java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Range","l":"ensureValid(String, Object)","u":"ensureValid(java.lang.String,java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Validator","l":"ensureValid(String, Object)","u":"ensureValid(java.lang.String,java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ValidList","l":"ensureValid(String, Object)","u":"ensureValid(java.lang.String,java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ValidString","l":"ensureValid(String, Object)","u":"ensureValid(java.lang.String,java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClientQuotasResult","l":"entities()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaAlteration","l":"entity()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilterComponent","l":"entityType()"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaEntity.ConfigEntity","l":"entityType()"},{"p":"org.apache.kafka.clients.admin","c":"Config","l":"entries()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaEntity","l":"entries()"},{"p":"org.apache.kafka.common.acl","c":"AclBinding","l":"entry()"},{"p":"org.apache.kafka.common.acl","c":"AclBindingFilter","l":"entryFilter()"},{"p":"org.apache.kafka.common.config.provider","c":"EnvVarConfigProvider","l":"EnvVarConfigProvider()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.config.provider","c":"EnvVarConfigProvider","l":"EnvVarConfigProvider(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"eosEnabled"},{"p":"org.apache.kafka.streams","c":"TopologyConfig.TaskConfig","l":"eosEnabled"},{"p":"org.apache.kafka.clients.producer","c":"PreparedTxnState","l":"epoch()"},{"p":"org.apache.kafka.clients.admin","c":"FenceProducersResult","l":"epochId(String)","u":"epochId(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"AbortTransactionSpec","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigOp","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ClassicGroupDescription","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ClientMetricsResourceListing","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"Config","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSynonym","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupListing","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeProducersOptions","l":"equals(O
bject)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"FeatureMetadata","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"FeatureUpdate","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"FinalizedVersionRange","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"GroupListing","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupOffsetsSpec","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ListShareGroupOffsetsSpec","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ListTopicsOptions","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsOptions","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"MemberAssignment","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"MemberToRemove","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"NewTopic","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ProducerState","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo.Node","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo.ReplicaState","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"RaftVoterEndpoint","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"RecordsToDelete","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ScramCredentialInfo","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ShareGroupDescription","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ShareMemberAssignment","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"ShareMemberDescription","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupDescription","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberAssignment","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberAssignment.TaskIds","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription.Endpoint","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription.TaskOffset","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubtopologyDescription","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubto
pologyDescription.TopicInfo","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"SupportedVersionRange","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"TopicDescription","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"TransactionDescription","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"TransactionListing","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialsDescription","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerGroupMetadata","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndMetadata","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndTimestamp","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.consumer","c":"SubscriptionPattern","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.producer","c":"PreparedTxnState","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntry","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntryFilter","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.acl","c":"AclBinding","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.acl","c":"AclBindingFilter","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common","c":"Cluster","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common","c":"ClusterResource","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigResource","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigValue","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common","c":"Endpoint","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common","c":"MetricName","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common","c":"MetricNameTemplate","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.metrics","c":"Quota","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common","c":"Node","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common","c":"PartitionInfo","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaAlteration.Op","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaEntity","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilter","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilterComponent","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.resource","c":"Resource","l":"equals(Object)","u":"equals(java.la
ng.Object)"},{"p":"org.apache.kafka.common.resource","c":"ResourcePattern","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.resource","c":"ResourcePatternFilter","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.security.auth","c":"KafkaPrincipal","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.security.auth","c":"SaslExtensions","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.security.token.delegation","c":"DelegationToken","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common","c":"TopicIdPartition","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common","c":"TopicPartition","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common","c":"TopicPartitionInfo","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common","c":"TopicPartitionReplica","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.common","c":"Uuid","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"Field","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"SchemaAndValue","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.connect.health","c":"AbstractState","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.connect.health","c":"ConnectorHealth","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.connect.health","c":"TaskState","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.connect.mirror","c":"SourceAndTarget","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.connect.sink","c":"SinkRecord","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.connect.source","c":"SourceRecord","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"GroupAssignment","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.server.authorizer","c":"Action","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"LogSegmentData","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentId","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata.CustomMetadata","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMe
tadataUpdate","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemotePartitionDeleteMetadata","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.server.policy","c":"AlterConfigPolicy.RequestMetadata","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.server.policy","c":"CreateTopicPolicy.RequestMetadata","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams","c":"AutoOffsetReset","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams","c":"KeyQueryMetadata","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams","c":"KeyValue","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.kstream","c":"JoinWindows","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.kstream","c":"Produced","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindows","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.kstream","c":"SlidingWindows","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindows","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.kstream","c":"UnlimitedWindows","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.kstream","c":"Window","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.kstream","c":"Windowed","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams","c":"LagInfo","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyRecord","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext.CapturedForward","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.processor.api","c":"Record","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment.AssignedTask","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"ProcessId","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.processor","c":"TaskId","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.processor","c":"To","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.query","c":"Position","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.query","c":"PositionBound","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.state","c":"DslKeyValueParams","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.state","c":"DslSessionParams","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.state","c":"DslWindowParams","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.state","c":"HostInfo","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.state","c":"ValueAndTimestamp","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.state","c"
:"VersionedRecord","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams","c":"StoreQueryParameters","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams","c":"StreamsMetadata","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams","c":"TaskMetadata","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.streams","c":"ThreadMetadata","l":"equals(Object)","u":"equals(java.lang.Object)"},{"p":"org.apache.kafka.connect.sink","c":"SinkTaskContext","l":"errantRecordReporter()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"ERROR"},{"p":"org.apache.kafka.common.config","c":"LogLevelConfig","l":"ERROR_LOG_LEVEL"},{"p":"org.apache.kafka.clients.admin","c":"LogDirDescription","l":"error()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerExtensionsValidatorCallback","l":"error(String, String)","u":"error(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerTokenCallback","l":"error(String, String, String)","u":"error(java.lang.String,java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerValidatorCallback","l":"error(String, String, String)","u":"error(java.lang.String,java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerTokenCallback","l":"errorCode()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerTokenCallback","l":"errorDescription()"},{"p":"org.apache.kafka.common.config","c":"ConfigValue","l":"errorMessages()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"errorNext(RuntimeException)","u":"errorNext(java.lang.RuntimeException)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerValidatorCallback","l":"errorOpenIDConfiguration()"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupsResult","l":"errors()"},{"p":"org.apache.kafka.clients.admin","c":"ListGroupsResult","l":"errors()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerValidatorCallback","l":"errorScope()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerValidatorCallback","l":"errorStatus()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerTokenCallback","l":"errorUri()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadata","l":"eventTimestampMs()"},{"p":"org.apache.kafka.common.metrics","c":"MetricConfig","l":"eventWindow()"},{"p":"org.apache.kafka.common.metrics","c":"MetricConfig","l":"eventWindow(long)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"EXACTLY_ONCE_V2"},{"p":"org.apache.kafka.connect.source","c":"SourceConnector","l":"exactlyOnceSupport(Map)","u":"exactlyOnceSupport(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteAclsResult.FilterResult","l":"exception()"},{"p":"org.apache.kafka.server.authorizer","c":"AclCreateResult","l":"exception()"},{"p":"org.apache.kafka.server.authorizer","c":"AclDeleteResult.AclBindingDeleteResult","l":"exception()"},{"p":"org.apache.kafka.server.authorizer","c":"AclDeleteResult","l":"exception()"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"EXCLUDE_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"EXCLUDE_INTERNAL_TOPICS_CONFIG"},{"p":"org.apache.kafka.streams.query","c":"
StateQueryRequest","l":"executionInfoEnabled()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"ClientJwtValidator","l":"EXPIRATION_CLAIM_NAME"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"expireDelegationToken(byte[])"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"expireDelegationToken(byte[], ExpireDelegationTokenOptions)","u":"expireDelegationToken(byte[],org.apache.kafka.clients.admin.ExpireDelegationTokenOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"expireDelegationToken(byte[], ExpireDelegationTokenOptions)","u":"expireDelegationToken(byte[],org.apache.kafka.clients.admin.ExpireDelegationTokenOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"expireDelegationToken(byte[], ExpireDelegationTokenOptions)","u":"expireDelegationToken(byte[],org.apache.kafka.clients.admin.ExpireDelegationTokenOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ExpireDelegationTokenOptions","l":"ExpireDelegationTokenOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"ExpireDelegationTokenOptions","l":"expiryTimePeriodMs()"},{"p":"org.apache.kafka.clients.admin","c":"ExpireDelegationTokenOptions","l":"expiryTimePeriodMs(long)"},{"p":"org.apache.kafka.clients.admin","c":"ExpireDelegationTokenResult","l":"expiryTimestamp()"},{"p":"org.apache.kafka.clients.admin","c":"RenewDelegationTokenResult","l":"expiryTimestamp()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"expiryTimestamp()"},{"p":"org.apache.kafka.server.telemetry","c":"ClientTelemetryReceiver","l":"exportMetrics(AuthorizableRequestContext, ClientTelemetryPayload)","u":"exportMetrics(org.apache.kafka.server.authorizer.AuthorizableRequestContext,org.apache.kafka.server.telemetry.ClientTelemetryPayload)"},{"p":"org.apache.kafka.common.security.auth","c":"SaslExtensionsCallback","l":"extensions()"},{"p":"org.apache.kafka.common.security.scram","c":"ScramExtensionsCallback","l":"extensions()"},{"p":"org.apache.kafka.common.security.scram","c":"ScramExtensionsCallback","l":"extensions(Map)","u":"extensions(java.util.Map)"},{"p":"org.apache.kafka.common.security.auth","c":"SaslExtensionsCallback","l":"extensions(SaslExtensions)","u":"extensions(org.apache.kafka.common.security.auth.SaslExtensions)"},{"p":"org.apache.kafka.streams.processor","c":"LogAndSkipOnInvalidTimestamp","l":"extract(ConsumerRecord, long)","u":"extract(org.apache.kafka.clients.consumer.ConsumerRecord,long)"},{"p":"org.apache.kafka.streams.processor","c":"TimestampExtractor","l":"extract(ConsumerRecord, long)","u":"extract(org.apache.kafka.clients.consumer.ConsumerRecord,long)"},{"p":"org.apache.kafka.streams.processor","c":"WallclockTimestampExtractor","l":"extract(ConsumerRecord, long)","u":"extract(org.apache.kafka.clients.consumer.ConsumerRecord,long)"},{"p":"org.apache.kafka.streams.processor","c":"TopicNameExtractor","l":"extract(K, V, 
RecordContext)","u":"extract(K,V,org.apache.kafka.streams.processor.RecordContext)"},{"p":"org.apache.kafka.streams.errors","c":"DeserializationExceptionHandler.DeserializationHandlerResponse","l":"FAIL"},{"p":"org.apache.kafka.streams.errors","c":"ProcessingExceptionHandler.ProcessingHandlerResponse","l":"FAIL"},{"p":"org.apache.kafka.streams.errors","c":"ProductionExceptionHandler.ProductionExceptionHandlerResponse","l":"FAIL"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"FAILED_BUILD_REMOTE_LOG_AUX_STATE_PER_SEC_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"FAILED_REMOTE_COPY_PER_SEC_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"FAILED_REMOTE_DELETE_PER_SEC_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"FAILED_REMOTE_FETCH_PER_SEC_METRIC"},{"p":"org.apache.kafka.streams.processor","c":"FailOnInvalidTimestamp","l":"FailOnInvalidTimestamp()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.config","c":"LogLevelConfig","l":"FATAL_LOG_LEVEL"},{"p":"org.apache.kafka.clients.admin","c":"DescribeFeaturesResult","l":"featureMetadata()"},{"p":"org.apache.kafka.clients.admin","c":"FeatureUpdate","l":"FeatureUpdate(short, FeatureUpdate.UpgradeType)","u":"%3Cinit%3E(short,org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType)"},{"p":"org.apache.kafka.common.errors","c":"FeatureUpdateFailedException","l":"FeatureUpdateFailedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"FeatureUpdateFailedException","l":"FeatureUpdateFailedException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"FencedInstanceIdException","l":"FencedInstanceIdException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"FencedInstanceIdException","l":"FencedInstanceIdException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"FencedLeaderEpochException","l":"FencedLeaderEpochException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"FencedLeaderEpochException","l":"FencedLeaderEpochException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"FencedMemberEpochException","l":"FencedMemberEpochException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"FenceProducersResult","l":"fencedProducers()"},{"p":"org.apache.kafka.common.errors","c":"FencedStateEpochException","l":"FencedStateEpochException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"fenceProducer()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"fenceProducers(Collection)","u":"fenceProducers(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"fenceProducers(Collection, FenceProducersOptions)","u":"fenceProducers(java.util.Collection,org.apache.kafka.clients.admin.FenceProducersOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"fenceProducers(Collection, FenceProducersOptions)","u":"fenceProducers(java.util.Collection,org.apache.kafka.clients.admin.FenceProducersOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"fenceProducers(Collection, 
FenceProducersOptions)","u":"fenceProducers(java.util.Collection,org.apache.kafka.clients.admin.FenceProducersOptions)"},{"p":"org.apache.kafka.clients.admin","c":"FenceProducersOptions","l":"FenceProducersOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaType","l":"FETCH"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"FETCH_MAX_BYTES_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"FETCH_MAX_WAIT_MS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"FETCH_MIN_BYTES_CONFIG"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlySessionStore","l":"fetch(K)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyWindowStore","l":"fetch(K, Instant, Instant)","u":"fetch(K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"WindowStore","l":"fetch(K, Instant, Instant)","u":"fetch(K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlySessionStore","l":"fetch(K, K)","u":"fetch(K,K)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyWindowStore","l":"fetch(K, K, Instant, Instant)","u":"fetch(K,K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"WindowStore","l":"fetch(K, K, Instant, Instant)","u":"fetch(K,K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"WindowStore","l":"fetch(K, K, long, long)","u":"fetch(K,K,long,long)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyWindowStore","l":"fetch(K, long)","u":"fetch(K,long)"},{"p":"org.apache.kafka.streams.state","c":"WindowStore","l":"fetch(K, long, long)","u":"fetch(K,long,long)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyWindowStore","l":"fetchAll(Instant, Instant)","u":"fetchAll(java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"WindowStore","l":"fetchAll(Instant, Instant)","u":"fetchAll(java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"WindowStore","l":"fetchAll(long, long)","u":"fetchAll(long,long)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageManager","l":"fetchIndex(RemoteLogSegmentMetadata, RemoteStorageManager.IndexType)","u":"fetchIndex(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata,org.apache.kafka.server.log.remote.storage.RemoteStorageManager.IndexType)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageManager","l":"fetchLogSegment(RemoteLogSegmentMetadata, int)","u":"fetchLogSegment(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata,int)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageManager","l":"fetchLogSegment(RemoteLogSegmentMetadata, int, int)","u":"fetchLogSegment(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata,int,int)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlySessionStore","l":"fetchSession(K, Instant, Instant)","u":"fetchSession(K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"SessionStore","l":"fetchSession(K, Instant, Instant)","u":"fetchSession(K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlySessionStore","l":"fetchSession(K, long, 
long)","u":"fetchSession(K,long,long)"},{"p":"org.apache.kafka.common.errors","c":"FetchSessionIdNotFoundException","l":"FetchSessionIdNotFoundException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"FetchSessionIdNotFoundException","l":"FetchSessionIdNotFoundException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"FetchSessionTopicIdException","l":"FetchSessionTopicIdException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"field(String)","u":"field(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"field(String)","u":"field(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"field(String)","u":"field(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Field","l":"Field(String, int, Schema)","u":"%3Cinit%3E(java.lang.String,int,org.apache.kafka.connect.data.Schema)"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"field(String, Schema)","u":"field(java.lang.String,org.apache.kafka.connect.data.Schema)"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"fields()"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"fields()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"fields()"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"FILE_DELETE_DELAY_MS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"FILE_DELETE_DELAY_MS_DOC"},{"p":"org.apache.kafka.common.config.provider","c":"FileConfigProvider","l":"FileConfigProvider()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"FileJwtRetriever","l":"FileJwtRetriever()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"ApiException","l":"fillInStackTrace()"},{"p":"org.apache.kafka.common.metrics","c":"QuotaViolationException","l":"fillInStackTrace()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"filter(Predicate)","u":"filter(org.apache.kafka.streams.kstream.Predicate)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"filter(Predicate)","u":"filter(org.apache.kafka.streams.kstream.Predicate)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"filter(Predicate, Materialized>)","u":"filter(org.apache.kafka.streams.kstream.Predicate,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"filter(Predicate, Named)","u":"filter(org.apache.kafka.streams.kstream.Predicate,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"filter(Predicate, Named)","u":"filter(org.apache.kafka.streams.kstream.Predicate,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"filter(Predicate, Named, 
Materialized>)","u":"filter(org.apache.kafka.streams.kstream.Predicate,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsOptions","l":"filteredDuration()"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsOptions","l":"filteredProducerIds()"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsOptions","l":"filteredStates()"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsOptions","l":"filteredTransactionalIdPattern()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"filterNot(Predicate)","u":"filterNot(org.apache.kafka.streams.kstream.Predicate)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"filterNot(Predicate)","u":"filterNot(org.apache.kafka.streams.kstream.Predicate)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"filterNot(Predicate, Materialized>)","u":"filterNot(org.apache.kafka.streams.kstream.Predicate,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"filterNot(Predicate, Named)","u":"filterNot(org.apache.kafka.streams.kstream.Predicate,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"filterNot(Predicate, Named)","u":"filterNot(org.apache.kafka.streams.kstream.Predicate,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"filterNot(Predicate, Named, Materialized>)","u":"filterNot(org.apache.kafka.streams.kstream.Predicate,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsOptions","l":"filterOnDuration(long)"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsOptions","l":"filterOnTransactionalIdPattern(String)","u":"filterOnTransactionalIdPattern(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsOptions","l":"filterProducerIds(Collection)","u":"filterProducerIds(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsOptions","l":"filterStates(Collection)","u":"filterStates(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"FeatureMetadata","l":"finalizedFeatures()"},{"p":"org.apache.kafka.clients.admin","c":"FeatureMetadata","l":"finalizedFeaturesEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"FinalizedVersionRange","l":"FinalizedVersionRange(short, short)","u":"%3Cinit%3E(short,short)"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntryFilter","l":"findIndefiniteField()"},{"p":"org.apache.kafka.common.acl","c":"AclBindingFilter","l":"findIndefiniteField()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePatternFilter","l":"findIndefiniteField()"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlySessionStore","l":"findSessions(K, Instant, Instant)","u":"findSessions(K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"SessionStore","l":"findSessions(K, Instant, Instant)","u":"findSessions(K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlySessionStore","l":"findSessions(K, K, Instant, Instant)","u":"findSessions(K,K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"SessionStore","l":"findSessions(K, K, Instant, Instant)","u":"findSessions(K,K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlySessionStore","l":"findSessions(K, K, 
long, long)","u":"findSessions(K,K,long,long)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlySessionStore","l":"findSessions(K, long, long)","u":"findSessions(K,long,long)"},{"p":"org.apache.kafka.streams.state","c":"SessionStore","l":"findSessions(long, long)","u":"findSessions(long,long)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"flatMap(KeyValueMapper>>)","u":"flatMap(org.apache.kafka.streams.kstream.KeyValueMapper)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"flatMap(KeyValueMapper>>, Named)","u":"flatMap(org.apache.kafka.streams.kstream.KeyValueMapper,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"flatMapValues(ValueMapper>)","u":"flatMapValues(org.apache.kafka.streams.kstream.ValueMapper)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"flatMapValues(ValueMapper>, Named)","u":"flatMapValues(org.apache.kafka.streams.kstream.ValueMapper,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"flatMapValues(ValueMapperWithKey>)","u":"flatMapValues(org.apache.kafka.streams.kstream.ValueMapperWithKey)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"flatMapValues(ValueMapperWithKey>, Named)","u":"flatMapValues(org.apache.kafka.streams.kstream.ValueMapperWithKey,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"Float()"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"FLOAT32"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"FLOAT32_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"float32()"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"FLOAT64"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"FLOAT64_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"float64()"},{"p":"org.apache.kafka.common.serialization","c":"FloatDeserializer","l":"FloatDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.FloatSerde","l":"FloatSerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"FloatSerializer","l":"FloatSerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"FLUSH_MESSAGES_INTERVAL_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"FLUSH_MESSAGES_INTERVAL_DOC"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"FLUSH_MS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"FLUSH_MS_DOC"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"flush()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"flush()"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"flush()"},{"p":"org.apache.kafka.streams.processor","c":"StateStore","l":"flush()"},{"p":"org.apache.kafka.connect.sink","c":"SinkTask","l":"flush(Map)","u":"flush(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkTask","l":"flush(Map)","u":"flush(java.util.Map)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"flushed()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"flushException"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment","l":"followupRebalanceDeadline()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Frequencies","l":"forBooleanValues(MetricName, 
MetricName)","u":"forBooleanValues(org.apache.kafka.common.MetricName,org.apache.kafka.common.MetricName)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"forceTerminateTransaction(String)","u":"forceTerminateTransaction(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"forceTerminateTransaction(String, TerminateTransactionOptions)","u":"forceTerminateTransaction(java.lang.String,org.apache.kafka.clients.admin.TerminateTransactionOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"forceTerminateTransaction(String, TerminateTransactionOptions)","u":"forceTerminateTransaction(java.lang.String,org.apache.kafka.clients.admin.TerminateTransactionOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"forceTerminateTransaction(String, TerminateTransactionOptions)","u":"forceTerminateTransaction(java.lang.String,org.apache.kafka.clients.admin.TerminateTransactionOptions)"},{"p":"org.apache.kafka.streams.kstream","c":"WindowedSerdes.TimeWindowedSerde","l":"forChangelog(boolean)"},{"p":"org.apache.kafka.common.config","c":"SslClientAuth","l":"forConfig(String)","u":"forConfig(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"ListGroupsOptions","l":"forConsumerGroups()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"foreach(ForeachAction)","u":"foreach(org.apache.kafka.streams.kstream.ForeachAction)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"foreach(ForeachAction, Named)","u":"foreach(org.apache.kafka.streams.kstream.ForeachAction,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"ForeachProcessor","l":"ForeachProcessor(ForeachAction)","u":"%3Cinit%3E(org.apache.kafka.streams.kstream.ForeachAction)"},{"p":"org.apache.kafka.streams.query","c":"QueryResult","l":"forFailure(FailureReason, String)","u":"forFailure(org.apache.kafka.streams.query.FailureReason,java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"GroupAuthorizationException","l":"forGroupId(String)","u":"forGroupId(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigOp.OpType","l":"forId(byte)"},{"p":"org.apache.kafka.clients.consumer","c":"AcknowledgeType","l":"forId(byte)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.RebalanceProtocol","l":"forId(byte)"},{"p":"org.apache.kafka.common.config","c":"ConfigResource.Type","l":"forId(byte)"},{"p":"org.apache.kafka.common","c":"IsolationLevel","l":"forId(byte)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentState","l":"forId(byte)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemotePartitionDeleteState","l":"forId(byte)"},{"p":"org.apache.kafka.common.metrics","c":"Sensor.RecordingLevel","l":"forId(int)"},{"p":"org.apache.kafka.common.security.auth","c":"SecurityProtocol","l":"forId(short)"},{"p":"org.apache.kafka.connect.mirror","c":"DefaultReplicationPolicy","l":"formatRemoteTopic(String, String)","u":"formatRemoteTopic(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"IdentityReplicationPolicy","l":"formatRemoteTopic(String, String)","u":"formatRemoteTopic(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"ReplicationPolicy","l":"formatRemoteTopic(String, 
String)","u":"formatRemoteTopic(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.common.metrics","c":"Sensor.RecordingLevel","l":"forName(String)","u":"forName(java.lang.String)"},{"p":"org.apache.kafka.common.security.auth","c":"SecurityProtocol","l":"forName(String)","u":"forName(java.lang.String)"},{"p":"org.apache.kafka.streams.query","c":"QueryResult","l":"forResult(R)"},{"p":"org.apache.kafka.clients.admin","c":"ListGroupsOptions","l":"forShareGroups()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignmentUtils.RackAwareOptimizationParams","l":"forStatefulTasks()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignmentUtils.RackAwareOptimizationParams","l":"forStatelessTasks()"},{"p":"org.apache.kafka.clients.admin","c":"ListGroupsOptions","l":"forStreamsGroups()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignmentUtils.RackAwareOptimizationParams","l":"forTasks(SortedSet)","u":"forTasks(java.util.SortedSet)"},{"p":"org.apache.kafka.clients.admin","c":"OffsetSpec","l":"forTimestamp(long)"},{"p":"org.apache.kafka.streams.kstream","c":"EmitStrategy.StrategyType","l":"forType(EmitStrategy.StrategyType)","u":"forType(org.apache.kafka.streams.kstream.EmitStrategy.StrategyType)"},{"p":"org.apache.kafka.streams.query","c":"QueryResult","l":"forUnknownQueryType(Query, StateStore)","u":"forUnknownQueryType(org.apache.kafka.streams.query.Query,org.apache.kafka.streams.processor.StateStore)"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyProcessorContext","l":"forward(FixedKeyRecord)","u":"forward(org.apache.kafka.streams.processor.api.FixedKeyRecord)"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyProcessorContext","l":"forward(FixedKeyRecord, String)","u":"forward(org.apache.kafka.streams.processor.api.FixedKeyRecord,java.lang.String)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"forward(K, V)","u":"forward(K,V)"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"forward(K, V)","u":"forward(K,V)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"forward(K, V, To)","u":"forward(K,V,org.apache.kafka.streams.processor.To)"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"forward(K, V, To)","u":"forward(K,V,org.apache.kafka.streams.processor.To)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"forward(Record)","u":"forward(org.apache.kafka.streams.processor.api.Record)"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessorContext","l":"forward(Record)","u":"forward(org.apache.kafka.streams.processor.api.Record)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"forward(Record, String)","u":"forward(org.apache.kafka.streams.processor.api.Record,java.lang.String)"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessorContext","l":"forward(Record, 
String)","u":"forward(org.apache.kafka.streams.processor.api.Record,java.lang.String)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"forwarded()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"forwarded()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"forwarded(String)","u":"forwarded(java.lang.String)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"forwarded(String)","u":"forwarded(java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"FORWARDING_ADMIN_CLASS"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"FORWARDING_ADMIN_CLASS_DEFAULT"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"FORWARDING_ADMIN_CLASS_DOC"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"ForwardingAdmin(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Frequencies","l":"Frequencies(int, double, double, Frequency...)","u":"%3Cinit%3E(int,double,double,org.apache.kafka.common.metrics.stats.Frequency...)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Frequencies","l":"frequency(MetricConfig, long, double)","u":"frequency(org.apache.kafka.common.metrics.MetricConfig,long,double)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Frequency","l":"Frequency(MetricName, double)","u":"%3Cinit%3E(org.apache.kafka.common.MetricName,double)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram.BinScheme","l":"fromBin(int)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram.ConstantBinScheme","l":"fromBin(int)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram.LinearBinScheme","l":"fromBin(int)"},{"p":"org.apache.kafka.tools.api","c":"Decoder","l":"fromBytes(byte[])"},{"p":"org.apache.kafka.tools.api","c":"DefaultDecoder","l":"fromBytes(byte[])"},{"p":"org.apache.kafka.tools.api","c":"IntegerDecoder","l":"fromBytes(byte[])"},{"p":"org.apache.kafka.tools.api","c":"LongDecoder","l":"fromBytes(byte[])"},{"p":"org.apache.kafka.tools.api","c":"StringDecoder","l":"fromBytes(byte[])"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"fromCode(byte)"},{"p":"org.apache.kafka.common.acl","c":"AclPermissionType","l":"fromCode(byte)"},{"p":"org.apache.kafka.common.resource","c":"PatternType","l":"fromCode(byte)"},{"p":"org.apache.kafka.common.resource","c":"ResourceType","l":"fromCode(byte)"},{"p":"org.apache.kafka.clients.admin","c":"FeatureUpdate.UpgradeType","l":"fromCode(int)"},{"p":"org.apache.kafka.connect.storage","c":"Converter","l":"fromConnectData(String, Headers, Schema, Object)","u":"fromConnectData(java.lang.String,org.apache.kafka.common.header.Headers,org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.storage","c":"Converter","l":"fromConnectData(String, Schema, Object)","u":"fromConnectData(java.lang.String,org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.storage","c":"StringConverter","l":"fromConnectData(String, Schema, Object)","u":"fromConnectData(java.lang.String,org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.storage","c":"HeaderConverter","l":"fromConnectHeader(String, String, Schema, Object)","u":"fromConnectHeader(java.lang.String,java.lang.String,org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.storage","c":"SimpleHeaderConverter","l":"fromConnectHeader(String, String, 
Schema, Object)","u":"fromConnectHeader(java.lang.String,java.lang.String,org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.storage","c":"StringConverter","l":"fromConnectHeader(String, String, Schema, Object)","u":"fromConnectHeader(java.lang.String,java.lang.String,org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.clients.admin","c":"EndpointType","l":"fromId(byte)"},{"p":"org.apache.kafka.connect.data","c":"Decimal","l":"fromLogical(Schema, BigDecimal)","u":"fromLogical(org.apache.kafka.connect.data.Schema,java.math.BigDecimal)"},{"p":"org.apache.kafka.connect.data","c":"Date","l":"fromLogical(Schema, Date)","u":"fromLogical(org.apache.kafka.connect.data.Schema,java.util.Date)"},{"p":"org.apache.kafka.connect.data","c":"Time","l":"fromLogical(Schema, Date)","u":"fromLogical(org.apache.kafka.connect.data.Schema,java.util.Date)"},{"p":"org.apache.kafka.connect.data","c":"Timestamp","l":"fromLogical(Schema, Date)","u":"fromLogical(org.apache.kafka.connect.data.Schema,java.util.Date)"},{"p":"org.apache.kafka.streams.query","c":"Position","l":"fromMap(Map>)","u":"fromMap(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"ScramMechanism","l":"fromMechanismName(String)","u":"fromMechanismName(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"StoreQueryParameters","l":"fromNameAndType(String, QueryableStoreType)","u":"fromNameAndType(java.lang.String,org.apache.kafka.streams.state.QueryableStoreType)"},{"p":"org.apache.kafka.connect.source","c":"SourceTask.TransactionBoundary","l":"fromProperty(String)","u":"fromProperty(java.lang.String)"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"fromRecord(String, KafkaPrincipal, KafkaPrincipal, Collection, long, long, 
long)","u":"fromRecord(java.lang.String,org.apache.kafka.common.security.auth.KafkaPrincipal,org.apache.kafka.common.security.auth.KafkaPrincipal,java.util.Collection,long,long,long)"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"fromString(String)","u":"fromString(java.lang.String)"},{"p":"org.apache.kafka.common.acl","c":"AclPermissionType","l":"fromString(String)","u":"fromString(java.lang.String)"},{"p":"org.apache.kafka.common.resource","c":"PatternType","l":"fromString(String)","u":"fromString(java.lang.String)"},{"p":"org.apache.kafka.common.resource","c":"ResourceType","l":"fromString(String)","u":"fromString(java.lang.String)"},{"p":"org.apache.kafka.common","c":"Uuid","l":"fromString(String)","u":"fromString(java.lang.String)"},{"p":"org.apache.kafka.streams.query","c":"MultiVersionedKeyQuery","l":"fromTime()"},{"p":"org.apache.kafka.streams.query","c":"MultiVersionedKeyQuery","l":"fromTime(Instant)","u":"fromTime(java.time.Instant)"},{"p":"org.apache.kafka.clients.admin","c":"ScramMechanism","l":"fromType(byte)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentId","l":"generateNew(TopicIdPartition)","u":"generateNew(org.apache.kafka.common.TopicIdPartition)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerGroupMetadata","l":"generationId()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Subscription","l":"generationId()"},{"p":"org.apache.kafka.common","c":"KafkaFuture","l":"get()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"BrokerJwtValidator.ClaimSupplier","l":"get()"},{"p":"org.apache.kafka.streams.kstream","c":"TransformerSupplier","l":"get()"},{"p":"org.apache.kafka.streams.kstream","c":"ValueTransformerSupplier","l":"get()"},{"p":"org.apache.kafka.streams.kstream","c":"ValueTransformerWithKeySupplier","l":"get()"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyProcessorSupplier","l":"get()"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessorSupplier","l":"get()"},{"p":"org.apache.kafka.streams.state","c":"StoreSupplier","l":"get()"},{"p":"org.apache.kafka.streams.state","c":"VersionedBytesStore","l":"get(Bytes, long)","u":"get(org.apache.kafka.common.utils.Bytes,long)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"get(Field)","u":"get(org.apache.kafka.connect.data.Field)"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyKeyValueStore","l":"get(K)"},{"p":"org.apache.kafka.streams.state","c":"VersionedKeyValueStore","l":"get(K)"},{"p":"org.apache.kafka.streams.state","c":"VersionedKeyValueStore","l":"get(K, long)","u":"get(K,long)"},{"p":"org.apache.kafka.common","c":"KafkaFuture","l":"get(long, TimeUnit)","u":"get(long,java.util.concurrent.TimeUnit)"},{"p":"org.apache.kafka.clients.admin","c":"Config","l":"get(String)","u":"get(java.lang.String)"},{"p":"org.apache.kafka.common.config.provider","c":"ConfigProvider","l":"get(String)","u":"get(java.lang.String)"},{"p":"org.apache.kafka.common.config.provider","c":"DirectoryConfigProvider","l":"get(String)","u":"get(java.lang.String)"},{"p":"org.apache.kafka.common.config.provider","c":"EnvVarConfigProvider","l":"get(String)","u":"get(java.lang.String)"},{"p":"org.apache.kafka.common.config.provider","c":"FileConfigProvider","l":"get(String)","u":"get(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"get(String)","u":"get(java.lang.String)"},{"p":"org.apache.kafka.common.config.provider","c":"ConfigProvider","l":"get(String, 
Set)","u":"get(java.lang.String,java.util.Set)"},{"p":"org.apache.kafka.common.config.provider","c":"DirectoryConfigProvider","l":"get(String, Set)","u":"get(java.lang.String,java.util.Set)"},{"p":"org.apache.kafka.common.config.provider","c":"EnvVarConfigProvider","l":"get(String, Set)","u":"get(java.lang.String,java.util.Set)"},{"p":"org.apache.kafka.common.config.provider","c":"FileConfigProvider","l":"get(String, Set)","u":"get(java.lang.String,java.util.Set)"},{"p":"org.apache.kafka.streams","c":"KafkaClientSupplier","l":"getAdmin(Map)","u":"getAdmin(java.util.Map)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"getAdminConfigs(String)","u":"getAdminConfigs(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"getAllStateStores()"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"getArray(String)","u":"getArray(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor","l":"getAssignorInstances(List, Map)","u":"getAssignorInstances(java.util.List,java.util.Map)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"getBoolean(Map, String, boolean)","u":"getBoolean(java.util.Map,java.lang.String,boolean)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"getBoolean(String)","u":"getBoolean(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"getBoolean(String)","u":"getBoolean(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"getBytes(String)","u":"getBytes(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"getClass(String)","u":"getClass(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"getClientTags()"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"getConfiguredInstance(String, Class)","u":"getConfiguredInstance(java.lang.String,java.lang.Class)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"getConfiguredInstance(String, Class, Map)","u":"getConfiguredInstance(java.lang.String,java.lang.Class,java.util.Map)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"getConfiguredInstances(List, Class, Map)","u":"getConfiguredInstances(java.util.List,java.lang.Class,java.util.Map)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"getConfiguredInstances(String, Class)","u":"getConfiguredInstances(java.lang.String,java.lang.Class)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"getConfiguredInstances(String, Class, 
Map)","u":"getConfiguredInstances(java.lang.String,java.lang.Class,java.util.Map)"},{"p":"org.apache.kafka.streams","c":"KafkaClientSupplier","l":"getConsumer(Map)","u":"getConsumer(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeReplicaLogDirsResult.ReplicaLogDirInfo","l":"getCurrentReplicaLogDir()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeReplicaLogDirsResult.ReplicaLogDirInfo","l":"getCurrentReplicaOffsetLag()"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"getDouble(String)","u":"getDouble(java.lang.String)"},{"p":"org.apache.kafka.streams.query","c":"QueryResult","l":"getExecutionInfo()"},{"p":"org.apache.kafka.streams.query","c":"QueryResult","l":"getFailureMessage()"},{"p":"org.apache.kafka.streams.query","c":"QueryResult","l":"getFailureReason()"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"getFloat32(String)","u":"getFloat32(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"getFloat64(String)","u":"getFloat64(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeReplicaLogDirsResult.ReplicaLogDirInfo","l":"getFutureReplicaLogDir()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeReplicaLogDirsResult.ReplicaLogDirInfo","l":"getFutureReplicaOffsetLag()"},{"p":"org.apache.kafka.streams","c":"KafkaClientSupplier","l":"getGlobalConsumer(Map)","u":"getGlobalConsumer(java.util.Map)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"getGlobalConsumerConfigs(String)","u":"getGlobalConsumerConfigs(java.lang.String)"},{"p":"org.apache.kafka.streams.query","c":"StateQueryResult","l":"getGlobalResult()"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"getHeaders()"},{"p":"org.apache.kafka.common.serialization","c":"ListSerializer","l":"getInnerSerializer()"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"getInt(String)","u":"getInt(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"getInt16(String)","u":"getInt16(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"getInt32(String)","u":"getInt32(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"getInt64(String)","u":"getInt64(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"getInt8(String)","u":"getInt8(java.lang.String)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext.CapturedPunctuator","l":"getInterval()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext.CapturedPunctuator","l":"getIntervalMs()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"getKafkaClientSupplier()"},{"p":"org.apache.kafka.streams.query","c":"KeyQuery","l":"getKey()"},{"p":"org.apache.kafka.streams.query","c":"WindowKeyQuery","l":"getKey()"},{"p":"org.apache.kafka.streams.query","c":"WindowRangeQuery","l":"getKey()"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"getKey()"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"getKeyValueStore(String)","u":"getKeyValueStore(java.lang.String)"},{"p":"org.apache.kafka.common","c":"Uuid","l":"getLeastSignificantBits()"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"getList(String)","u":"getList(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"getLong(Map, String, 
long)","u":"getLong(java.util.Map,java.lang.String,long)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"getLong(String)","u":"getLong(java.lang.String)"},{"p":"org.apache.kafka.streams.query","c":"RangeQuery","l":"getLowerBound()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"getMainConsumerConfigs(String, String, int)","u":"getMainConsumerConfigs(java.lang.String,java.lang.String,int)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"getMap(String)","u":"getMap(java.lang.String)"},{"p":"org.apache.kafka.common","c":"Uuid","l":"getMostSignificantBits()"},{"p":"org.apache.kafka.common.security.auth","c":"KafkaPrincipal","l":"getName()"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"getName()"},{"p":"org.apache.kafka.connect.storage","c":"ConverterType","l":"getName()"},{"p":"org.apache.kafka.common","c":"KafkaFuture","l":"getNow(T)"},{"p":"org.apache.kafka.streams.query","c":"StateQueryResult","l":"getOnlyPartitionResult()"},{"p":"org.apache.kafka.streams.query","c":"Position","l":"getPartitionPositions(String)","u":"getPartitionPositions(java.lang.String)"},{"p":"org.apache.kafka.streams.query","c":"StateQueryResult","l":"getPartitionResults()"},{"p":"org.apache.kafka.streams.query","c":"StateQueryRequest","l":"getPartitions()"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"getPassword(String)","u":"getPassword(java.lang.String)"},{"p":"org.apache.kafka.streams.processor","c":"StateStore","l":"getPosition()"},{"p":"org.apache.kafka.streams.query","c":"QueryResult","l":"getPosition()"},{"p":"org.apache.kafka.streams.query","c":"StateQueryResult","l":"getPosition()"},{"p":"org.apache.kafka.streams.query","c":"StateQueryRequest","l":"getPositionBound()"},{"p":"org.apache.kafka.common.security.auth","c":"KafkaPrincipal","l":"getPrincipalType()"},{"p":"org.apache.kafka.streams","c":"KafkaClientSupplier","l":"getProducer(Map)","u":"getProducer(java.util.Map)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"getProducerConfigs(String)","u":"getProducerConfigs(java.lang.String)"},{"p":"org.apache.kafka.common.security.auth","c":"SecurityProviderCreator","l":"getProvider()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext.CapturedPunctuator","l":"getPunctuator()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext.CapturedPunctuator","l":"getPunctuator()"},{"p":"org.apache.kafka.streams.query","c":"StateQueryRequest","l":"getQuery()"},{"p":"org.apache.kafka.streams","c":"TestOutputTopic","l":"getQueueSize()"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"getRecordTime()"},{"p":"org.apache.kafka.streams","c":"KafkaClientSupplier","l":"getRestoreConsumer(Map)","u":"getRestoreConsumer(java.util.Map)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"getRestoreConsumerConfigs(String)","u":"getRestoreConsumerConfigs(java.lang.String)"},{"p":"org.apache.kafka.streams.query","c":"QueryResult","l":"getResult()"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"getSensor(String)","u":"getSensor(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"getSessionStore(String)","u":"getSessionStore(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"getShort(String)","u":"getShort(java.lang.String)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"getStateStore(String)","u":"getStateStore(java.lang.String)"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessingContext","
l":"getStateStore(String)","u":"getStateStore(java.lang.String)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"getStateStore(String)","u":"getStateStore(java.lang.String)"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"getStateStore(String)","u":"getStateStore(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"getStateStore(String)","u":"getStateStore(java.lang.String)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"getStateStoreContext()"},{"p":"org.apache.kafka.streams.query","c":"StateQueryRequest","l":"getStoreName()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"getString(Map, String, String)","u":"getString(java.util.Map,java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"getString(String)","u":"getString(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"getString(String)","u":"getString(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"getStruct(String)","u":"getStruct(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"getTaskConfig()"},{"p":"org.apache.kafka.streams.query","c":"WindowKeyQuery","l":"getTimeFrom()"},{"p":"org.apache.kafka.streams.query","c":"WindowRangeQuery","l":"getTimeFrom()"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"getTimestampedKeyValueStore(String)","u":"getTimestampedKeyValueStore(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"getTimestampedWindowStore(String)","u":"getTimestampedWindowStore(java.lang.String)"},{"p":"org.apache.kafka.streams.query","c":"WindowKeyQuery","l":"getTimeTo()"},{"p":"org.apache.kafka.streams.query","c":"WindowRangeQuery","l":"getTimeTo()"},{"p":"org.apache.kafka.streams.query","c":"Position","l":"getTopics()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext.CapturedPunctuator","l":"getType()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext.CapturedPunctuator","l":"getType()"},{"p":"org.apache.kafka.streams.query","c":"RangeQuery","l":"getUpperBound()"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"getValue()"},{"p":"org.apache.kafka.streams.state","c":"ValueAndTimestamp","l":"getValueOrNull(ValueAndTimestamp)","u":"getValueOrNull(org.apache.kafka.streams.state.ValueAndTimestamp)"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"getVersionedKeyValueStore(String)","u":"getVersionedKeyValueStore(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedDeserializer","l":"getWindowSize()"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"getWindowStore(String)","u":"getWindowStore(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"getWithoutDefault(String)","u":"getWithoutDefault(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"GLOBAL_CONSUMER_PREFIX"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"globalConsumerPrefix(String)","u":"globalConsumerPrefix(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"TopologyDescription","l":"globalStores()"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"globalTable(String)","u":"globalTable(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"globalTable(String, 
Consumed)","u":"globalTable(java.lang.String,org.apache.kafka.streams.kstream.Consumed)"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"globalTable(String, Consumed, Materialized>)","u":"globalTable(java.lang.String,org.apache.kafka.streams.kstream.Consumed,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"globalTable(String, Materialized>)","u":"globalTable(java.lang.String,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"JoinWindows","l":"grace(Duration)","u":"grace(java.time.Duration)"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"gracePeriod()"},{"p":"org.apache.kafka.streams.kstream","c":"JoinWindows","l":"gracePeriodMs()"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindows","l":"gracePeriodMs()"},{"p":"org.apache.kafka.streams.kstream","c":"SlidingWindows","l":"gracePeriodMs()"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindows","l":"gracePeriodMs()"},{"p":"org.apache.kafka.streams.kstream","c":"UnlimitedWindows","l":"gracePeriodMs()"},{"p":"org.apache.kafka.streams.kstream","c":"Windows","l":"gracePeriodMs()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"group"},{"p":"org.apache.kafka.common.config","c":"ConfigResource.Type","l":"GROUP"},{"p":"org.apache.kafka.common.resource","c":"ResourceType","l":"GROUP"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"GROUP_ID_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"GROUP_INSTANCE_ID_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"GROUP_PROTOCOL_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"GROUP_PROTOCOL_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"GROUP_PROTOCOL_DOC"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"GROUP_REMOTE_ASSIGNOR_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"GROUP_REMOTE_ASSIGNOR_DOC"},{"p":"org.apache.kafka.common","c":"MetricName","l":"group()"},{"p":"org.apache.kafka.common","c":"MetricNameTemplate","l":"group()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.GroupAssignment","l":"groupAssignment()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.GroupAssignment","l":"GroupAssignment(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"GroupAssignment","l":"GroupAssignment(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.common.errors","c":"GroupAuthorizationException","l":"GroupAuthorizationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"GroupAuthorizationException","l":"GroupAuthorizationException(String, String)","u":"%3Cinit%3E(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"groupBy(KeyValueMapper>)","u":"groupBy(org.apache.kafka.streams.kstream.KeyValueMapper)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"groupBy(KeyValueMapper>, Grouped)","u":"groupBy(org.apache.kafka.streams.kstream.KeyValueMapper,org.apache.kafka.streams.kstream.Grouped)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"groupBy(KeyValueMapper)","u":"groupBy(org.apache.kafka.streams.kstream.KeyValueMapper)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"groupBy(KeyValueMapper, 
Grouped)","u":"groupBy(org.apache.kafka.streams.kstream.KeyValueMapper,org.apache.kafka.streams.kstream.Grouped)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"groupByKey()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"groupByKey(Grouped)","u":"groupByKey(org.apache.kafka.streams.kstream.Grouped)"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"groupEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"ShareGroupDescription","l":"groupEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupDescription","l":"groupEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"ClassicGroupDescription","l":"groupId()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"groupId()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupListing","l":"groupId()"},{"p":"org.apache.kafka.clients.admin","c":"GroupListing","l":"groupId()"},{"p":"org.apache.kafka.clients.admin","c":"ShareGroupDescription","l":"groupId()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupDescription","l":"groupId()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerGroupMetadata","l":"groupId()"},{"p":"org.apache.kafka.common.errors","c":"GroupAuthorizationException","l":"groupId()"},{"p":"org.apache.kafka.common.errors","c":"GroupIdNotFoundException","l":"GroupIdNotFoundException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"groupInstanceId()"},{"p":"org.apache.kafka.clients.admin","c":"MemberToRemove","l":"groupInstanceId()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerGroupMetadata","l":"groupInstanceId()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Subscription","l":"groupInstanceId()"},{"p":"org.apache.kafka.clients.admin","c":"GroupListing","l":"GroupListing(String, Optional, String, Optional)","u":"%3Cinit%3E(java.lang.String,java.util.Optional,java.lang.String,java.util.Optional)"},{"p":"org.apache.kafka.common.errors","c":"GroupMaxSizeReachedException","l":"GroupMaxSizeReachedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"CloseOptions","l":"groupMembershipOperation()"},{"p":"org.apache.kafka.clients.consumer","c":"CloseOptions","l":"groupMembershipOperation(CloseOptions.GroupMembershipOperation)","u":"groupMembershipOperation(org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"groupMetadata()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"groupMetadata()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"groupMetadata()"},{"p":"org.apache.kafka.common.errors","c":"GroupNotEmptyException","l":"GroupNotEmptyException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.util","c":"ConnectorUtils","l":"groupPartitions(List, 
int)","u":"groupPartitions(java.util.List,int)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"groups()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"groupState()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupListing","l":"groupState()"},{"p":"org.apache.kafka.clients.admin","c":"GroupListing","l":"groupState()"},{"p":"org.apache.kafka.clients.admin","c":"ShareGroupDescription","l":"groupState()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupDescription","l":"groupState()"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupsOptions","l":"groupStates()"},{"p":"org.apache.kafka.clients.admin","c":"ListGroupsOptions","l":"groupStates()"},{"p":"org.apache.kafka.common","c":"GroupState","l":"groupStatesForType(GroupType)","u":"groupStatesForType(org.apache.kafka.common.GroupType)"},{"p":"org.apache.kafka.common.errors","c":"GroupSubscribedToTopicException","l":"GroupSubscribedToTopicException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.GroupSubscription","l":"groupSubscription()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.GroupSubscription","l":"GroupSubscription(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"GSSAPI_MECHANISM"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginCallbackHandler","l":"handle(Callback[])","u":"handle(javax.security.auth.callback.Callback[])"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerValidatorCallbackHandler","l":"handle(Callback[])","u":"handle(javax.security.auth.callback.Callback[])"},{"p":"org.apache.kafka.streams.errors","c":"DeserializationExceptionHandler","l":"handle(ErrorHandlerContext, ConsumerRecord, Exception)","u":"handle(org.apache.kafka.streams.errors.ErrorHandlerContext,org.apache.kafka.clients.consumer.ConsumerRecord,java.lang.Exception)"},{"p":"org.apache.kafka.streams.errors","c":"LogAndContinueExceptionHandler","l":"handle(ErrorHandlerContext, ConsumerRecord, Exception)","u":"handle(org.apache.kafka.streams.errors.ErrorHandlerContext,org.apache.kafka.clients.consumer.ConsumerRecord,java.lang.Exception)"},{"p":"org.apache.kafka.streams.errors","c":"LogAndFailExceptionHandler","l":"handle(ErrorHandlerContext, ConsumerRecord, Exception)","u":"handle(org.apache.kafka.streams.errors.ErrorHandlerContext,org.apache.kafka.clients.consumer.ConsumerRecord,java.lang.Exception)"},{"p":"org.apache.kafka.streams.errors","c":"DefaultProductionExceptionHandler","l":"handle(ErrorHandlerContext, ProducerRecord, Exception)","u":"handle(org.apache.kafka.streams.errors.ErrorHandlerContext,org.apache.kafka.clients.producer.ProducerRecord,java.lang.Exception)"},{"p":"org.apache.kafka.streams.errors","c":"ProductionExceptionHandler","l":"handle(ErrorHandlerContext, ProducerRecord, Exception)","u":"handle(org.apache.kafka.streams.errors.ErrorHandlerContext,org.apache.kafka.clients.producer.ProducerRecord,java.lang.Exception)"},{"p":"org.apache.kafka.streams.errors","c":"LogAndContinueProcessingExceptionHandler","l":"handle(ErrorHandlerContext, Record, Exception)","u":"handle(org.apache.kafka.streams.errors.ErrorHandlerContext,org.apache.kafka.streams.processor.api.Record,java.lang.Exception)"},{"p":"org.apache.kafka.streams.errors","c":"LogAndFailProcessingExceptionHandler","l":"handle(ErrorHandlerContext, Record, 
Exception)","u":"handle(org.apache.kafka.streams.errors.ErrorHandlerContext,org.apache.kafka.streams.processor.api.Record,java.lang.Exception)"},{"p":"org.apache.kafka.streams.errors","c":"ProcessingExceptionHandler","l":"handle(ErrorHandlerContext, Record, Exception)","u":"handle(org.apache.kafka.streams.errors.ErrorHandlerContext,org.apache.kafka.streams.processor.api.Record,java.lang.Exception)"},{"p":"org.apache.kafka.streams.errors","c":"DeserializationExceptionHandler","l":"handle(ProcessorContext, ConsumerRecord, Exception)","u":"handle(org.apache.kafka.streams.processor.ProcessorContext,org.apache.kafka.clients.consumer.ConsumerRecord,java.lang.Exception)"},{"p":"org.apache.kafka.streams.errors","c":"LogAndContinueExceptionHandler","l":"handle(ProcessorContext, ConsumerRecord, Exception)","u":"handle(org.apache.kafka.streams.processor.ProcessorContext,org.apache.kafka.clients.consumer.ConsumerRecord,java.lang.Exception)"},{"p":"org.apache.kafka.streams.errors","c":"LogAndFailExceptionHandler","l":"handle(ProcessorContext, ConsumerRecord, Exception)","u":"handle(org.apache.kafka.streams.processor.ProcessorContext,org.apache.kafka.clients.consumer.ConsumerRecord,java.lang.Exception)"},{"p":"org.apache.kafka.streams.errors","c":"DefaultProductionExceptionHandler","l":"handle(ProducerRecord, Exception)","u":"handle(org.apache.kafka.clients.producer.ProducerRecord,java.lang.Exception)"},{"p":"org.apache.kafka.streams.errors","c":"ProductionExceptionHandler","l":"handle(ProducerRecord, Exception)","u":"handle(org.apache.kafka.clients.producer.ProducerRecord,java.lang.Exception)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsUncaughtExceptionHandler","l":"handle(Throwable)","u":"handle(java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"ProductionExceptionHandler","l":"handleSerializationException(ErrorHandlerContext, ProducerRecord, Exception, ProductionExceptionHandler.SerializationExceptionOrigin)","u":"handleSerializationException(org.apache.kafka.streams.errors.ErrorHandlerContext,org.apache.kafka.clients.producer.ProducerRecord,java.lang.Exception,org.apache.kafka.streams.errors.ProductionExceptionHandler.SerializationExceptionOrigin)"},{"p":"org.apache.kafka.streams.errors","c":"ProductionExceptionHandler","l":"handleSerializationException(ProducerRecord, 
Exception)","u":"handleSerializationException(org.apache.kafka.clients.producer.ProducerRecord,java.lang.Exception)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"hasCompletedShutdown()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"hasDefault()"},{"p":"org.apache.kafka.common.metrics","c":"Sensor","l":"hasExpired()"},{"p":"org.apache.kafka.clients.admin","c":"AbortTransactionSpec","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigOp","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ClassicGroupDescription","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ClientMetricsResourceListing","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"Config","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSynonym","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupListing","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeProducersOptions","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"FeatureMetadata","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"FeatureUpdate","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"FinalizedVersionRange","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"GroupListing","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupOffsetsSpec","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ListShareGroupOffsetsSpec","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ListTopicsOptions","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsOptions","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"MemberAssignment","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"MemberToRemove","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"NewTopic","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ProducerState","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo.Node","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo.ReplicaState","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"RaftVoterEndpoint","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"RecordsToDelete","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ScramCredentialInfo","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ShareGroupDescription","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ShareMemberAssignment","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"ShareMemberDescription","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupDescription","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberAssignment","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberAssignment.TaskIds","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription.Endpoint","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription.TaskOffset","l":"hashCode()"},{"p":"org.apache.kafka.clie
nts.admin","c":"StreamsGroupSubtopologyDescription","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubtopologyDescription.TopicInfo","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"SupportedVersionRange","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"TopicDescription","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionDescription","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionListing","l":"hashCode()"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialsDescription","l":"hashCode()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerGroupMetadata","l":"hashCode()"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndMetadata","l":"hashCode()"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndTimestamp","l":"hashCode()"},{"p":"org.apache.kafka.clients.consumer","c":"SubscriptionPattern","l":"hashCode()"},{"p":"org.apache.kafka.clients.producer","c":"PreparedTxnState","l":"hashCode()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"hashCode()"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntry","l":"hashCode()"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntryFilter","l":"hashCode()"},{"p":"org.apache.kafka.common.acl","c":"AclBinding","l":"hashCode()"},{"p":"org.apache.kafka.common.acl","c":"AclBindingFilter","l":"hashCode()"},{"p":"org.apache.kafka.common","c":"Cluster","l":"hashCode()"},{"p":"org.apache.kafka.common","c":"ClusterResource","l":"hashCode()"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"hashCode()"},{"p":"org.apache.kafka.common.config","c":"ConfigResource","l":"hashCode()"},{"p":"org.apache.kafka.common.config","c":"ConfigValue","l":"hashCode()"},{"p":"org.apache.kafka.common","c":"Endpoint","l":"hashCode()"},{"p":"org.apache.kafka.common","c":"MetricName","l":"hashCode()"},{"p":"org.apache.kafka.common","c":"MetricNameTemplate","l":"hashCode()"},{"p":"org.apache.kafka.common.metrics","c":"Quota","l":"hashCode()"},{"p":"org.apache.kafka.common","c":"Node","l":"hashCode()"},{"p":"org.apache.kafka.common","c":"PartitionInfo","l":"hashCode()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaAlteration.Op","l":"hashCode()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaEntity","l":"hashCode()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilter","l":"hashCode()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilterComponent","l":"hashCode()"},{"p":"org.apache.kafka.common.resource","c":"Resource","l":"hashCode()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePattern","l":"hashCode()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePatternFilter","l":"hashCode()"},{"p":"org.apache.kafka.common.security.auth","c":"KafkaPrincipal","l":"hashCode()"},{"p":"org.apache.kafka.common.security.auth","c":"SaslExtensions","l":"hashCode()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"DelegationToken","l":"hashCode()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"hashCode()"},{"p":"org.apache.kafka.common","c":"TopicIdPartition","l":"hashCode()"},{"p":"org.apache.kafka.common","c":"TopicPartition","l":"hashCode()"},{"p":"org.apache.kafka.common","c":"TopicPartitionInfo","l":"hashCode()"},{"p":"org.apache.kafka.common","c":"TopicPartitionReplica","l":"hashCode()"},{"p":"org.apache.kafka.common","c":"Uuid","l":"hashCode()"},{"p":"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"hashCode()"},{"p":"org.apach
e.kafka.connect.data","c":"ConnectSchema","l":"hashCode()"},{"p":"org.apache.kafka.connect.data","c":"Field","l":"hashCode()"},{"p":"org.apache.kafka.connect.data","c":"SchemaAndValue","l":"hashCode()"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"hashCode()"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"hashCode()"},{"p":"org.apache.kafka.connect.health","c":"AbstractState","l":"hashCode()"},{"p":"org.apache.kafka.connect.health","c":"ConnectorHealth","l":"hashCode()"},{"p":"org.apache.kafka.connect.health","c":"TaskState","l":"hashCode()"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"hashCode()"},{"p":"org.apache.kafka.connect.mirror","c":"SourceAndTarget","l":"hashCode()"},{"p":"org.apache.kafka.connect.sink","c":"SinkRecord","l":"hashCode()"},{"p":"org.apache.kafka.connect.source","c":"SourceRecord","l":"hashCode()"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"GroupAssignment","l":"hashCode()"},{"p":"org.apache.kafka.server.authorizer","c":"Action","l":"hashCode()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"LogSegmentData","l":"hashCode()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentId","l":"hashCode()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata.CustomMetadata","l":"hashCode()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"hashCode()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadataUpdate","l":"hashCode()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemotePartitionDeleteMetadata","l":"hashCode()"},{"p":"org.apache.kafka.server.policy","c":"AlterConfigPolicy.RequestMetadata","l":"hashCode()"},{"p":"org.apache.kafka.server.policy","c":"CreateTopicPolicy.RequestMetadata","l":"hashCode()"},{"p":"org.apache.kafka.streams","c":"AutoOffsetReset","l":"hashCode()"},{"p":"org.apache.kafka.streams","c":"KeyQueryMetadata","l":"hashCode()"},{"p":"org.apache.kafka.streams","c":"KeyValue","l":"hashCode()"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"hashCode()"},{"p":"org.apache.kafka.streams.kstream","c":"JoinWindows","l":"hashCode()"},{"p":"org.apache.kafka.streams.kstream","c":"Produced","l":"hashCode()"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindows","l":"hashCode()"},{"p":"org.apache.kafka.streams.kstream","c":"SlidingWindows","l":"hashCode()"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindows","l":"hashCode()"},{"p":"org.apache.kafka.streams.kstream","c":"UnlimitedWindows","l":"hashCode()"},{"p":"org.apache.kafka.streams.kstream","c":"Window","l":"hashCode()"},{"p":"org.apache.kafka.streams.kstream","c":"Windowed","l":"hashCode()"},{"p":"org.apache.kafka.streams","c":"LagInfo","l":"hashCode()"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyRecord","l":"hashCode()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext.CapturedForward","l":"hashCode()"},{"p":"org.apache.kafka.streams.processor.api","c":"Record","l":"hashCode()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment.AssignedTask","l":"hashCode()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"ProcessId","l":"hashCode()"},{"p":"org.apache.kafka.streams.processor","c":"TaskId","l":"hashCode()"},{"p":"org.apache.kafka.streams.processor","c":"To","l":"hashCode()"},{"p":"org.apache.kafka.streams.query","c":"Position","l":"hashCode()"},{"p":"org.apache.kafka.streams.query","c":"PositionBound","l":"hashCode()"}
,{"p":"org.apache.kafka.streams.state","c":"DslKeyValueParams","l":"hashCode()"},{"p":"org.apache.kafka.streams.state","c":"DslSessionParams","l":"hashCode()"},{"p":"org.apache.kafka.streams.state","c":"DslWindowParams","l":"hashCode()"},{"p":"org.apache.kafka.streams.state","c":"HostInfo","l":"hashCode()"},{"p":"org.apache.kafka.streams.state","c":"ValueAndTimestamp","l":"hashCode()"},{"p":"org.apache.kafka.streams.state","c":"VersionedRecord","l":"hashCode()"},{"p":"org.apache.kafka.streams","c":"StoreQueryParameters","l":"hashCode()"},{"p":"org.apache.kafka.streams","c":"StreamsMetadata","l":"hashCode()"},{"p":"org.apache.kafka.streams","c":"TaskMetadata","l":"hashCode()"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"hashCode()"},{"p":"org.apache.kafka.streams","c":"ThreadMetadata","l":"hashCode()"},{"p":"org.apache.kafka.common.metrics","c":"Sensor","l":"hasMetrics()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"hasNotStarted()"},{"p":"org.apache.kafka.clients.producer","c":"RecordMetadata","l":"hasOffset()"},{"p":"org.apache.kafka.common","c":"Node","l":"hasRack()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"hasStartedOrFinishedShuttingDown()"},{"p":"org.apache.kafka.clients.producer","c":"RecordMetadata","l":"hasTimestamp()"},{"p":"org.apache.kafka.clients.producer","c":"PreparedTxnState","l":"hasTransaction()"},{"p":"org.apache.kafka.connect.storage","c":"ConverterType","l":"HEADER"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"HEADER_SCHEMA"},{"p":"org.apache.kafka.connect.mirror","c":"Heartbeat","l":"HEADER_SCHEMA"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"headers()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"headers()"},{"p":"org.apache.kafka.common.errors","c":"RecordDeserializationException","l":"headers()"},{"p":"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"headers()"},{"p":"org.apache.kafka.streams.errors","c":"ErrorHandlerContext","l":"headers()"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyRecord","l":"headers()"},{"p":"org.apache.kafka.streams.processor.api","c":"Record","l":"headers()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext.CapturedForward","l":"headers()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"headers()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"headers()"},{"p":"org.apache.kafka.streams.processor","c":"RecordContext","l":"headers()"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"headers()"},{"p":"org.apache.kafka.common.header","c":"Headers","l":"headers(String)","u":"headers(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"HEARTBEAT_INTERVAL_MS_CONFIG"},{"p":"org.apache.kafka.connect.mirror","c":"Heartbeat","l":"Heartbeat(String, String, 
long)","u":"%3Cinit%3E(java.lang.String,java.lang.String,long)"},{"p":"org.apache.kafka.connect.mirror","c":"ReplicationPolicy","l":"heartbeatsTopic()"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClient","l":"heartbeatTopics()"},{"p":"org.apache.kafka.connect.mirror","c":"RemoteClusterUtils","l":"heartbeatTopics(Map)","u":"heartbeatTopics(java.util.Map)"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"SubscriptionType","l":"HETEROGENEOUS"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Importance","l":"HIGH"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadataManager","l":"highestOffsetForEpoch(TopicIdPartition, int)","u":"highestOffsetForEpoch(org.apache.kafka.common.TopicIdPartition,int)"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo","l":"highWatermark()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram","l":"Histogram(Histogram.BinScheme)","u":"%3Cinit%3E(org.apache.kafka.common.metrics.stats.Histogram.BinScheme)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"history()"},{"p":"org.apache.kafka.streams.state","c":"VersionedBytesStoreSupplier","l":"historyRetentionMs()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"DelegationToken","l":"hmac()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"DelegationToken","l":"hmacAsBase64String()"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"SubscriptionType","l":"HOMOGENEOUS"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"host()"},{"p":"org.apache.kafka.clients.admin","c":"RaftVoterEndpoint","l":"host()"},{"p":"org.apache.kafka.clients.admin","c":"ShareMemberDescription","l":"host()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription.Endpoint","l":"host()"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntry","l":"host()"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntryFilter","l":"host()"},{"p":"org.apache.kafka.common","c":"Endpoint","l":"host()"},{"p":"org.apache.kafka.common","c":"Node","l":"host()"},{"p":"org.apache.kafka.streams.state","c":"HostInfo","l":"host()"},{"p":"org.apache.kafka.streams","c":"StreamsMetadata","l":"host()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsState","l":"hostInfo()"},{"p":"org.apache.kafka.streams","c":"StreamsMetadata","l":"hostInfo()"},{"p":"org.apache.kafka.streams.state","c":"HostInfo","l":"HostInfo(String, 
int)","u":"%3Cinit%3E(java.lang.String,int)"},{"p":"org.apache.kafka.clients.consumer","c":"AcknowledgeType","l":"id"},{"p":"org.apache.kafka.common.metrics","c":"Sensor.RecordingLevel","l":"id"},{"p":"org.apache.kafka.common.security.auth","c":"SecurityProtocol","l":"id"},{"p":"org.apache.kafka.streams.errors","c":"DeserializationExceptionHandler.DeserializationHandlerResponse","l":"id"},{"p":"org.apache.kafka.streams.errors","c":"ProcessingExceptionHandler.ProcessingHandlerResponse","l":"id"},{"p":"org.apache.kafka.streams.errors","c":"ProductionExceptionHandler.ProductionExceptionHandlerResponse","l":"id"},{"p":"org.apache.kafka.streams.errors","c":"StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse","l":"id"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceTask","l":"ID_CONFIG"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkTask","l":"ID_CONFIG"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceTask","l":"ID_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigOp.OpType","l":"id()"},{"p":"org.apache.kafka.clients.admin","c":"EndpointType","l":"id()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.RebalanceProtocol","l":"id()"},{"p":"org.apache.kafka.common.config","c":"ConfigResource.Type","l":"id()"},{"p":"org.apache.kafka.common","c":"IsolationLevel","l":"id()"},{"p":"org.apache.kafka.common","c":"Node","l":"id()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentId","l":"id()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentState","l":"id()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemotePartitionDeleteState","l":"id()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment.AssignedTask","l":"id()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"ProcessId","l":"id()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskInfo","l":"id()"},{"p":"org.apache.kafka.streams","c":"TopologyDescription.GlobalStore","l":"id()"},{"p":"org.apache.kafka.streams","c":"TopologyDescription.Subtopology","l":"id()"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"IDEMPOTENT_WRITE"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignmentUtils","l":"identityAssignment(ApplicationState)","u":"identityAssignment(org.apache.kafka.streams.processor.assignment.ApplicationState)"},{"p":"org.apache.kafka.connect.mirror","c":"IdentityReplicationPolicy","l":"IdentityReplicationPolicy()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common","c":"Node","l":"idString()"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"ignore(String)","u":"ignore(java.lang.String)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerExtensionsValidatorCallback","l":"ignoredExtensions()"},{"p":"org.apache.kafka.common.errors","c":"IllegalGenerationException","l":"IllegalGenerationException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"IllegalGenerationException","l":"IllegalGenerationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"IllegalGenerationException","l":"IllegalGenerationException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"IllegalGenerationException","l":"IllegalGenerationException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"IllegalSaslStateException","l":"IllegalSaslStateException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"IllegalSaslStateException","l":"IllegalSaslStateException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.connect.errors","c":"IllegalWorkerStateException","l":"IllegalWorkerStateException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.errors","c":"IllegalWorkerStateException","l":"IllegalWorkerStateException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.connect.errors","c":"IllegalWorkerStateException","l":"IllegalWorkerStateException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"importance"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized.StoreType","l":"IN_MEMORY"},{"p":"org.apache.kafka.streams.state","c":"BuiltInDslStoreSuppliers","l":"IN_MEMORY"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"IN_MEMORY"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.CaseInsensitiveValidString","l":"in(String...)","u":"in(java.lang.String...)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ValidList","l":"in(String...)","u":"in(java.lang.String...)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ValidString","l":"in(String...)","u":"in(java.lang.String...)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindows","l":"inactivityGap()"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"INCLUDE_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClassicGroupsOptions","l":"includeAuthorizedOperations()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClusterOptions","l":"includeAuthorizedOperations()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeConsumerGroupsOptions","l":"includeAuthorizedOperations()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeShareGroupsOptions","l":"includeAuthorizedOperations()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeStreamsGroupsOptions","l":"includeAuthorizedOperations()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeTopicsOptions","l":"includeAuthorizedOperations()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClassicGroupsOptions","l":"includeAuthorizedOperations(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClusterOptions","l":"includeAuthorizedOperations(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeConsumerGroupsOptions","l":"includeAuthorizedOperations(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeShareGroupsOptions","l":"includeAuthorizedOperations(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeStreamsGroupsOptions","l":"includeAuthorizedOperations(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeTopicsOptions","l":"includeAuthorizedOperations(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeConfigsOptions","l":"includeDocumentation()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeConfigsOptions","l":"includeDocumentation(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClusterOptions","l":"includeFencedBrokers()"},{"p":"org.apache.kafka.clients.admin",
"c":"DescribeClusterOptions","l":"includeFencedBrokers(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeConfigsOptions","l":"includeSynonyms()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeConfigsOptions","l":"includeSynonyms(boolean)"},{"p":"org.apache.kafka.common.errors","c":"InconsistentClusterIdException","l":"InconsistentClusterIdException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InconsistentClusterIdException","l":"InconsistentClusterIdException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InconsistentGroupProtocolException","l":"InconsistentGroupProtocolException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InconsistentGroupProtocolException","l":"InconsistentGroupProtocolException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InconsistentTopicIdException","l":"InconsistentTopicIdException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InconsistentVoterSetException","l":"InconsistentVoterSetException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InconsistentVoterSetException","l":"InconsistentVoterSetException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"NewPartitions","l":"increaseTo(int)"},{"p":"org.apache.kafka.clients.admin","c":"NewPartitions","l":"increaseTo(int, List>)","u":"increaseTo(int,java.util.List)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"incrementalAlterConfigs(Map>)","u":"incrementalAlterConfigs(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"incrementalAlterConfigs(Map>, AlterConfigsOptions)","u":"incrementalAlterConfigs(java.util.Map,org.apache.kafka.clients.admin.AlterConfigsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"incrementalAlterConfigs(Map>, AlterConfigsOptions)","u":"incrementalAlterConfigs(java.util.Map,org.apache.kafka.clients.admin.AlterConfigsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"incrementalAlterConfigs(Map>, 
AlterConfigsOptions)","u":"incrementalAlterConfigs(java.util.Map,org.apache.kafka.clients.admin.AlterConfigsOptions)"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"INDEX_INTERVAL_BYTES_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"INDEX_INTERVAL_BYTES_DOC"},{"p":"org.apache.kafka.connect.data","c":"Field","l":"index()"},{"p":"org.apache.kafka.common.errors","c":"IneligibleReplicaException","l":"IneligibleReplicaException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"inferSchema(Object)","u":"inferSchema(java.lang.Object)"},{"p":"org.apache.kafka.common.metrics","c":"Sensor.RecordingLevel","l":"INFO"},{"p":"org.apache.kafka.common.config","c":"LogLevelConfig","l":"INFO_LOG_LEVEL"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupsOptions","l":"inGroupStates(Set)","u":"inGroupStates(java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"ListGroupsOptions","l":"inGroupStates(Set)","u":"inGroupStates(java.util.Set)"},{"p":"org.apache.kafka.streams.processor.api","c":"ContextualFixedKeyProcessor","l":"init(FixedKeyProcessorContext)","u":"init(org.apache.kafka.streams.processor.api.FixedKeyProcessorContext)"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyProcessor","l":"init(FixedKeyProcessorContext)","u":"init(org.apache.kafka.streams.processor.api.FixedKeyProcessorContext)"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"init(List)","u":"init(java.util.List)"},{"p":"org.apache.kafka.common.metrics","c":"MetricsReporter","l":"init(List)","u":"init(java.util.List)"},{"p":"org.apache.kafka.streams.kstream","c":"Transformer","l":"init(ProcessorContext)","u":"init(org.apache.kafka.streams.processor.ProcessorContext)"},{"p":"org.apache.kafka.streams.kstream","c":"ValueTransformer","l":"init(ProcessorContext)","u":"init(org.apache.kafka.streams.processor.ProcessorContext)"},{"p":"org.apache.kafka.streams.kstream","c":"ValueTransformerWithKey","l":"init(ProcessorContext)","u":"init(org.apache.kafka.streams.processor.ProcessorContext)"},{"p":"org.apache.kafka.streams.processor.api","c":"ContextualProcessor","l":"init(ProcessorContext)","u":"init(org.apache.kafka.streams.processor.api.ProcessorContext)"},{"p":"org.apache.kafka.streams.processor.api","c":"Processor","l":"init(ProcessorContext)","u":"init(org.apache.kafka.streams.processor.api.ProcessorContext)"},{"p":"org.apache.kafka.streams.processor","c":"StateStore","l":"init(StateStoreContext, StateStore)","u":"init(org.apache.kafka.streams.processor.StateStoreContext,org.apache.kafka.streams.processor.StateStore)"},{"p":"org.apache.kafka.connect.connector","c":"Connector","l":"initialize(ConnectorContext)","u":"initialize(org.apache.kafka.connect.connector.ConnectorContext)"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkConnector","l":"initialize(ConnectorContext)","u":"initialize(org.apache.kafka.connect.connector.ConnectorContext)"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceConnector","l":"initialize(ConnectorContext)","u":"initialize(org.apache.kafka.connect.connector.ConnectorContext)"},{"p":"org.apache.kafka.connect.connector","c":"Connector","l":"initialize(ConnectorContext, List>)","u":"initialize(org.apache.kafka.connect.connector.ConnectorContext,java.util.List)"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkConnector","l":"initialize(ConnectorContext, 
List>)","u":"initialize(org.apache.kafka.connect.connector.ConnectorContext,java.util.List)"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceConnector","l":"initialize(ConnectorContext, List>)","u":"initialize(org.apache.kafka.connect.connector.ConnectorContext,java.util.List)"},{"p":"org.apache.kafka.connect.sink","c":"SinkTask","l":"initialize(SinkTaskContext)","u":"initialize(org.apache.kafka.connect.sink.SinkTaskContext)"},{"p":"org.apache.kafka.connect.source","c":"SourceTask","l":"initialize(SourceTaskContext)","u":"initialize(org.apache.kafka.connect.source.SourceTaskContext)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginModule","l":"initialize(Subject, CallbackHandler, Map, Map)","u":"initialize(javax.security.auth.Subject,javax.security.auth.callback.CallbackHandler,java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.common.security.plain","c":"PlainLoginModule","l":"initialize(Subject, CallbackHandler, Map, Map)","u":"initialize(javax.security.auth.Subject,javax.security.auth.callback.CallbackHandler,java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.common.security.scram","c":"ScramLoginModule","l":"initialize(Subject, CallbackHandler, Map, Map)","u":"initialize(javax.security.auth.Subject,javax.security.auth.callback.CallbackHandler,java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"initTransactionException"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"initTransactions()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"initTransactions()"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"initTransactions()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"injectTimeoutException(int)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"injectTimeoutException(int)"},{"p":"org.apache.kafka.streams.state","c":"BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers","l":"InMemoryDslStoreSuppliers()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"inMemoryKeyValueStore(String)","u":"inMemoryKeyValueStore(java.lang.String)"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"inMemorySessionStore(String, Duration)","u":"inMemorySessionStore(java.lang.String,java.time.Duration)"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"inMemoryWindowStore(String, Duration, Duration, 
boolean)","u":"inMemoryWindowStore(java.lang.String,java.time.Duration,java.time.Duration,boolean)"},{"p":"org.apache.kafka.common.serialization","c":"ListDeserializer","l":"innerDeserializer()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerExtensionsValidatorCallback","l":"inputExtensions()"},{"p":"org.apache.kafka.common.errors","c":"CoordinatorNotAvailableException","l":"INSTANCE"},{"p":"org.apache.kafka.common.errors","c":"DisconnectException","l":"INSTANCE"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"instanceId()"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"MemberSubscription","l":"instanceId()"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupsOptions","l":"inStates(Set)","u":"inStates(java.util.Set)"},{"p":"org.apache.kafka.streams.query","c":"StateQueryRequest","l":"inStore(String)","u":"inStore(java.lang.String)"},{"p":"org.apache.kafka.common","c":"PartitionInfo","l":"inSyncReplicas()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigType","l":"INT"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Type","l":"INT"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"INT16"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"INT16_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"int16()"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"INT32"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"INT32_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"int32()"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"INT64"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"INT64_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"int64()"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"INT8"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"INT8_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"int8()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"Integer()"},{"p":"org.apache.kafka.tools.api","c":"IntegerDecoder","l":"IntegerDecoder()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"IntegerDeserializer","l":"IntegerDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.IntegerSerde","l":"IntegerSerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"IntegerSerializer","l":"IntegerSerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizerServerInfo","l":"interBrokerEndpoint()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"INTERCEPTOR_CLASSES_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"INTERCEPTOR_CLASSES_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"INTERCEPTOR_CLASSES_DOC"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"INTERCEPTOR_CLASSES_DOC"},{"p":"org.apache.kafka.common.annotation","c":"InterfaceStability","l":"InterfaceStability()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"INTERNAL_CONSUMER_WRAPPER"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"INTERNAL_TASK_ASSIGNOR_CLASS"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"INTERNAL_TOPIC_SEPARATOR_ENABLED"},{"p":"org.apache.kafka.connect.mirror","c":"DefaultReplicationPolicy","l":"INTERNAL_TOPIC_SEPARATOR_ENABLED_CONFIG"},{"p":"org.apache.kafka.connect.mirror",
"c":"DefaultReplicationPolicy","l":"INTERNAL_TOPIC_SEPARATOR_ENABLED_DEFAULT"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"INTERNAL_TOPIC_SEPARATOR_ENABLED_DEFAULT"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"INTERNAL_TOPIC_SEPARATOR_ENABLED_DOC"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"internalConfig"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"InternalConfig()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common","c":"Cluster","l":"internalTopics()"},{"p":"org.apache.kafka.common.errors","c":"InterruptException","l":"InterruptException(InterruptedException)","u":"%3Cinit%3E(java.lang.InterruptedException)"},{"p":"org.apache.kafka.common.errors","c":"InterruptException","l":"InterruptException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InterruptException","l":"InterruptException(String, InterruptedException)","u":"%3Cinit%3E(java.lang.String,java.lang.InterruptedException)"},{"p":"org.apache.kafka.connect.source","c":"SourceTask.TransactionBoundary","l":"INTERVAL"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignor.AssignmentError","l":"INVALID_STANDBY_TASK"},{"p":"org.apache.kafka.common.errors","c":"InvalidCommitOffsetSizeException","l":"InvalidCommitOffsetSizeException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidCommitOffsetSizeException","l":"InvalidCommitOffsetSizeException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidConfigurationException","l":"InvalidConfigurationException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"InvalidConfigurationException","l":"InvalidConfigurationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidConfigurationException","l":"InvalidConfigurationException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidConfigurationException","l":"InvalidConfigurationException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerExtensionsValidatorCallback","l":"invalidExtensions()"},{"p":"org.apache.kafka.common.errors","c":"InvalidFetchSessionEpochException","l":"InvalidFetchSessionEpochException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"InvalidFetchSessionEpochException","l":"InvalidFetchSessionEpochException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidFetchSizeException","l":"InvalidFetchSizeException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidFetchSizeException","l":"InvalidFetchSizeException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidGroupIdException","l":"InvalidGroupIdException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidGroupIdException","l":"InvalidGroupIdException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.consumer","c":"InvalidOffsetException","l":"InvalidOffsetException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidOffsetException","l":"InvalidOffsetException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidOffsetException","l":"InvalidOffsetException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidPartitionsException","l":"InvalidPartitionsException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidPartitionsException","l":"InvalidPartitionsException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidPidMappingException","l":"InvalidPidMappingException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidPrincipalTypeException","l":"InvalidPrincipalTypeException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidPrincipalTypeException","l":"InvalidPrincipalTypeException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidProducerEpochException","l":"InvalidProducerEpochException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common","c":"InvalidRecordException","l":"InvalidRecordException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common","c":"InvalidRecordException","l":"InvalidRecordException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidRecordStateException","l":"InvalidRecordStateException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidRegistrationException","l":"InvalidRegistrationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidRegularExpression","l":"InvalidRegularExpression(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidReplicaAssignmentException","l":"InvalidReplicaAssignmentException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidReplicaAssignmentException","l":"InvalidReplicaAssignmentException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidReplicationFactorException","l":"InvalidReplicationFactorException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidReplicationFactorException","l":"InvalidReplicationFactorException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidRequestException","l":"InvalidRequestException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidRequestException","l":"InvalidRequestException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidRequiredAcksException","l":"InvalidRequiredAcksException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidSessionTimeoutException","l":"InvalidSessionTimeoutException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidSessionTimeoutException","l":"InvalidSessionTimeoutException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidShareSessionEpochException","l":"InvalidShareSessionEpochException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"InvalidStateStoreException","l":"InvalidStateStoreException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"InvalidStateStoreException","l":"InvalidStateStoreException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"InvalidStateStoreException","l":"InvalidStateStoreException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"InvalidStateStorePartitionException","l":"InvalidStateStorePartitionException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"InvalidStateStorePartitionException","l":"InvalidStateStorePartitionException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidTimestampException","l":"InvalidTimestampException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidTimestampException","l":"InvalidTimestampException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidTopicException","l":"InvalidTopicException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"InvalidTopicException","l":"InvalidTopicException(Set)","u":"%3Cinit%3E(java.util.Set)"},{"p":"org.apache.kafka.common.errors","c":"InvalidTopicException","l":"InvalidTopicException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidTopicException","l":"InvalidTopicException(String, Set)","u":"%3Cinit%3E(java.lang.String,java.util.Set)"},{"p":"org.apache.kafka.common.errors","c":"InvalidTopicException","l":"InvalidTopicException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidTopicException","l":"InvalidTopicException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common","c":"Cluster","l":"invalidTopics()"},{"p":"org.apache.kafka.common.errors","c":"InvalidTopicException","l":"invalidTopics()"},{"p":"org.apache.kafka.common.errors","c":"InvalidTxnStateException","l":"InvalidTxnStateException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidTxnTimeoutException","l":"InvalidTxnTimeoutException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidTxnTimeoutException","l":"InvalidTxnTimeoutException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidUpdateVersionException","l":"InvalidUpdateVersionException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidUpdateVersionException","l":"InvalidUpdateVersionException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"InvalidVoterKeyException","l":"InvalidVoterKeyException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"InvalidVoterKeyException","l":"InvalidVoterKeyException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaEntity","l":"IP"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED"},{"p":"org.apache.kafka.streams.query","c":"StateQueryRequest","l":"isAllPartitions()"},{"p":"org.apache.kafka.common","c":"Cluster","l":"isBootstrapConfigured()"},{"p":"org.apache.kafka.common","c":"KafkaFuture","l":"isCancelled()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskTopicPartition","l":"isChangelog()"},{"p":"org.apache.kafka.connect.mirror","c":"DefaultReplicationPolicy","l":"isCheckpointsTopic(String)","u":"isCheckpointsTopic(java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"ReplicationPolicy","l":"isCheckpointsTopic(String)","u":"isCheckpointsTopic(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"isClassic()"},{"p":"org.apache.kafka.streams.query","c":"QueryConfig","l":"isCollectExecutionInfo()"},{"p":"org.apache.kafka.common","c":"KafkaFuture","l":"isCompletedExceptionally()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry","l":"isDefault()"},{"p":"org.apache.kafka.common.config","c":"ConfigResource","l":"isDefault()"},{"p":"org.apache.kafka.common","c":"KafkaFuture","l":"isDone()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecords","l":"isEmpty()"},{"p":"org.apache.kafka.common","c":"Node","l":"isEmpty()"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"isEmpty()"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"isEmpty()"},{"p":"org.apache.kafka.streams.query","c":"Position","l":"isEmpty()"},{"p":"org.apache.kafka.streams","c":"TestOutputTopic","l":"isEmpty()"},{"p":"org.apache.kafka.streams.query","c":"QueryResult","l":"isFailure()"},{"p":"org.apache.kafka.common","c":"Node","l":"isFenced()"},{"p":"org.apache.kafka.clients.admin","c":"ReplicaInfo","l":"isFuture()"},{"p":"org.apache.kafka.connect.mirror","c":"ReplicationPolicy","l":"isHeartbeatsTopic(String)","u":"isHeartbeatsTopic(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"TopicDescription","l":"isInternal()"},{"p":"org.apache.kafka.clients.admin","c":"TopicListing","l":"isInternal()"},{"p":"org.apache.kafka.connect.mirror","c":"ReplicationPolicy","l":"isInternalTopic(String)","u":"isInternalTopic(java.lang.String)"},{"p":"org.apache.kafka.common.metrics","c":"KafkaMetric","l":"isMeasurable()"},{"p":"org.apache.kafka.connect.mirror","c":"DefaultReplicationPolicy","l":"isMM2InternalTopic(String)","u":"isMM2InternalTopic(java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"ReplicationPolicy","l":"isMM2InternalTopic(String)","u":"isMM2InternalTopic(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"isNamedTopology()"},{"p":"org.apache.kafka.clients.consu
mer","c":"ConsumerConfig","l":"ISOLATION_LEVEL_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"ISOLATION_LEVEL_DOC"},{"p":"org.apache.kafka.clients.admin","c":"ListOffsetsOptions","l":"isolationLevel()"},{"p":"org.apache.kafka.streams.processor","c":"StateStore","l":"isOpen()"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"isOptional()"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"isOptional()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"isOptional()"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"GroupSpec","l":"isPartitionAssignable(Uuid, int)","u":"isPartitionAssignable(org.apache.kafka.common.Uuid,int)"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"GroupSpec","l":"isPartitionAssigned(Uuid, int)","u":"isPartitionAssigned(org.apache.kafka.common.Uuid,int)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"isPaused()"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"isPrimitive()"},{"p":"org.apache.kafka.common","c":"TopicPartitionInfo","l":"isr()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry","l":"isReadOnly()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadataManager","l":"isReady(TopicIdPartition)","u":"isReady(org.apache.kafka.common.TopicIdPartition)"},{"p":"org.apache.kafka.streams.query","c":"StateQueryRequest","l":"isRequireActive()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"isRunningOrRebalancing()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry","l":"isSensitive()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Type","l":"isSensitive()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"isShuttingDown()"},{"p":"org.apache.kafka.clients.admin","c":"ClassicGroupDescription","l":"isSimpleConsumerGroup()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"isSimpleConsumerGroup()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupListing","l":"isSimpleConsumerGroup()"},{"p":"org.apache.kafka.clients.admin","c":"GroupListing","l":"isSimpleConsumerGroup()"},{"p":"org.apache.kafka.streams.query","c":"KeyQuery","l":"isSkipCache()"},{"p":"org.apache.kafka.streams.query","c":"TimestampedKeyQuery","l":"isSkipCache()"},{"p":"org.apache.kafka.streams.state","c":"DslWindowParams","l":"isSlidingWindow()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskTopicPartition","l":"isSource()"},{"p":"org.apache.kafka.common.resource","c":"PatternType","l":"isSpecific()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskInfo","l":"isStateful()"},{"p":"org.apache.kafka.streams.query","c":"QueryResult","l":"isSuccess()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"ClientJwtValidator","l":"ISSUED_AT_CLAIM_NAME"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"issueTimestamp()"},{"p":"org.apache.kafka.server.telemetry","c":"ClientTelemetryPayload","l":"isTerminating()"},{"p":"org.apache.kafka.streams.state","c":"DslKeyValueParams","l":"isTimestamped()"},{"p":"org.apache.kafka.streams.state","c":"DslWindowParams","l":"isTimestamped()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"isTxnIdxEmpty()"},{"p":"org.apache.kafka.streams.query","c":"PositionBound","l":"isUnbounded()"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntry","l":"isUnknown()"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntryFilter","l":"isUnknown()"},{"p":"org.apache.kafka.comm
on.acl","c":"AclBinding","l":"isUnknown()"},{"p":"org.apache.kafka.common.acl","c":"AclBindingFilter","l":"isUnknown()"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"isUnknown()"},{"p":"org.apache.kafka.common.acl","c":"AclPermissionType","l":"isUnknown()"},{"p":"org.apache.kafka.common.resource","c":"PatternType","l":"isUnknown()"},{"p":"org.apache.kafka.common.resource","c":"Resource","l":"isUnknown()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePattern","l":"isUnknown()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePatternFilter","l":"isUnknown()"},{"p":"org.apache.kafka.common.resource","c":"ResourceType","l":"isUnknown()"},{"p":"org.apache.kafka.common.metrics","c":"Quota","l":"isUpperBound()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaEntity","l":"isValidEntityType(String)","u":"isValidEntityType(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"isValidTransition(KafkaStreams.State)","u":"isValidTransition(org.apache.kafka.streams.KafkaStreams.State)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentState","l":"isValidTransition(RemoteLogSegmentState, RemoteLogSegmentState)","u":"isValidTransition(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState,org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemotePartitionDeleteState","l":"isValidTransition(RemotePartitionDeleteState, RemotePartitionDeleteState)","u":"isValidTransition(org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteState,org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteState)"},{"p":"org.apache.kafka.clients.admin","c":"ScramCredentialInfo","l":"iterations()"},{"p":"org.apache.kafka.common.security.scram","c":"ScramCredential","l":"iterations()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecords","l":"iterator()"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"iterator()"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"JmxReporter()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"join(GlobalKTable, KeyValueMapper, ValueJoiner)","u":"join(org.apache.kafka.streams.kstream.GlobalKTable,org.apache.kafka.streams.kstream.KeyValueMapper,org.apache.kafka.streams.kstream.ValueJoiner)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"join(GlobalKTable, KeyValueMapper, ValueJoiner, Named)","u":"join(org.apache.kafka.streams.kstream.GlobalKTable,org.apache.kafka.streams.kstream.KeyValueMapper,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"join(GlobalKTable, KeyValueMapper, ValueJoinerWithKey)","u":"join(org.apache.kafka.streams.kstream.GlobalKTable,org.apache.kafka.streams.kstream.KeyValueMapper,org.apache.kafka.streams.kstream.ValueJoinerWithKey)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"join(GlobalKTable, KeyValueMapper, ValueJoinerWithKey, Named)","u":"join(org.apache.kafka.streams.kstream.GlobalKTable,org.apache.kafka.streams.kstream.KeyValueMapper,org.apache.kafka.streams.kstream.ValueJoinerWithKey,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"join(KStream, ValueJoiner, 
JoinWindows)","u":"join(org.apache.kafka.streams.kstream.KStream,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.JoinWindows)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"join(KStream, ValueJoiner, JoinWindows, StreamJoined)","u":"join(org.apache.kafka.streams.kstream.KStream,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.JoinWindows,org.apache.kafka.streams.kstream.StreamJoined)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"join(KStream, ValueJoinerWithKey, JoinWindows)","u":"join(org.apache.kafka.streams.kstream.KStream,org.apache.kafka.streams.kstream.ValueJoinerWithKey,org.apache.kafka.streams.kstream.JoinWindows)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"join(KStream, ValueJoinerWithKey, JoinWindows, StreamJoined)","u":"join(org.apache.kafka.streams.kstream.KStream,org.apache.kafka.streams.kstream.ValueJoinerWithKey,org.apache.kafka.streams.kstream.JoinWindows,org.apache.kafka.streams.kstream.StreamJoined)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"join(KTable, ValueJoiner)","u":"join(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"join(KTable, ValueJoiner, Joined)","u":"join(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Joined)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"join(KTable, ValueJoinerWithKey)","u":"join(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoinerWithKey)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"join(KTable, ValueJoinerWithKey, Joined)","u":"join(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoinerWithKey,org.apache.kafka.streams.kstream.Joined)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"join(KTable, ValueJoiner)","u":"join(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"join(KTable, ValueJoiner, Materialized>)","u":"join(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"join(KTable, ValueJoiner, Named)","u":"join(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"join(KTable, ValueJoiner, Named, Materialized>)","u":"join(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"join(KTable, BiFunction, ValueJoiner)","u":"join(org.apache.kafka.streams.kstream.KTable,java.util.function.BiFunction,org.apache.kafka.streams.kstream.ValueJoiner)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"join(KTable, BiFunction, ValueJoiner, Materialized>)","u":"join(org.apache.kafka.streams.kstream.KTable,java.util.function.BiFunction,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"join(KTable, BiFunction, ValueJoiner, 
TableJoined)","u":"join(org.apache.kafka.streams.kstream.KTable,java.util.function.BiFunction,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.TableJoined)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"join(KTable, BiFunction, ValueJoiner, TableJoined, Materialized>)","u":"join(org.apache.kafka.streams.kstream.KTable,java.util.function.BiFunction,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.TableJoined,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"join(KTable, Function, ValueJoiner)","u":"join(org.apache.kafka.streams.kstream.KTable,java.util.function.Function,org.apache.kafka.streams.kstream.ValueJoiner)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"join(KTable, Function, ValueJoiner, Materialized>)","u":"join(org.apache.kafka.streams.kstream.KTable,java.util.function.Function,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"join(KTable, Function, ValueJoiner, TableJoined)","u":"join(org.apache.kafka.streams.kstream.KTable,java.util.function.Function,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.TableJoined)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"join(KTable, Function, ValueJoiner, TableJoined, Materialized>)","u":"join(org.apache.kafka.streams.kstream.KTable,java.util.function.Function,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.TableJoined,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"JwtBearerJwtRetriever","l":"JwtBearerJwtRetriever()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"JwtBearerJwtRetriever","l":"JwtBearerJwtRetriever(Time)","u":"%3Cinit%3E(org.apache.kafka.common.utils.Time)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"JwtRetrieverException","l":"JwtRetrieverException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"JwtRetrieverException","l":"JwtRetrieverException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"JwtRetrieverException","l":"JwtRetrieverException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"JwtValidatorException","l":"JwtValidatorException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"JwtValidatorException","l":"JwtValidatorException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"JwtValidatorException","l":"JwtValidatorException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.connect.health","c":"ConnectClusterDetails","l":"kafkaClusterId()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"KafkaConsumer(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"KafkaConsumer(Map, Deserializer, 
Deserializer)","u":"%3Cinit%3E(java.util.Map,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"KafkaConsumer(Properties)","u":"%3Cinit%3E(java.util.Properties)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"KafkaConsumer(Properties, Deserializer, Deserializer)","u":"%3Cinit%3E(java.util.Properties,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer)"},{"p":"org.apache.kafka.common","c":"KafkaException","l":"KafkaException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common","c":"KafkaException","l":"KafkaException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common","c":"KafkaException","l":"KafkaException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common","c":"KafkaException","l":"KafkaException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common","c":"KafkaFuture","l":"KafkaFuture()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.metrics","c":"KafkaMetric","l":"KafkaMetric(Object, MetricName, MetricValueProvider, MetricConfig, Time)","u":"%3Cinit%3E(java.lang.Object,org.apache.kafka.common.MetricName,org.apache.kafka.common.metrics.MetricValueProvider,org.apache.kafka.common.metrics.MetricConfig,org.apache.kafka.common.utils.Time)"},{"p":"org.apache.kafka.common.metrics","c":"KafkaMetricsContext","l":"KafkaMetricsContext(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.metrics","c":"KafkaMetricsContext","l":"KafkaMetricsContext(String, Map)","u":"%3Cinit%3E(java.lang.String,java.util.Map)"},{"p":"org.apache.kafka.connect.sink","c":"SinkRecord","l":"kafkaOffset()"},{"p":"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"kafkaPartition()"},{"p":"org.apache.kafka.common.security.auth","c":"KafkaPrincipal","l":"KafkaPrincipal(String, String)","u":"%3Cinit%3E(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.common.security.auth","c":"KafkaPrincipal","l":"KafkaPrincipal(String, String, boolean)","u":"%3Cinit%3E(java.lang.String,java.lang.String,boolean)"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"KafkaProducer(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"KafkaProducer(Map, Serializer, Serializer)","u":"%3Cinit%3E(java.util.Map,org.apache.kafka.common.serialization.Serializer,org.apache.kafka.common.serialization.Serializer)"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"KafkaProducer(Properties)","u":"%3Cinit%3E(java.util.Properties)"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"KafkaProducer(Properties, Serializer, Serializer)","u":"%3Cinit%3E(java.util.Properties,org.apache.kafka.common.serialization.Serializer,org.apache.kafka.common.serialization.Serializer)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"KafkaShareConsumer(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"KafkaShareConsumer(Map, Deserializer, 
Deserializer)","u":"%3Cinit%3E(java.util.Map,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"KafkaShareConsumer(Properties)","u":"%3Cinit%3E(java.util.Properties)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"KafkaShareConsumer(Properties, Deserializer, Deserializer)","u":"%3Cinit%3E(java.util.Properties,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer)"},{"p":"org.apache.kafka.common.errors","c":"KafkaStorageException","l":"KafkaStorageException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"KafkaStorageException","l":"KafkaStorageException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"KafkaStorageException","l":"KafkaStorageException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"KafkaStorageException","l":"KafkaStorageException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"KafkaStreams(Topology, Properties)","u":"%3Cinit%3E(org.apache.kafka.streams.Topology,java.util.Properties)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"KafkaStreams(Topology, Properties, KafkaClientSupplier)","u":"%3Cinit%3E(org.apache.kafka.streams.Topology,java.util.Properties,org.apache.kafka.streams.KafkaClientSupplier)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"KafkaStreams(Topology, Properties, KafkaClientSupplier, Time)","u":"%3Cinit%3E(org.apache.kafka.streams.Topology,java.util.Properties,org.apache.kafka.streams.KafkaClientSupplier,org.apache.kafka.common.utils.Time)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"KafkaStreams(Topology, Properties, Time)","u":"%3Cinit%3E(org.apache.kafka.streams.Topology,java.util.Properties,org.apache.kafka.common.utils.Time)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"KafkaStreams(Topology, StreamsConfig)","u":"%3Cinit%3E(org.apache.kafka.streams.Topology,org.apache.kafka.streams.StreamsConfig)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"KafkaStreams(Topology, StreamsConfig, KafkaClientSupplier)","u":"%3Cinit%3E(org.apache.kafka.streams.Topology,org.apache.kafka.streams.StreamsConfig,org.apache.kafka.streams.KafkaClientSupplier)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"KafkaStreams(Topology, StreamsConfig, 
Time)","u":"%3Cinit%3E(org.apache.kafka.streams.Topology,org.apache.kafka.streams.StreamsConfig,org.apache.kafka.common.utils.Time)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"ApplicationState","l":"kafkaStreamsStates(boolean)"},{"p":"org.apache.kafka.streams","c":"KeyValue","l":"key"},{"p":"org.apache.kafka.common.errors","c":"RecordDeserializationException.DeserializationExceptionOrigin","l":"KEY"},{"p":"org.apache.kafka.connect.storage","c":"ConverterType","l":"KEY"},{"p":"org.apache.kafka.streams.errors","c":"ProductionExceptionHandler.SerializationExceptionOrigin","l":"KEY"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"KEY_DESERIALIZER_CLASS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"KEY_DESERIALIZER_CLASS_DOC"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"KEY_SCHEMA"},{"p":"org.apache.kafka.connect.mirror","c":"Heartbeat","l":"KEY_SCHEMA"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"KEY_SERIALIZER_CLASS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"KEY_SERIALIZER_CLASS_DOC"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"key()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"key()"},{"p":"org.apache.kafka.common.header","c":"Header","l":"key()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaAlteration.Op","l":"key()"},{"p":"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"key()"},{"p":"org.apache.kafka.connect.header","c":"Header","l":"key()"},{"p":"org.apache.kafka.streams.kstream","c":"Windowed","l":"key()"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyRecord","l":"key()"},{"p":"org.apache.kafka.streams.processor.api","c":"Record","l":"key()"},{"p":"org.apache.kafka.streams.query","c":"MultiVersionedKeyQuery","l":"key()"},{"p":"org.apache.kafka.streams.query","c":"TimestampedKeyQuery","l":"key()"},{"p":"org.apache.kafka.streams.query","c":"VersionedKeyQuery","l":"key()"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"key()"},{"p":"org.apache.kafka.common.errors","c":"RecordDeserializationException","l":"keyBuffer()"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"keyDeserializer()"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"keyFrom(byte[])"},{"p":"org.apache.kafka.streams","c":"KeyQueryMetadata","l":"KeyQueryMetadata(HostInfo, Set, 
int)","u":"%3Cinit%3E(org.apache.kafka.streams.state.HostInfo,java.util.Set,int)"},{"p":"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"keySchema()"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"keySchema()"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"keySchema()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"keySchema()"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"keySerde()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"keySerde()"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessingContext","l":"keySerde()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"keySerde()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"keySerde()"},{"p":"org.apache.kafka.streams.processor","c":"StateStoreContext","l":"keySerde()"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"keySerde()"},{"p":"org.apache.kafka.streams.kstream","c":"Grouped","l":"keySerde(Serde)","u":"keySerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"keySerde(Serde)","u":"keySerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Produced","l":"keySerde(Serde)","u":"keySerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"keySerializer()"},{"p":"org.apache.kafka.common.security.auth","c":"SslEngineFactory","l":"keystore()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext.CapturedForward","l":"keyValue()"},{"p":"org.apache.kafka.streams","c":"KeyValue","l":"KeyValue(K, V)","u":"%3Cinit%3E(K,V)"},{"p":"org.apache.kafka.streams.state","c":"QueryableStoreTypes","l":"keyValueStore()"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized.StoreType","l":"keyValueStore(DslKeyValueParams)","u":"keyValueStore(org.apache.kafka.streams.state.DslKeyValueParams)"},{"p":"org.apache.kafka.streams.state","c":"BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers","l":"keyValueStore(DslKeyValueParams)","u":"keyValueStore(org.apache.kafka.streams.state.DslKeyValueParams)"},{"p":"org.apache.kafka.streams.state","c":"BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers","l":"keyValueStore(DslKeyValueParams)","u":"keyValueStore(org.apache.kafka.streams.state.DslKeyValueParams)"},{"p":"org.apache.kafka.streams.state","c":"DslStoreSuppliers","l":"keyValueStore(DslKeyValueParams)","u":"keyValueStore(org.apache.kafka.streams.state.DslKeyValueParams)"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"keyValueStoreBuilder(KeyValueBytesStoreSupplier, Serde, 
Serde)","u":"keyValueStoreBuilder(org.apache.kafka.streams.state.KeyValueBytesStoreSupplier,org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsState","l":"lagFor(TaskId)","u":"lagFor(org.apache.kafka.streams.processor.TaskId)"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo.ReplicaState","l":"lastCaughtUpTimestamp()"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo.ReplicaState","l":"lastFetchTimestamp()"},{"p":"org.apache.kafka.common.header","c":"Headers","l":"lastHeader(String)","u":"lastHeader(java.lang.String)"},{"p":"org.apache.kafka.common","c":"TopicPartitionInfo","l":"lastKnownElr()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"lastPollTimeout()"},{"p":"org.apache.kafka.clients.admin","c":"ProducerState","l":"lastSequence()"},{"p":"org.apache.kafka.clients.admin","c":"ProducerState","l":"lastTimestamp()"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"lastWithName(String)","u":"lastWithName(java.lang.String)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"lastWithName(String)","u":"lastWithName(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetResetStrategy","l":"LATEST"},{"p":"org.apache.kafka.streams","c":"Topology.AutoOffsetReset","l":"LATEST"},{"p":"org.apache.kafka.clients.admin","c":"OffsetSpec","l":"latest()"},{"p":"org.apache.kafka.streams","c":"AutoOffsetReset","l":"latest()"},{"p":"org.apache.kafka.clients.admin","c":"OffsetSpec.LatestSpec","l":"LatestSpec()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"OffsetSpec","l":"latestTiered()"},{"p":"org.apache.kafka.clients.admin","c":"OffsetSpec.LatestTieredSpec","l":"LatestTieredSpec()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageManager.IndexType","l":"LEADER_EPOCH"},{"p":"org.apache.kafka.common","c":"PartitionInfo","l":"leader()"},{"p":"org.apache.kafka.common","c":"TopicPartitionInfo","l":"leader()"},{"p":"org.apache.kafka.clients.admin","c":"ListOffsetsResult.ListOffsetsResultInfo","l":"leaderEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo","l":"leaderEpoch()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"leaderEpoch()"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndMetadata","l":"leaderEpoch()"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndTimestamp","l":"leaderEpoch()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"LogSegmentData","l":"leaderEpochIndex()"},{"p":"org.apache.kafka.common","c":"Cluster","l":"leaderFor(TopicPartition)","u":"leaderFor(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo","l":"leaderId()"},{"p":"org.apache.kafka.common.errors","c":"LeaderNotAvailableException","l":"LeaderNotAvailableException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"LeaderNotAvailableException","l":"LeaderNotAvailableException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.consumer","c":"CloseOptions.GroupMembershipOperation","l":"LEAVE_GROUP"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.CloseOptions","l":"leaveGroup(boolean)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"leftJoin(GlobalKTable, KeyValueMapper, 
ValueJoiner)","u":"leftJoin(org.apache.kafka.streams.kstream.GlobalKTable,org.apache.kafka.streams.kstream.KeyValueMapper,org.apache.kafka.streams.kstream.ValueJoiner)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"leftJoin(GlobalKTable, KeyValueMapper, ValueJoiner, Named)","u":"leftJoin(org.apache.kafka.streams.kstream.GlobalKTable,org.apache.kafka.streams.kstream.KeyValueMapper,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"leftJoin(GlobalKTable, KeyValueMapper, ValueJoinerWithKey)","u":"leftJoin(org.apache.kafka.streams.kstream.GlobalKTable,org.apache.kafka.streams.kstream.KeyValueMapper,org.apache.kafka.streams.kstream.ValueJoinerWithKey)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"leftJoin(GlobalKTable, KeyValueMapper, ValueJoinerWithKey, Named)","u":"leftJoin(org.apache.kafka.streams.kstream.GlobalKTable,org.apache.kafka.streams.kstream.KeyValueMapper,org.apache.kafka.streams.kstream.ValueJoinerWithKey,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"leftJoin(KStream, ValueJoiner, JoinWindows)","u":"leftJoin(org.apache.kafka.streams.kstream.KStream,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.JoinWindows)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"leftJoin(KStream, ValueJoiner, JoinWindows, StreamJoined)","u":"leftJoin(org.apache.kafka.streams.kstream.KStream,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.JoinWindows,org.apache.kafka.streams.kstream.StreamJoined)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"leftJoin(KStream, ValueJoinerWithKey, JoinWindows)","u":"leftJoin(org.apache.kafka.streams.kstream.KStream,org.apache.kafka.streams.kstream.ValueJoinerWithKey,org.apache.kafka.streams.kstream.JoinWindows)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"leftJoin(KStream, ValueJoinerWithKey, JoinWindows, StreamJoined)","u":"leftJoin(org.apache.kafka.streams.kstream.KStream,org.apache.kafka.streams.kstream.ValueJoinerWithKey,org.apache.kafka.streams.kstream.JoinWindows,org.apache.kafka.streams.kstream.StreamJoined)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"leftJoin(KTable, ValueJoiner)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"leftJoin(KTable, ValueJoiner, Materialized>)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"leftJoin(KTable, ValueJoiner, Named)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"leftJoin(KTable, ValueJoiner, Named, Materialized>)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"leftJoin(KTable, ValueJoiner)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"leftJoin(KTable, ValueJoiner, 
Joined)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Joined)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"leftJoin(KTable, ValueJoinerWithKey)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoinerWithKey)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"leftJoin(KTable, ValueJoinerWithKey, Joined)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoinerWithKey,org.apache.kafka.streams.kstream.Joined)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"leftJoin(KTable, BiFunction, ValueJoiner)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,java.util.function.BiFunction,org.apache.kafka.streams.kstream.ValueJoiner)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"leftJoin(KTable, BiFunction, ValueJoiner, Materialized>)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,java.util.function.BiFunction,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"leftJoin(KTable, BiFunction, ValueJoiner, TableJoined)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,java.util.function.BiFunction,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.TableJoined)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"leftJoin(KTable, BiFunction, ValueJoiner, TableJoined, Materialized>)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,java.util.function.BiFunction,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.TableJoined,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"leftJoin(KTable, Function, ValueJoiner)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,java.util.function.Function,org.apache.kafka.streams.kstream.ValueJoiner)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"leftJoin(KTable, Function, ValueJoiner, Materialized>)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,java.util.function.Function,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"leftJoin(KTable, Function, ValueJoiner, TableJoined)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,java.util.function.Function,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.TableJoined)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"leftJoin(KTable, Function, ValueJoiner, TableJoined, Materialized>)","u":"leftJoin(org.apache.kafka.streams.kstream.KTable,java.util.function.Function,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.TableJoined,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerToken","l":"lifetimeMs()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Percentiles.BucketSizing","l":"LINEAR"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram.LinearBinScheme","l":"LinearBinScheme(int, 
double)","u":"%3Cinit%3E(int,double)"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"LINGER_MS_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigType","l":"LIST"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Type","l":"LIST"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listClientMetricsResources()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listClientMetricsResources(ListClientMetricsResourcesOptions)","u":"listClientMetricsResources(org.apache.kafka.clients.admin.ListClientMetricsResourcesOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"listClientMetricsResources(ListClientMetricsResourcesOptions)","u":"listClientMetricsResources(org.apache.kafka.clients.admin.ListClientMetricsResourcesOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"listClientMetricsResources(ListClientMetricsResourcesOptions)","u":"listClientMetricsResources(org.apache.kafka.clients.admin.ListClientMetricsResourcesOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ListClientMetricsResourcesOptions","l":"ListClientMetricsResourcesOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listConfigResources()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listConfigResources(Set, ListConfigResourcesOptions)","u":"listConfigResources(java.util.Set,org.apache.kafka.clients.admin.ListConfigResourcesOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"listConfigResources(Set, ListConfigResourcesOptions)","u":"listConfigResources(java.util.Set,org.apache.kafka.clients.admin.ListConfigResourcesOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"listConfigResources(Set, ListConfigResourcesOptions)","u":"listConfigResources(java.util.Set,org.apache.kafka.clients.admin.ListConfigResourcesOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ListConfigResourcesOptions","l":"ListConfigResourcesOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listConsumerGroupOffsets(Map)","u":"listConsumerGroupOffsets(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listConsumerGroupOffsets(Map, ListConsumerGroupOffsetsOptions)","u":"listConsumerGroupOffsets(java.util.Map,org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"listConsumerGroupOffsets(Map, ListConsumerGroupOffsetsOptions)","u":"listConsumerGroupOffsets(java.util.Map,org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"listConsumerGroupOffsets(Map, ListConsumerGroupOffsetsOptions)","u":"listConsumerGroupOffsets(java.util.Map,org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listConsumerGroupOffsets(String)","u":"listConsumerGroupOffsets(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listConsumerGroupOffsets(String, 
ListConsumerGroupOffsetsOptions)","u":"listConsumerGroupOffsets(java.lang.String,org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupOffsetsOptions","l":"ListConsumerGroupOffsetsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupOffsetsSpec","l":"ListConsumerGroupOffsetsSpec()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listConsumerGroups()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listConsumerGroups(ListConsumerGroupsOptions)","u":"listConsumerGroups(org.apache.kafka.clients.admin.ListConsumerGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"listConsumerGroups(ListConsumerGroupsOptions)","u":"listConsumerGroups(org.apache.kafka.clients.admin.ListConsumerGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"listConsumerGroups(ListConsumerGroupsOptions)","u":"listConsumerGroups(org.apache.kafka.clients.admin.ListConsumerGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupsOptions","l":"ListConsumerGroupsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"ListDeserializer","l":"ListDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"ListDeserializer","l":"ListDeserializer(Class, Deserializer)","u":"%3Cinit%3E(java.lang.Class,org.apache.kafka.common.serialization.Deserializer)"},{"p":"org.apache.kafka.clients.admin","c":"RaftVoterEndpoint","l":"listener()"},{"p":"org.apache.kafka.common","c":"Endpoint","l":"listener()"},{"p":"org.apache.kafka.common","c":"Endpoint","l":"listenerName()"},{"p":"org.apache.kafka.common.security.auth","c":"AuthenticationContext","l":"listenerName()"},{"p":"org.apache.kafka.common.security.auth","c":"PlaintextAuthenticationContext","l":"listenerName()"},{"p":"org.apache.kafka.common.security.auth","c":"SaslAuthenticationContext","l":"listenerName()"},{"p":"org.apache.kafka.common.security.auth","c":"SslAuthenticationContext","l":"listenerName()"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizableRequestContext","l":"listenerName()"},{"p":"org.apache.kafka.common.errors","c":"ListenerNotFoundException","l":"ListenerNotFoundException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"ListenerNotFoundException","l":"ListenerNotFoundException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listGroups()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listGroups(ListGroupsOptions)","u":"listGroups(org.apache.kafka.clients.admin.ListGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"listGroups(ListGroupsOptions)","u":"listGroups(org.apache.kafka.clients.admin.ListGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"listGroups(ListGroupsOptions)","u":"listGroups(org.apache.kafka.clients.admin.ListGroupsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ListGroupsOptions","l":"ListGroupsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"ListTopicsResult","l":"listings()"},{"p":"org.apache.kafka.clients.admin","c":"ListTopicsOptions","l":"listInternal(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listOffsets(Map)","u":"listOffsets(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listOffsets(Map, 
ListOffsetsOptions)","u":"listOffsets(java.util.Map,org.apache.kafka.clients.admin.ListOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"listOffsets(Map, ListOffsetsOptions)","u":"listOffsets(java.util.Map,org.apache.kafka.clients.admin.ListOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"listOffsets(Map, ListOffsetsOptions)","u":"listOffsets(java.util.Map,org.apache.kafka.clients.admin.ListOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ListOffsetsOptions","l":"ListOffsetsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"ListOffsetsOptions","l":"ListOffsetsOptions(IsolationLevel)","u":"%3Cinit%3E(org.apache.kafka.common.IsolationLevel)"},{"p":"org.apache.kafka.clients.admin","c":"ListOffsetsResult","l":"ListOffsetsResult(Map>)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"ListOffsetsResult.ListOffsetsResultInfo","l":"ListOffsetsResultInfo(long, long, Optional)","u":"%3Cinit%3E(long,long,java.util.Optional)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listPartitionReassignments()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listPartitionReassignments(ListPartitionReassignmentsOptions)","u":"listPartitionReassignments(org.apache.kafka.clients.admin.ListPartitionReassignmentsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listPartitionReassignments(Optional>, ListPartitionReassignmentsOptions)","u":"listPartitionReassignments(java.util.Optional,org.apache.kafka.clients.admin.ListPartitionReassignmentsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"listPartitionReassignments(Optional>, ListPartitionReassignmentsOptions)","u":"listPartitionReassignments(java.util.Optional,org.apache.kafka.clients.admin.ListPartitionReassignmentsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"listPartitionReassignments(Optional>, ListPartitionReassignmentsOptions)","u":"listPartitionReassignments(java.util.Optional,org.apache.kafka.clients.admin.ListPartitionReassignmentsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listPartitionReassignments(Set)","u":"listPartitionReassignments(java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listPartitionReassignments(Set, ListPartitionReassignmentsOptions)","u":"listPartitionReassignments(java.util.Set,org.apache.kafka.clients.admin.ListPartitionReassignmentsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ListPartitionReassignmentsOptions","l":"ListPartitionReassignmentsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadataManager","l":"listRemoteLogSegments(TopicIdPartition)","u":"listRemoteLogSegments(org.apache.kafka.common.TopicIdPartition)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadataManager","l":"listRemoteLogSegments(TopicIdPartition, int)","u":"listRemoteLogSegments(org.apache.kafka.common.TopicIdPartition,int)"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.ListSerde","l":"ListSerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.ListSerde","l":"ListSerde(Class, Serde)","u":"%3Cinit%3E(java.lang.Class,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"ListSerde(Class, 
Serde)","u":"ListSerde(java.lang.Class,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.common.serialization","c":"ListSerializer","l":"ListSerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"ListSerializer","l":"ListSerializer(Serializer)","u":"%3Cinit%3E(org.apache.kafka.common.serialization.Serializer)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listShareGroupOffsets(Map)","u":"listShareGroupOffsets(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listShareGroupOffsets(Map, ListShareGroupOffsetsOptions)","u":"listShareGroupOffsets(java.util.Map,org.apache.kafka.clients.admin.ListShareGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"listShareGroupOffsets(Map, ListShareGroupOffsetsOptions)","u":"listShareGroupOffsets(java.util.Map,org.apache.kafka.clients.admin.ListShareGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"listShareGroupOffsets(Map, ListShareGroupOffsetsOptions)","u":"listShareGroupOffsets(java.util.Map,org.apache.kafka.clients.admin.ListShareGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ListShareGroupOffsetsOptions","l":"ListShareGroupOffsetsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"ListShareGroupOffsetsSpec","l":"ListShareGroupOffsetsSpec()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listStreamsGroupOffsets(Map)","u":"listStreamsGroupOffsets(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listStreamsGroupOffsets(Map, ListStreamsGroupOffsetsOptions)","u":"listStreamsGroupOffsets(java.util.Map,org.apache.kafka.clients.admin.ListStreamsGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"listStreamsGroupOffsets(Map, ListStreamsGroupOffsetsOptions)","u":"listStreamsGroupOffsets(java.util.Map,org.apache.kafka.clients.admin.ListStreamsGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"listStreamsGroupOffsets(Map, 
ListStreamsGroupOffsetsOptions)","u":"listStreamsGroupOffsets(java.util.Map,org.apache.kafka.clients.admin.ListStreamsGroupOffsetsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ListStreamsGroupOffsetsOptions","l":"ListStreamsGroupOffsetsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"ListStreamsGroupOffsetsSpec","l":"ListStreamsGroupOffsetsSpec()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listTopics()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"listTopics()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"listTopics()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"listTopics()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"listTopics(Duration)","u":"listTopics(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"listTopics(Duration)","u":"listTopics(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"listTopics(Duration)","u":"listTopics(java.time.Duration)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listTopics(ListTopicsOptions)","u":"listTopics(org.apache.kafka.clients.admin.ListTopicsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"listTopics(ListTopicsOptions)","u":"listTopics(org.apache.kafka.clients.admin.ListTopicsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"listTopics(ListTopicsOptions)","u":"listTopics(org.apache.kafka.clients.admin.ListTopicsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ListTopicsOptions","l":"ListTopicsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listTransactions()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"listTransactions(ListTransactionsOptions)","u":"listTransactions(org.apache.kafka.clients.admin.ListTransactionsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"listTransactions(ListTransactionsOptions)","u":"listTransactions(org.apache.kafka.clients.admin.ListTransactionsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"listTransactions(ListTransactionsOptions)","u":"listTransactions(org.apache.kafka.clients.admin.ListTransactionsOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsOptions","l":"ListTransactionsOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.resource","c":"PatternType","l":"LITERAL"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"LOCAL_LOG_RETENTION_BYTES_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"LOCAL_LOG_RETENTION_BYTES_DOC"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"LOCAL_LOG_RETENTION_MS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"LOCAL_LOG_RETENTION_MS_DOC"},{"p":"org.apache.kafka.streams.errors","c":"LockException","l":"LockException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"LockException","l":"LockException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"LockException","l":"LockException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.streams.kstream","c":"EmitStrategy","l":"log"},{"p":"org.apache.kafka.streams.state","c":"RocksDBConfigSetter","l":"LOG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"LOG_SUMMARY_INTERVAL_MS_CONFIG"},{"p":"org.apache.kafka.streams.errors","c":"LogAndContinueExceptionHandler","l":"LogAndContinueExceptionHandler()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.errors","c":"LogAndContinueProcessingExceptionHandler","l":"LogAndContinueProcessingExceptionHandler()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.errors","c":"LogAndFailExceptionHandler","l":"LogAndFailExceptionHandler()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.errors","c":"LogAndFailProcessingExceptionHandler","l":"LogAndFailProcessingExceptionHandler()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.processor","c":"LogAndSkipOnInvalidTimestamp","l":"LogAndSkipOnInvalidTimestamp()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.state","c":"StoreBuilder","l":"logConfig()"},{"p":"org.apache.kafka.clients.admin","c":"LogDirDescription","l":"LogDirDescription(ApiException, Map)","u":"%3Cinit%3E(org.apache.kafka.common.errors.ApiException,java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"LogDirDescription","l":"LogDirDescription(ApiException, Map, long, long)","u":"%3Cinit%3E(org.apache.kafka.common.errors.ApiException,java.util.Map,long,long)"},{"p":"org.apache.kafka.common.errors","c":"LogDirNotFoundException","l":"LogDirNotFoundException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"LogDirNotFoundException","l":"LogDirNotFoundException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"LogDirNotFoundException","l":"LogDirNotFoundException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo.ReplicaState","l":"logEndOffset()"},{"p":"org.apache.kafka.streams.state","c":"StoreBuilder","l":"loggingEnabled()"},{"p":"org.apache.kafka.connect.data","c":"Date","l":"LOGICAL_NAME"},{"p":"org.apache.kafka.connect.data","c":"Decimal","l":"LOGICAL_NAME"},{"p":"org.apache.kafka.connect.data","c":"Time","l":"LOGICAL_NAME"},{"p":"org.apache.kafka.connect.data","c":"Timestamp","l":"LOGICAL_NAME"},{"p":"org.apache.kafka.server.authorizer","c":"Action","l":"logIfAllowed()"},{"p":"org.apache.kafka.server.authorizer","c":"Action","l":"logIfDenied()"},{"p":"org.apache.kafka.common.security.auth","c":"Login","l":"login()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginModule","l":"login()"},{"p":"org.apache.kafka.common.security.plain","c":"PlainLoginModule","l":"login()"},{"p":"org.apache.kafka.common.security.scram","c":"ScramLoginModule","l":"login()"},{"p":"org.apache.kafka.common.config","c":"LogLevelConfig","l":"LogLevelConfig()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginModule","l":"logout()"},{"p":"org.apache.kafka.common.security.plain","c":"PlainLoginModule","l":"logout()"},{"p":"org.apache.kafka.common.security.scram","c":"ScramLoginModule","l":"logout()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"LogSegmentData","l":"logSegment()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"LogSegmentData","l":"LogSegmentData(Path, 
Path, Path, Optional, Path, ByteBuffer)","u":"%3Cinit%3E(java.nio.file.Path,java.nio.file.Path,java.nio.file.Path,java.util.Optional,java.nio.file.Path,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.clients.consumer","c":"LogTruncationException","l":"LogTruncationException(Map, Map)","u":"%3Cinit%3E(java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"LogTruncationException","l":"LogTruncationException(String, Map, Map)","u":"%3Cinit%3E(java.lang.String,java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"logUnused()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigType","l":"LONG"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Type","l":"LONG"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Width","l":"LONG"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"Long()"},{"p":"org.apache.kafka.tools.api","c":"LongDecoder","l":"LongDecoder()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"LongDeserializer","l":"LongDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.LongSerde","l":"LongSerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"LongSerializer","l":"LongSerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Importance","l":"LOW"},{"p":"org.apache.kafka.streams.query","c":"TimestampedRangeQuery","l":"lowerBound()"},{"p":"org.apache.kafka.common.metrics","c":"Quota","l":"lowerBound(double)"},{"p":"org.apache.kafka.clients.admin","c":"DeletedRecords","l":"lowWatermark()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteRecordsResult","l":"lowWatermarks()"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"lruMap(String, int)","u":"lruMap(java.lang.String,int)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"MAIN_CONSUMER_PREFIX"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"main(String[])","u":"main(java.lang.String[])"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"main(String[])","u":"main(java.lang.String[])"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"main(String[])","u":"main(java.lang.String[])"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"main(String[])","u":"main(java.lang.String[])"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"mainConsumerPrefix(String)","u":"mainConsumerPrefix(java.lang.String)"},{"p":"org.apache.kafka.streams.state","c":"ValueAndTimestamp","l":"make(V, long)","u":"make(V,long)"},{"p":"org.apache.kafka.streams.state","c":"ValueAndTimestamp","l":"makeAllowNullable(V, long)","u":"makeAllowNullable(V,long)"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"MAP"},{"p":"org.apache.kafka.common.security.auth","c":"SaslExtensions","l":"map()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"map(KeyValueMapper>)","u":"map(org.apache.kafka.streams.kstream.KeyValueMapper)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"map(KeyValueMapper>, Named)","u":"map(org.apache.kafka.streams.kstream.KeyValueMapper,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"map(Schema, 
Schema)","u":"map(org.apache.kafka.connect.data.Schema,org.apache.kafka.connect.data.Schema)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"mapValues(ValueMapper)","u":"mapValues(org.apache.kafka.streams.kstream.ValueMapper)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"mapValues(ValueMapper, Named)","u":"mapValues(org.apache.kafka.streams.kstream.ValueMapper,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"mapValues(ValueMapper)","u":"mapValues(org.apache.kafka.streams.kstream.ValueMapper)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"mapValues(ValueMapper, Materialized>)","u":"mapValues(org.apache.kafka.streams.kstream.ValueMapper,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"mapValues(ValueMapper, Named)","u":"mapValues(org.apache.kafka.streams.kstream.ValueMapper,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"mapValues(ValueMapper, Named, Materialized>)","u":"mapValues(org.apache.kafka.streams.kstream.ValueMapper,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"mapValues(ValueMapperWithKey)","u":"mapValues(org.apache.kafka.streams.kstream.ValueMapperWithKey)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"mapValues(ValueMapperWithKey, Named)","u":"mapValues(org.apache.kafka.streams.kstream.ValueMapperWithKey,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"mapValues(ValueMapperWithKey)","u":"mapValues(org.apache.kafka.streams.kstream.ValueMapperWithKey)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"mapValues(ValueMapperWithKey, Materialized>)","u":"mapValues(org.apache.kafka.streams.kstream.ValueMapperWithKey,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"mapValues(ValueMapperWithKey, Named)","u":"mapValues(org.apache.kafka.streams.kstream.ValueMapperWithKey,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"mapValues(ValueMapperWithKey, Named, 
Materialized>)","u":"mapValues(org.apache.kafka.streams.kstream.ValueMapperWithKey,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.common.resource","c":"PatternType","l":"MATCH"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilterComponent","l":"match()"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntryFilter","l":"matches(AccessControlEntry)","u":"matches(org.apache.kafka.common.acl.AccessControlEntry)"},{"p":"org.apache.kafka.common.acl","c":"AclBindingFilter","l":"matches(AclBinding)","u":"matches(org.apache.kafka.common.acl.AclBinding)"},{"p":"org.apache.kafka.common.resource","c":"ResourcePatternFilter","l":"matches(ResourcePattern)","u":"matches(org.apache.kafka.common.resource.ResourcePattern)"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntryFilter","l":"matchesAtMostOne()"},{"p":"org.apache.kafka.common.acl","c":"AclBindingFilter","l":"matchesAtMostOne()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePatternFilter","l":"matchesAtMostOne()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"MAX_BLOCK_MS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MAX_COMPACTION_LAG_MS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MAX_COMPACTION_LAG_MS_DOC"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MAX_MESSAGE_BYTES_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MAX_MESSAGE_BYTES_DOC"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"MAX_PARTITION_FETCH_BYTES_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"MAX_POLL_INTERVAL_MS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"MAX_POLL_RECORDS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"MAX_RACK_AWARE_ASSIGNMENT_TAG_KEY_LENGTH"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"MAX_RACK_AWARE_ASSIGNMENT_TAG_LIST_SIZE"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"MAX_RACK_AWARE_ASSIGNMENT_TAG_VALUE_LENGTH"},{"p":"org.apache.kafka.common.metrics","c":"Sensor.RecordingLevel","l":"MAX_RECORDING_LEVEL_KEY"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"MAX_REQUEST_SIZE_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"MAX_TASK_IDLE_MS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"MAX_TASK_IDLE_MS_DISABLED"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"MAX_TASK_IDLE_MS_DOC"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"MAX_WARMUP_REPLICAS_CONFIG"},{"p":"org.apache.kafka.common.metrics.stats","c":"Max","l":"Max()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"maxBufferedSize"},{"p":"org.apache.kafka.streams","c":"TopologyConfig.TaskConfig","l":"maxBufferedSize"},{"p":"org.apache.kafka.streams.kstream","c":"Suppressed.BufferConfig","l":"maxBytes(long)"},{"p":"org.apache.kafka.clients.admin","c":"CreateDelegationTokenOptions","l":"maxlifeTimeMs()"},{"p":"org.apache.kafka.clients.admin","c":"CreateDelegationTokenOptions","l":"maxLifetimeMs()"},{"p":"org.apache.kafka.clients.admin","c":"CreateDelegationTokenOptions","l":"maxlifeTimeMs(long)"},{"p":"org.apache.kafka.clients.admin","c":"CreateDelegationTokenOptions","l":"maxLifetimeMs(long)"},{"p":"org.apache.kafka.streams.kstream","c":"Suppressed.BufferConfig","l":"maxRecords(long
)"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"maxTaskIdleMs"},{"p":"org.apache.kafka.streams","c":"TopologyConfig.TaskConfig","l":"maxTaskIdleMs"},{"p":"org.apache.kafka.clients.admin","c":"OffsetSpec","l":"maxTimestamp()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"maxTimestamp()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"maxTimestampMs()"},{"p":"org.apache.kafka.clients.admin","c":"OffsetSpec.MaxTimestampSpec","l":"MaxTimestampSpec()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"SupportedVersionRange","l":"maxVersion()"},{"p":"org.apache.kafka.clients.admin","c":"FeatureUpdate","l":"maxVersionLevel()"},{"p":"org.apache.kafka.clients.admin","c":"FinalizedVersionRange","l":"maxVersionLevel()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"AssignmentConfigs","l":"maxWarmupReplicas()"},{"p":"org.apache.kafka.common.metrics","c":"KafkaMetric","l":"measurable()"},{"p":"org.apache.kafka.common.metrics","c":"Measurable","l":"measure(MetricConfig, long)","u":"measure(org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"CumulativeSum","l":"measure(MetricConfig, long)","u":"measure(org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Rate","l":"measure(MetricConfig, long)","u":"measure(org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"SampledStat","l":"measure(MetricConfig, long)","u":"measure(org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"TokenBucket","l":"measure(MetricConfig, long)","u":"measure(org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Value","l":"measure(MetricConfig, long)","u":"measure(org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.clients.admin","c":"ScramCredentialInfo","l":"mechanism()"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialDeletion","l":"mechanism()"},{"p":"org.apache.kafka.clients.admin","c":"ScramMechanism","l":"mechanismName()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Importance","l":"MEDIUM"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Width","l":"MEDIUM"},{"p":"org.apache.kafka.clients.admin","c":"MemberAssignment","l":"MemberAssignment(Set)","u":"%3Cinit%3E(java.util.Set)"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"GroupSpec","l":"memberAssignment(String)","u":"memberAssignment(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"MemberDescription(String, Optional, String, String, MemberAssignment)","u":"%3Cinit%3E(java.lang.String,java.util.Optional,java.lang.String,java.lang.String,org.apache.kafka.clients.admin.MemberAssignment)"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"MemberDescription(String, Optional, String, String, MemberAssignment, Optional)","u":"%3Cinit%3E(java.lang.String,java.util.Optional,java.lang.String,java.lang.String,org.apache.kafka.clients.admin.MemberAssignment,java.util.Optional)"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"MemberDescription(String, Optional, String, String, MemberAssignment, Optional, Optional, 
Optional)","u":"%3Cinit%3E(java.lang.String,java.util.Optional,java.lang.String,java.lang.String,org.apache.kafka.clients.admin.MemberAssignment,java.util.Optional,java.util.Optional,java.util.Optional)"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"MemberDescription(String, String, String, MemberAssignment)","u":"%3Cinit%3E(java.lang.String,java.lang.String,java.lang.String,org.apache.kafka.clients.admin.MemberAssignment)"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"memberEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"ShareMemberDescription","l":"memberEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"memberEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"memberId()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerGroupMetadata","l":"memberId()"},{"p":"org.apache.kafka.common.errors","c":"MemberIdRequiredException","l":"MemberIdRequiredException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"MemberIdRequiredException","l":"MemberIdRequiredException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"GroupSpec","l":"memberIds()"},{"p":"org.apache.kafka.clients.admin","c":"RemoveMembersFromConsumerGroupResult","l":"memberResult(MemberToRemove)","u":"memberResult(org.apache.kafka.clients.admin.MemberToRemove)"},{"p":"org.apache.kafka.clients.admin","c":"ClassicGroupDescription","l":"members()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"members()"},{"p":"org.apache.kafka.clients.admin","c":"RemoveMembersFromConsumerGroupOptions","l":"members()"},{"p":"org.apache.kafka.clients.admin","c":"ShareGroupDescription","l":"members()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupDescription","l":"members()"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"GroupAssignment","l":"members()"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"GroupSpec","l":"memberSubscription(String)","u":"memberSubscription(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"MemberToRemove","l":"MemberToRemove(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"MERGE_REPARTITION_TOPICS"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"merge(KStream)","u":"merge(org.apache.kafka.streams.kstream.KStream)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"merge(KStream, 
Named)","u":"merge(org.apache.kafka.streams.kstream.KStream,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.query","c":"Position","l":"merge(Position)","u":"merge(org.apache.kafka.streams.query.Position)"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MESSAGE_DOWNCONVERSION_ENABLE_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MESSAGE_DOWNCONVERSION_ENABLE_DOC"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MESSAGE_TIMESTAMP_AFTER_MAX_MS_DOC"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MESSAGE_TIMESTAMP_BEFORE_MAX_MS_DOC"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MESSAGE_TIMESTAMP_TYPE_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MESSAGE_TIMESTAMP_TYPE_DOC"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"METADATA_KEY"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"METADATA_MAX_AGE_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"METADATA_MAX_AGE_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"METADATA_MAX_AGE_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"METADATA_MAX_AGE_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"METADATA_MAX_IDLE_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"METADATA_RECOVERY_STRATEGY_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"METADATA_RECOVERY_STRATEGY_DOC"},{"p":"org.apache.kafka.common","c":"Uuid","l":"METADATA_TOPIC_ID"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndMetadata","l":"metadata()"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"metadata()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"metadataForAllStreamsClients()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"metadataForLocalThreads()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Meter","l":"Meter(MetricName, MetricName)","u":"%3Cinit%3E(org.apache.kafka.common.MetricName,org.apache.kafka.common.MetricName)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Meter","l":"Meter(SampledStat, MetricName, MetricName)","u":"%3Cinit%3E(org.apache.kafka.common.metrics.stats.SampledStat,org.apache.kafka.common.MetricName,org.apache.kafka.common.MetricName)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Meter","l":"Meter(TimeUnit, MetricName, MetricName)","u":"%3Cinit%3E(java.util.concurrent.TimeUnit,org.apache.kafka.common.MetricName,org.apache.kafka.common.MetricName)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Meter","l":"Meter(TimeUnit, SampledStat, MetricName, 
MetricName)","u":"%3Cinit%3E(java.util.concurrent.TimeUnit,org.apache.kafka.common.metrics.stats.SampledStat,org.apache.kafka.common.MetricName,org.apache.kafka.common.MetricName)"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"METRIC_REPORTER_CLASSES_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"METRIC_REPORTER_CLASSES_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"METRIC_REPORTER_CLASSES_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"METRIC_REPORTER_CLASSES_CONFIG"},{"p":"org.apache.kafka.common.metrics","c":"QuotaViolationException","l":"metric()"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"metric(MetricName)","u":"metric(org.apache.kafka.common.MetricName)"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"metricChange(KafkaMetric)","u":"metricChange(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.common.metrics","c":"MetricsReporter","l":"metricChange(KafkaMetric)","u":"metricChange(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.common.metrics","c":"MetricConfig","l":"MetricConfig()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"metricInstance(MetricNameTemplate, Map)","u":"metricInstance(org.apache.kafka.common.MetricNameTemplate,java.util.Map)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"metricInstance(MetricNameTemplate, String...)","u":"metricInstance(org.apache.kafka.common.MetricNameTemplate,java.lang.String...)"},{"p":"org.apache.kafka.common","c":"Metric","l":"metricName()"},{"p":"org.apache.kafka.common.metrics","c":"KafkaMetric","l":"metricName()"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"metricName(String, String)","u":"metricName(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.common.metrics","c":"PluginMetrics","l":"metricName(String, String, LinkedHashMap)","u":"metricName(java.lang.String,java.lang.String,java.util.LinkedHashMap)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"metricName(String, String, Map)","u":"metricName(java.lang.String,java.lang.String,java.util.Map)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"metricName(String, String, String)","u":"metricName(java.lang.String,java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"metricName(String, String, String, Map)","u":"metricName(java.lang.String,java.lang.String,java.lang.String,java.util.Map)"},{"p":"org.apache.kafka.common","c":"MetricName","l":"MetricName(String, String, String, Map)","u":"%3Cinit%3E(java.lang.String,java.lang.String,java.lang.String,java.util.Map)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"metricName(String, String, String, String...)","u":"metricName(java.lang.String,java.lang.String,java.lang.String,java.lang.String...)"},{"p":"org.apache.kafka.common","c":"MetricNameTemplate","l":"MetricNameTemplate(String, String, String, Set)","u":"%3Cinit%3E(java.lang.String,java.lang.String,java.lang.String,java.util.Set)"},{"p":"org.apache.kafka.common","c":"MetricNameTemplate","l":"MetricNameTemplate(String, String, String, 
String...)","u":"%3Cinit%3E(java.lang.String,java.lang.String,java.lang.String,java.lang.String...)"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"metricRemoval(KafkaMetric)","u":"metricRemoval(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.common.metrics","c":"MetricsReporter","l":"metricRemoval(KafkaMetric)","u":"metricRemoval(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"METRICS_CONFIG_PREFIX"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"METRICS_LATEST"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"METRICS_NUM_SAMPLES_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"METRICS_NUM_SAMPLES_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"METRICS_NUM_SAMPLES_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"METRICS_NUM_SAMPLES_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"METRICS_RECORDING_LEVEL_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"METRICS_RECORDING_LEVEL_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"METRICS_RECORDING_LEVEL_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"METRICS_RECORDING_LEVEL_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"METRICS_SAMPLE_WINDOW_MS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"METRICS_SAMPLE_WINDOW_MS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"METRICS_SAMPLE_WINDOW_MS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"METRICS_SAMPLE_WINDOW_MS_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"metrics()"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"metrics()"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"metrics()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"metrics()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"metrics()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"metrics()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"metrics()"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"metrics()"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"metrics()"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"metrics()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"metrics()"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"metrics()"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"metrics()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"metrics()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"metrics()"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessingContext","l":"metrics()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"metrics()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"metrics()"},{"p":"org.apache.kafka.streams.processor","c":"StateStoreContext","l":"metrics()"},{"p":"org.apache.kafka.streams","c":"StreamsMetrics","l":"metrics()"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"metrics()"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"Metrics()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"Metrics(MetricConfig)","u":"%3Cinit%3E(or
g.apache.kafka.common.metrics.MetricConfig)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"Metrics(MetricConfig, List, Time)","u":"%3Cinit%3E(org.apache.kafka.common.metrics.MetricConfig,java.util.List,org.apache.kafka.common.utils.Time)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"Metrics(MetricConfig, List, Time, boolean)","u":"%3Cinit%3E(org.apache.kafka.common.metrics.MetricConfig,java.util.List,org.apache.kafka.common.utils.Time,boolean)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"Metrics(MetricConfig, List, Time, boolean, MetricsContext)","u":"%3Cinit%3E(org.apache.kafka.common.metrics.MetricConfig,java.util.List,org.apache.kafka.common.utils.Time,boolean,org.apache.kafka.common.metrics.MetricsContext)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"Metrics(MetricConfig, List, Time, MetricsContext)","u":"%3Cinit%3E(org.apache.kafka.common.metrics.MetricConfig,java.util.List,org.apache.kafka.common.utils.Time,org.apache.kafka.common.metrics.MetricsContext)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"Metrics(MetricConfig, Time)","u":"%3Cinit%3E(org.apache.kafka.common.metrics.MetricConfig,org.apache.kafka.common.utils.Time)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"Metrics(Time)","u":"%3Cinit%3E(org.apache.kafka.common.utils.Time)"},{"p":"org.apache.kafka.streams.state","c":"StoreSupplier","l":"metricsScope()"},{"p":"org.apache.kafka.common","c":"Metric","l":"metricValue()"},{"p":"org.apache.kafka.common.metrics","c":"KafkaMetric","l":"metricValue()"},{"p":"org.apache.kafka.streams.processor","c":"StandbyUpdateListener.SuspendReason","l":"MIGRATED"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MIN_CLEANABLE_DIRTY_RATIO_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MIN_CLEANABLE_DIRTY_RATIO_DOC"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MIN_COMPACTION_LAG_MS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MIN_COMPACTION_LAG_MS_DOC"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MIN_IN_SYNC_REPLICAS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"MIN_IN_SYNC_REPLICAS_DOC"},{"p":"org.apache.kafka.common.metrics.stats","c":"Min","l":"Min()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"SupportedVersionRange","l":"minVersion()"},{"p":"org.apache.kafka.clients.admin","c":"FinalizedVersionRange","l":"minVersionLevel()"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClient","l":"MirrorClient(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClient","l":"MirrorClient(MirrorClientConfig)","u":"%3Cinit%3E(org.apache.kafka.connect.mirror.MirrorClientConfig)"},{"p":"org.apache.kafka.common.errors","c":"MismatchedEndpointTypeException","l":"MismatchedEndpointTypeException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignor.AssignmentError","l":"MISSING_PROCESS_ID"},{"p":"org.apache.kafka.streams.errors","c":"MissingSourceTopicException","l":"MissingSourceTopicException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.tools","c":"MockConnector","l":"MOCK_MODE_KEY"},{"p":"org.apache.kafka.connect.tools","c":"MockConnector","l":"MockConnector()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"MockConsumer(OffsetResetStrategy)","u":"%3Cinit%3E(org.apache.kafka.clients.consumer.OffsetResetStrategy)"},{"p":"org.apach
e.kafka.clients.consumer","c":"MockConsumer","l":"MockConsumer(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"MockProcessorContext()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"MockProcessorContext()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"MockProcessorContext(Properties)","u":"%3Cinit%3E(java.util.Properties)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"MockProcessorContext(Properties)","u":"%3Cinit%3E(java.util.Properties)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"MockProcessorContext(Properties, TaskId, File)","u":"%3Cinit%3E(java.util.Properties,org.apache.kafka.streams.processor.TaskId,java.io.File)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"MockProcessorContext(Properties, TaskId, File)","u":"%3Cinit%3E(java.util.Properties,org.apache.kafka.streams.processor.TaskId,java.io.File)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"MockProducer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"MockProducer(boolean, Partitioner, Serializer, Serializer)","u":"%3Cinit%3E(boolean,org.apache.kafka.clients.producer.Partitioner,org.apache.kafka.common.serialization.Serializer,org.apache.kafka.common.serialization.Serializer)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer)","u":"%3Cinit%3E(org.apache.kafka.common.Cluster,boolean,org.apache.kafka.clients.producer.Partitioner,org.apache.kafka.common.serialization.Serializer,org.apache.kafka.common.serialization.Serializer)"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"MockShareConsumer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkConnector","l":"MockSinkConnector()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkTask","l":"MockSinkTask()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceConnector","l":"MockSourceConnector()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceTask","l":"MockSourceTask()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceTask","l":"MULTIPLE_SCHEMA_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"GroupProtocol","l":"name"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"name"},{"p":"org.apache.kafka.common.metrics","c":"Sensor.RecordingLevel","l":"name"},{"p":"org.apache.kafka.common.security.auth","c":"SecurityProtocol","l":"name"},{"p":"org.apache.kafka.streams.errors","c":"DeserializationExceptionHandler.DeserializationHandlerResponse","l":"name"},{"p":"org.apache.kafka.streams.errors","c":"ProcessingExceptionHandler.ProcessingHandlerResponse","l":"name"},{"p":"org.apache.kafka.streams.errors","c":"ProductionExceptionHandler.ProductionExceptionHandlerResponse","l":"name"},{"p":"org.apache.kafka.streams.errors","c":"StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse","l":"name"},{"p":"org.apache.kafka.streams","c":"GroupProtocol","l":"name"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceTask","l":"NAME_CONFIG"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkTask","l":"NAME_CONFIG"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceTask","l":"NAME_CONFIG"},{"p":"org.apache.kafka.clie
nts.admin","c":"ClientMetricsResourceListing","l":"name()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSynonym","l":"name()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry","l":"name()"},{"p":"org.apache.kafka.clients.admin","c":"NewTopic","l":"name()"},{"p":"org.apache.kafka.clients.admin","c":"RaftVoterEndpoint","l":"name()"},{"p":"org.apache.kafka.clients.admin","c":"TopicDescription","l":"name()"},{"p":"org.apache.kafka.clients.admin","c":"TopicListing","l":"name()"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialsDescription","l":"name()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor","l":"name()"},{"p":"org.apache.kafka.clients.consumer","c":"CooperativeStickyAssignor","l":"name()"},{"p":"org.apache.kafka.clients.consumer","c":"RangeAssignor","l":"name()"},{"p":"org.apache.kafka.clients.consumer","c":"RoundRobinAssignor","l":"name()"},{"p":"org.apache.kafka.clients.consumer","c":"StickyAssignor","l":"name()"},{"p":"org.apache.kafka.common.config","c":"ConfigResource","l":"name()"},{"p":"org.apache.kafka.common.config","c":"ConfigValue","l":"name()"},{"p":"org.apache.kafka.common","c":"MetricName","l":"name()"},{"p":"org.apache.kafka.common","c":"MetricNameTemplate","l":"name()"},{"p":"org.apache.kafka.common.metrics","c":"CompoundStat.NamedMeasurable","l":"name()"},{"p":"org.apache.kafka.common.metrics","c":"Sensor","l":"name()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Frequency","l":"name()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Percentile","l":"name()"},{"p":"org.apache.kafka.common.resource","c":"Resource","l":"name()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePattern","l":"name()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePatternFilter","l":"name()"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"name()"},{"p":"org.apache.kafka.connect.data","c":"Field","l":"name()"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"name()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"name()"},{"p":"org.apache.kafka.connect.health","c":"ConnectorHealth","l":"name()"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"PartitionAssignor","l":"name()"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaEntity.ConfigEntity","l":"name()"},{"p":"org.apache.kafka.streams.processor","c":"StateStore","l":"name()"},{"p":"org.apache.kafka.streams.state","c":"DslKeyValueParams","l":"name()"},{"p":"org.apache.kafka.streams.state","c":"DslSessionParams","l":"name()"},{"p":"org.apache.kafka.streams.state","c":"DslWindowParams","l":"name()"},{"p":"org.apache.kafka.streams.state","c":"StoreBuilder","l":"name()"},{"p":"org.apache.kafka.streams.state","c":"StoreSupplier","l":"name()"},{"p":"org.apache.kafka.streams","c":"TopologyDescription.Node","l":"name()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"name(String)","u":"name(java.lang.String)"},{"p":"org.apache.kafka.streams.processor","c":"TaskId","l":"NAMED_TOPOLOGY_DELIMITER"},{"p":"org.apache.kafka.common.metrics","c":"CompoundStat.NamedMeasurable","l":"NamedMeasurable(MetricName, 
Measurable)","u":"%3Cinit%3E(org.apache.kafka.common.MetricName,org.apache.kafka.common.metrics.Measurable)"},{"p":"org.apache.kafka.clients.admin","c":"ListTopicsResult","l":"names()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"names()"},{"p":"org.apache.kafka.common.security.auth","c":"SecurityProtocol","l":"names()"},{"p":"org.apache.kafka.common.metrics","c":"MetricsContext","l":"NAMESPACE"},{"p":"org.apache.kafka.clients.admin","c":"ListTopicsResult","l":"namesToListings()"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"NETWORK_THREAD_PREFIX"},{"p":"org.apache.kafka.common.errors","c":"NetworkException","l":"NetworkException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"NetworkException","l":"NetworkException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"NetworkException","l":"NetworkException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"NetworkException","l":"NetworkException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.connect.storage","c":"ConverterConfig","l":"newConfigDef()"},{"p":"org.apache.kafka.common.errors","c":"NewLeaderElectedException","l":"NewLeaderElectedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"NewPartitionReassignment","l":"NewPartitionReassignment(List)","u":"%3Cinit%3E(java.util.List)"},{"p":"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"newRecord(String, Integer, Schema, Object, Schema, Object, Long)","u":"newRecord(java.lang.String,java.lang.Integer,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object,java.lang.Long)"},{"p":"org.apache.kafka.connect.sink","c":"SinkRecord","l":"newRecord(String, Integer, Schema, Object, Schema, Object, Long)","u":"newRecord(java.lang.String,java.lang.Integer,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object,java.lang.Long)"},{"p":"org.apache.kafka.connect.source","c":"SourceRecord","l":"newRecord(String, Integer, Schema, Object, Schema, Object, Long)","u":"newRecord(java.lang.String,java.lang.Integer,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object,java.lang.Long)"},{"p":"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"newRecord(String, Integer, Schema, Object, Schema, Object, Long, Iterable
)","u":"newRecord(java.lang.String,java.lang.Integer,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object,java.lang.Long,java.lang.Iterable)"},{"p":"org.apache.kafka.connect.sink","c":"SinkRecord","l":"newRecord(String, Integer, Schema, Object, Schema, Object, Long, Iterable
)","u":"newRecord(java.lang.String,java.lang.Integer,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object,java.lang.Long,java.lang.Iterable)"},{"p":"org.apache.kafka.connect.source","c":"SourceRecord","l":"newRecord(String, Integer, Schema, Object, Schema, Object, Long, Iterable
)","u":"newRecord(java.lang.String,java.lang.Integer,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object,java.lang.Long,java.lang.Iterable)"},{"p":"org.apache.kafka.clients.admin","c":"NewTopic","l":"NewTopic(String, int, short)","u":"%3Cinit%3E(java.lang.String,int,short)"},{"p":"org.apache.kafka.clients.admin","c":"NewTopic","l":"NewTopic(String, Map>)","u":"%3Cinit%3E(java.lang.String,java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"NewTopic","l":"NewTopic(String, Optional, Optional)","u":"%3Cinit%3E(java.lang.String,java.util.Optional,java.util.Optional)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecords","l":"nextOffsets()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadataManager","l":"nextSegmentWithTxnIndex(TopicIdPartition, int, long)","u":"nextSegmentWithTxnIndex(org.apache.kafka.common.TopicIdPartition,int,long)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"NO_DEFAULT_VALUE"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"NO_OPTIMIZATION"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"NO_TIMESTAMP"},{"p":"org.apache.kafka.common","c":"Node","l":"Node(int, String, int)","u":"%3Cinit%3E(int,java.lang.String,int)"},{"p":"org.apache.kafka.common","c":"Node","l":"Node(int, String, int, String)","u":"%3Cinit%3E(int,java.lang.String,int,java.lang.String)"},{"p":"org.apache.kafka.common","c":"Node","l":"Node(int, String, int, String, boolean)","u":"%3Cinit%3E(int,java.lang.String,int,java.lang.String,boolean)"},{"p":"org.apache.kafka.common","c":"Cluster","l":"nodeById(int)"},{"p":"org.apache.kafka.streams.kstream","c":"BranchedKStream","l":"noDefaultBranch()"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo.Node","l":"nodeId()"},{"p":"org.apache.kafka.common","c":"Cluster","l":"nodeIfOnline(TopicPartition, 
int)","u":"nodeIfOnline(org.apache.kafka.common.TopicPartition,int)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClusterResult","l":"nodes()"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo","l":"nodes()"},{"p":"org.apache.kafka.common","c":"Cluster","l":"nodes()"},{"p":"org.apache.kafka.streams","c":"TopologyDescription.Subtopology","l":"nodes()"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"NON_RECONFIGURABLE_CONFIGS"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetResetStrategy","l":"NONE"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Width","l":"NONE"},{"p":"org.apache.kafka.common.config","c":"SslClientAuth","l":"NONE"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignor.AssignmentError","l":"NONE"},{"p":"org.apache.kafka.streams","c":"AutoOffsetReset","l":"none()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.NonEmptyString","l":"NonEmptyString()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.NonEmptyStringWithoutControlChars","l":"nonEmptyStringWithoutControlChars()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.NonEmptyStringWithoutControlChars","l":"NonEmptyStringWithoutControlChars()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"nonInternalValues()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.NonNullValidator","l":"NonNullValidator()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common","c":"Node","l":"noNode()"},{"p":"org.apache.kafka.clients.consumer","c":"NoOffsetForPartitionException","l":"NoOffsetForPartitionException(Collection)","u":"%3Cinit%3E(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"NoOffsetForPartitionException","l":"NoOffsetForPartitionException(TopicPartition)","u":"%3Cinit%3E(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.common.errors","c":"NoReassignmentInProgressException","l":"NoReassignmentInProgressException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"NoReassignmentInProgressException","l":"NoReassignmentInProgressException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams.query","c":"FailureReason","l":"NOT_ACTIVE"},{"p":"org.apache.kafka.streams","c":"KeyQueryMetadata","l":"NOT_AVAILABLE"},{"p":"org.apache.kafka.streams.query","c":"FailureReason","l":"NOT_PRESENT"},{"p":"org.apache.kafka.common","c":"GroupState","l":"NOT_READY"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"NOT_RUNNING"},{"p":"org.apache.kafka.streams.query","c":"FailureReason","l":"NOT_UP_TO_BOUND"},{"p":"org.apache.kafka.common.errors","c":"NotControllerException","l":"NotControllerException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"NotControllerException","l":"NotControllerException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"NotCoordinatorException","l":"NotCoordinatorException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"NotCoordinatorException","l":"NotCoordinatorException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"NotEnoughReplicasAfterAppendException","l":"NotEnoughReplicasAfterAppendException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"NotEnoughReplicasException","l":"NotEnoughReplicasException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"NotEnoughReplicasException","l":"NotEnoughReplicasException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"NotEnoughReplicasException","l":"NotEnoughReplicasException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"NotEnoughReplicasException","l":"NotEnoughReplicasException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.connect.errors","c":"NotFoundException","l":"NotFoundException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.errors","c":"NotFoundException","l":"NotFoundException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.connect.errors","c":"NotFoundException","l":"NotFoundException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"NotLeaderOrFollowerException","l":"NotLeaderOrFollowerException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"NotLeaderOrFollowerException","l":"NotLeaderOrFollowerException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"NotLeaderOrFollowerException","l":"NotLeaderOrFollowerException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"NotLeaderOrFollowerException","l":"NotLeaderOrFollowerException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.streams.query","c":"QueryResult","l":"notUpToBound(Position, PositionBound, 
Integer)","u":"notUpToBound(org.apache.kafka.streams.query.Position,org.apache.kafka.streams.query.PositionBound,java.lang.Integer)"},{"p":"org.apache.kafka.connect.data","c":"SchemaAndValue","l":"NULL"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"NULL_SIZE"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceTask","l":"NUM_MSGS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"NUM_STANDBY_REPLICAS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"NUM_STREAM_THREADS_CONFIG"},{"p":"org.apache.kafka.streams.kstream","c":"Repartitioned","l":"numberOfPartitions(int)"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsResult.TopicMetadataAndConfig","l":"numPartitions()"},{"p":"org.apache.kafka.clients.admin","c":"NewTopic","l":"numPartitions()"},{"p":"org.apache.kafka.server.policy","c":"CreateTopicPolicy.RequestMetadata","l":"numPartitions()"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsResult","l":"numPartitions(String)","u":"numPartitions(java.lang.String)"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"SubscribedTopicDescriber","l":"numPartitions(Uuid)","u":"numPartitions(org.apache.kafka.common.Uuid)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsState","l":"numProcessingThreads()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"AssignmentConfigs","l":"numStandbyReplicas()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginModule","l":"OAUTHBEARER_MECHANISM"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerExtensionsValidatorCallback","l":"OAuthBearerExtensionsValidatorCallback(OAuthBearerToken, SaslExtensions)","u":"%3Cinit%3E(org.apache.kafka.common.security.oauthbearer.OAuthBearerToken,org.apache.kafka.common.security.auth.SaslExtensions)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginCallbackHandler","l":"OAuthBearerLoginCallbackHandler()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginModule","l":"OAuthBearerLoginModule()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerTokenCallback","l":"OAuthBearerTokenCallback()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerValidatorCallback","l":"OAuthBearerValidatorCallback(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerValidatorCallbackHandler","l":"OAuthBearerValidatorCallbackHandler()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo","l":"observers()"},{"p":"org.apache.kafka.streams.kstream","c":"UnlimitedWindows","l":"of()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignmentUtils.RackAwareOptimizationParams","l":"of(ApplicationState)","u":"of(org.apache.kafka.streams.processor.assignment.ApplicationState)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.CompositeValidator","l":"of(ConfigDef.Validator...)","u":"of(org.apache.kafka.common.config.ConfigDef.Validator...)"},{"p":"org.apache.kafka.streams.kstream","c":"JoinWindows","l":"of(Duration)","u":"of(java.time.Duration)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment","l":"of(ProcessId, 
Set)","u":"of(org.apache.kafka.streams.processor.assignment.ProcessId,java.util.Set)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"AssignmentConfigs","l":"of(StreamsConfig)","u":"of(org.apache.kafka.streams.StreamsConfig)"},{"p":"org.apache.kafka.clients.consumer","c":"GroupProtocol","l":"of(String)","u":"of(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"GroupProtocol","l":"of(String)","u":"of(java.lang.String)"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilterComponent","l":"ofDefaultEntity(String)","u":"ofDefaultEntity(java.lang.String)"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilterComponent","l":"ofEntity(String, String)","u":"ofEntity(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilterComponent","l":"ofEntityType(String)","u":"ofEntityType(java.lang.String)"},{"p":"org.apache.kafka.common","c":"PartitionInfo","l":"offlineReplicas()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageManager.IndexType","l":"OFFSET"},{"p":"org.apache.kafka.clients.admin","c":"ListOffsetsResult.ListOffsetsResultInfo","l":"offset()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription.TaskOffset","l":"offset()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"offset()"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndMetadata","l":"offset()"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndTimestamp","l":"offset()"},{"p":"org.apache.kafka.clients.producer","c":"RecordMetadata","l":"offset()"},{"p":"org.apache.kafka.common.errors","c":"RecordDeserializationException","l":"offset()"},{"p":"org.apache.kafka.streams.errors","c":"ErrorHandlerContext","l":"offset()"},{"p":"org.apache.kafka.streams.processor.api","c":"RecordMetadata","l":"offset()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"offset()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"offset()"},{"p":"org.apache.kafka.streams.processor","c":"RecordContext","l":"offset()"},{"p":"org.apache.kafka.connect.storage","c":"OffsetStorageReader","l":"offset(Map)","u":"offset(java.util.Map)"},{"p":"org.apache.kafka.connect.sink","c":"SinkTaskContext","l":"offset(Map)","u":"offset(java.util.Map)"},{"p":"org.apache.kafka.connect.sink","c":"SinkTaskContext","l":"offset(TopicPartition, long)","u":"offset(org.apache.kafka.common.TopicPartition,long)"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"offsetAndMetadata()"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndMetadata","l":"OffsetAndMetadata(long)","u":"%3Cinit%3E(long)"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndMetadata","l":"OffsetAndMetadata(long, Optional, String)","u":"%3Cinit%3E(long,java.util.Optional,java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndMetadata","l":"OffsetAndMetadata(long, String)","u":"%3Cinit%3E(long,java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndTimestamp","l":"OffsetAndTimestamp(long, long)","u":"%3Cinit%3E(long,long)"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndTimestamp","l":"OffsetAndTimestamp(long, long, 
Optional)","u":"%3Cinit%3E(long,long,java.util.Optional)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"LogSegmentData","l":"offsetIndex()"},{"p":"org.apache.kafka.clients.admin","c":"ReplicaInfo","l":"offsetLag()"},{"p":"org.apache.kafka.streams","c":"LagInfo","l":"offsetLag()"},{"p":"org.apache.kafka.common.errors","c":"OffsetMetadataTooLarge","l":"OffsetMetadataTooLarge()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"OffsetMetadataTooLarge","l":"OffsetMetadataTooLarge(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"OffsetMetadataTooLarge","l":"OffsetMetadataTooLarge(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"OffsetMetadataTooLarge","l":"OffsetMetadataTooLarge(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"OffsetMovedToTieredStorageException","l":"OffsetMovedToTieredStorageException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"OffsetMovedToTieredStorageException","l":"OffsetMovedToTieredStorageException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"OffsetNotAvailableException","l":"OffsetNotAvailableException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetOutOfRangeException","l":"OffsetOutOfRangeException(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.common.errors","c":"OffsetOutOfRangeException","l":"OffsetOutOfRangeException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetOutOfRangeException","l":"OffsetOutOfRangeException(String, Map)","u":"%3Cinit%3E(java.lang.String,java.util.Map)"},{"p":"org.apache.kafka.common.errors","c":"OffsetOutOfRangeException","l":"OffsetOutOfRangeException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetOutOfRangeException","l":"offsetOutOfRangePartitions()"},{"p":"org.apache.kafka.connect.storage","c":"OffsetStorageReader","l":"offsets(Collection>)","u":"offsets(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"offsetsForTimes(Map)","u":"offsetsForTimes(java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"offsetsForTimes(Map)","u":"offsetsForTimes(java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"offsetsForTimes(Map)","u":"offsetsForTimes(java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"offsetsForTimes(Map, Duration)","u":"offsetsForTimes(java.util.Map,java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"offsetsForTimes(Map, Duration)","u":"offsetsForTimes(java.util.Map,java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"offsetsForTimes(Map, 
Duration)","u":"offsetsForTimes(java.util.Map,java.time.Duration)"},{"p":"org.apache.kafka.clients.admin","c":"OffsetSpec","l":"OffsetSpec()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.source","c":"SourceConnectorContext","l":"offsetStorageReader()"},{"p":"org.apache.kafka.connect.source","c":"SourceTaskContext","l":"offsetStorageReader()"},{"p":"org.apache.kafka.connect.mirror","c":"DefaultReplicationPolicy","l":"offsetSyncsTopic(String)","u":"offsetSyncsTopic(java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"ReplicationPolicy","l":"offsetSyncsTopic(String)","u":"offsetSyncsTopic(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindows","l":"ofInactivityGapAndGrace(Duration, Duration)","u":"ofInactivityGapAndGrace(java.time.Duration,java.time.Duration)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindows","l":"ofInactivityGapWithNoGrace(Duration)","u":"ofInactivityGapWithNoGrace(java.time.Duration)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindows","l":"ofSizeAndGrace(Duration, Duration)","u":"ofSizeAndGrace(java.time.Duration,java.time.Duration)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindows","l":"ofSizeWithNoGrace(Duration)","u":"ofSizeWithNoGrace(java.time.Duration)"},{"p":"org.apache.kafka.streams.kstream","c":"JoinWindows","l":"ofTimeDifferenceAndGrace(Duration, Duration)","u":"ofTimeDifferenceAndGrace(java.time.Duration,java.time.Duration)"},{"p":"org.apache.kafka.streams.kstream","c":"SlidingWindows","l":"ofTimeDifferenceAndGrace(Duration, Duration)","u":"ofTimeDifferenceAndGrace(java.time.Duration,java.time.Duration)"},{"p":"org.apache.kafka.streams.kstream","c":"JoinWindows","l":"ofTimeDifferenceWithNoGrace(Duration)","u":"ofTimeDifferenceWithNoGrace(java.time.Duration)"},{"p":"org.apache.kafka.streams.kstream","c":"SlidingWindows","l":"ofTimeDifferenceWithNoGrace(Duration)","u":"ofTimeDifferenceWithNoGrace(java.time.Duration)"},{"p":"org.apache.kafka.common","c":"TopicCollection","l":"ofTopicIds(Collection)","u":"ofTopicIds(java.util.Collection)"},{"p":"org.apache.kafka.common","c":"TopicCollection","l":"ofTopicNames(Collection)","u":"ofTopicNames(java.util.Collection)"},{"p":"org.apache.kafka.common.metrics.stats","c":"SampledStat","l":"oldest(long)"},{"p":"org.apache.kafka.streams.kstream","c":"EmitStrategy.StrategyType","l":"ON_WINDOW_CLOSE"},{"p":"org.apache.kafka.streams.kstream","c":"EmitStrategy.StrategyType","l":"ON_WINDOW_UPDATE"},{"p":"org.apache.kafka.clients.producer","c":"ProducerInterceptor","l":"onAcknowledgement(RecordMetadata, Exception)","u":"onAcknowledgement(org.apache.kafka.clients.producer.RecordMetadata,java.lang.Exception)"},{"p":"org.apache.kafka.clients.producer","c":"ProducerInterceptor","l":"onAcknowledgement(RecordMetadata, Exception, Headers)","u":"onAcknowledgement(org.apache.kafka.clients.producer.RecordMetadata,java.lang.Exception,org.apache.kafka.common.header.Headers)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor","l":"onAssignment(ConsumerPartitionAssignor.Assignment, ConsumerGroupMetadata)","u":"onAssignment(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment,org.apache.kafka.clients.consumer.ConsumerGroupMetadata)"},{"p":"org.apache.kafka.clients.consumer","c":"CooperativeStickyAssignor","l":"onAssignment(ConsumerPartitionAssignor.Assignment, 
ConsumerGroupMetadata)","u":"onAssignment(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment,org.apache.kafka.clients.consumer.ConsumerGroupMetadata)"},{"p":"org.apache.kafka.clients.consumer","c":"StickyAssignor","l":"onAssignment(ConsumerPartitionAssignor.Assignment, ConsumerGroupMetadata)","u":"onAssignment(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment,org.apache.kafka.clients.consumer.ConsumerGroupMetadata)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignor","l":"onAssignmentComputed(ConsumerPartitionAssignor.GroupAssignment, ConsumerPartitionAssignor.GroupSubscription, TaskAssignor.AssignmentError)","u":"onAssignmentComputed(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupAssignment,org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription,org.apache.kafka.streams.processor.assignment.TaskAssignor.AssignmentError)"},{"p":"org.apache.kafka.streams.processor","c":"StandbyUpdateListener","l":"onBatchLoaded(TopicPartition, String, TaskId, long, long, long)","u":"onBatchLoaded(org.apache.kafka.common.TopicPartition,java.lang.String,org.apache.kafka.streams.processor.TaskId,long,long,long)"},{"p":"org.apache.kafka.streams.processor","c":"StateRestoreListener","l":"onBatchRestored(TopicPartition, String, long, long)","u":"onBatchRestored(org.apache.kafka.common.TopicPartition,java.lang.String,long,long)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.StateListener","l":"onChange(KafkaStreams.State, KafkaStreams.State)","u":"onChange(org.apache.kafka.streams.KafkaStreams.State,org.apache.kafka.streams.KafkaStreams.State)"},{"p":"org.apache.kafka.common.config","c":"ConfigChangeCallback","l":"onChange(String, ConfigData)","u":"onChange(java.lang.String,org.apache.kafka.common.config.ConfigData)"},{"p":"org.apache.kafka.streams.processor","c":"CommitCallback","l":"onCommit()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerInterceptor","l":"onCommit(Map)","u":"onCommit(java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"AcknowledgementCommitCallback","l":"onComplete(Map>, Exception)","u":"onComplete(java.util.Map,java.lang.Exception)"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetCommitCallback","l":"onComplete(Map, Exception)","u":"onComplete(java.util.Map,java.lang.Exception)"},{"p":"org.apache.kafka.clients.producer","c":"Callback","l":"onCompletion(RecordMetadata, Exception)","u":"onCompletion(org.apache.kafka.clients.producer.RecordMetadata,java.lang.Exception)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerInterceptor","l":"onConsume(ConsumerRecords)","u":"onConsume(org.apache.kafka.clients.consumer.ConsumerRecords)"},{"p":"org.apache.kafka.common","c":"Uuid","l":"ONE_UUID"},{"p":"org.apache.kafka.clients.admin","c":"TransactionState","l":"ONGOING"},{"p":"org.apache.kafka.streams.processor","c":"FailOnInvalidTimestamp","l":"onInvalidTimestamp(ConsumerRecord, long, long)","u":"onInvalidTimestamp(org.apache.kafka.clients.consumer.ConsumerRecord,long,long)"},{"p":"org.apache.kafka.streams.processor","c":"LogAndSkipOnInvalidTimestamp","l":"onInvalidTimestamp(ConsumerRecord, long, long)","u":"onInvalidTimestamp(org.apache.kafka.clients.consumer.ConsumerRecord,long,long)"},{"p":"org.apache.kafka.streams.processor","c":"UsePartitionTimeOnInvalidTimestamp","l":"onInvalidTimestamp(ConsumerRecord, long, 
long)","u":"onInvalidTimestamp(org.apache.kafka.clients.consumer.ConsumerRecord,long,long)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadataManager","l":"onPartitionLeadershipChanges(Set, Set)","u":"onPartitionLeadershipChanges(java.util.Set,java.util.Set)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRebalanceListener","l":"onPartitionsAssigned(Collection)","u":"onPartitionsAssigned(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRebalanceListener","l":"onPartitionsLost(Collection)","u":"onPartitionsLost(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRebalanceListener","l":"onPartitionsRevoked(Collection)","u":"onPartitionsRevoked(java.util.Collection)"},{"p":"org.apache.kafka.streams.processor","c":"StateRestoreListener","l":"onRestoreEnd(TopicPartition, String, long)","u":"onRestoreEnd(org.apache.kafka.common.TopicPartition,java.lang.String,long)"},{"p":"org.apache.kafka.streams.processor","c":"StateRestoreListener","l":"onRestoreStart(TopicPartition, String, long, long)","u":"onRestoreStart(org.apache.kafka.common.TopicPartition,java.lang.String,long,long)"},{"p":"org.apache.kafka.streams.processor","c":"StateRestoreListener","l":"onRestoreSuspended(TopicPartition, String, long)","u":"onRestoreSuspended(org.apache.kafka.common.TopicPartition,java.lang.String,long)"},{"p":"org.apache.kafka.clients.producer","c":"ProducerInterceptor","l":"onSend(ProducerRecord)","u":"onSend(org.apache.kafka.clients.producer.ProducerRecord)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadataManager","l":"onStopPartitions(Set)","u":"onStopPartitions(java.util.Set)"},{"p":"org.apache.kafka.common","c":"ClusterResourceListener","l":"onUpdate(ClusterResource)","u":"onUpdate(org.apache.kafka.common.ClusterResource)"},{"p":"org.apache.kafka.streams.processor","c":"StandbyUpdateListener","l":"onUpdateStart(TopicPartition, String, long)","u":"onUpdateStart(org.apache.kafka.common.TopicPartition,java.lang.String,long)"},{"p":"org.apache.kafka.streams.processor","c":"StandbyUpdateListener","l":"onUpdateSuspended(TopicPartition, String, long, long, StandbyUpdateListener.SuspendReason)","u":"onUpdateSuspended(org.apache.kafka.common.TopicPartition,java.lang.String,long,long,org.apache.kafka.streams.processor.StandbyUpdateListener.SuspendReason)"},{"p":"org.apache.kafka.streams.kstream","c":"EmitStrategy","l":"onWindowClose()"},{"p":"org.apache.kafka.streams.kstream","c":"EmitStrategy","l":"onWindowUpdate()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaAlteration.Op","l":"Op(String, Double)","u":"%3Cinit%3E(java.lang.String,java.lang.Double)"},{"p":"org.apache.kafka.connect.sink","c":"SinkTask","l":"open(Collection)","u":"open(java.util.Collection)"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntry","l":"operation()"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntryFilter","l":"operation()"},{"p":"org.apache.kafka.server.authorizer","c":"Action","l":"operation()"},{"p":"org.apache.kafka.common.errors","c":"OperationNotAttemptedException","l":"OperationNotAttemptedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaAlteration","l":"ops()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"OPTIMIZE"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignmentUtils","l":"optimizeRackAwareActiveTasks(TaskAssignmentUtils.RackAwareOptimizationParams, 
Map)","u":"optimizeRackAwareActiveTasks(org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils.RackAwareOptimizationParams,java.util.Map)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignmentUtils","l":"optimizeRackAwareStandbyTasks(TaskAssignmentUtils.RackAwareOptimizationParams, Map)","u":"optimizeRackAwareStandbyTasks(org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils.RackAwareOptimizationParams,java.util.Map)"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"OPTIONAL_BOOLEAN_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"OPTIONAL_BYTES_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"OPTIONAL_FLOAT32_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"OPTIONAL_FLOAT64_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"OPTIONAL_INT16_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"OPTIONAL_INT32_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"OPTIONAL_INT64_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"OPTIONAL_INT8_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"OPTIONAL_STRING_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"optional()"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigOp","l":"opType()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"orderInGroup"},{"p":"org.apache.kafka.common.errors","c":"RecordDeserializationException","l":"origin()"},{"p":"org.apache.kafka.connect.sink","c":"SinkRecord","l":"originalKafkaOffset()"},{"p":"org.apache.kafka.connect.sink","c":"SinkRecord","l":"originalKafkaPartition()"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"originals()"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"originals(Map)","u":"originals(java.util.Map)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"originalsStrings()"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"originalsWithPrefix(String)","u":"originalsWithPrefix(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"originalsWithPrefix(String, boolean)","u":"originalsWithPrefix(java.lang.String,boolean)"},{"p":"org.apache.kafka.connect.sink","c":"SinkRecord","l":"originalTopic()"},{"p":"org.apache.kafka.connect.mirror","c":"ReplicationPolicy","l":"originalTopic(String)","u":"originalTopic(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"otherValueSerde()"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"otherValueSerde(Serde)","u":"otherValueSerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"outerJoin(KStream, ValueJoiner, JoinWindows)","u":"outerJoin(org.apache.kafka.streams.kstream.KStream,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.JoinWindows)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"outerJoin(KStream, ValueJoiner, JoinWindows, StreamJoined)","u":"outerJoin(org.apache.kafka.streams.kstream.KStream,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.JoinWindows,org.apache.kafka.streams.kstream.StreamJoined)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"outerJoin(KStream, ValueJoinerWithKey, 
JoinWindows)","u":"outerJoin(org.apache.kafka.streams.kstream.KStream,org.apache.kafka.streams.kstream.ValueJoinerWithKey,org.apache.kafka.streams.kstream.JoinWindows)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"outerJoin(KStream, ValueJoinerWithKey, JoinWindows, StreamJoined)","u":"outerJoin(org.apache.kafka.streams.kstream.KStream,org.apache.kafka.streams.kstream.ValueJoinerWithKey,org.apache.kafka.streams.kstream.JoinWindows,org.apache.kafka.streams.kstream.StreamJoined)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"outerJoin(KTable, ValueJoiner)","u":"outerJoin(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"outerJoin(KTable, ValueJoiner, Materialized>)","u":"outerJoin(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"outerJoin(KTable, ValueJoiner, Named)","u":"outerJoin(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"outerJoin(KTable, ValueJoiner, Named, Materialized>)","u":"outerJoin(org.apache.kafka.streams.kstream.KTable,org.apache.kafka.streams.kstream.ValueJoiner,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.common.errors","c":"OutOfOrderSequenceException","l":"OutOfOrderSequenceException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Window","l":"overlap(Window)","u":"overlap(org.apache.kafka.streams.kstream.Window)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Subscription","l":"ownedPartitions()"},{"p":"org.apache.kafka.clients.admin","c":"CreateDelegationTokenOptions","l":"owner()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"owner()"},{"p":"org.apache.kafka.clients.admin","c":"CreateDelegationTokenOptions","l":"owner(KafkaPrincipal)","u":"owner(org.apache.kafka.common.security.auth.KafkaPrincipal)"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"ownerAsString()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"ownerOrRenewer(KafkaPrincipal)","u":"ownerOrRenewer(org.apache.kafka.common.security.auth.KafkaPrincipal)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeDelegationTokenOptions","l":"owners()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeDelegationTokenOptions","l":"owners(List)","u":"owners(java.util.List)"},{"p":"org.apache.kafka.streams","c":"KeyValue","l":"pair(K, V)","u":"pair(K,V)"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"parameter(String, 
String)","u":"parameter(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"parameters()"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"parameters()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"parameters()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"parameters(Map)","u":"parameters(java.util.Map)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"parse(Map)","u":"parse(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"TransactionState","l":"parse(String)","u":"parse(java.lang.String)"},{"p":"org.apache.kafka.common","c":"ClassicGroupState","l":"parse(String)","u":"parse(java.lang.String)"},{"p":"org.apache.kafka.common","c":"ConsumerGroupState","l":"parse(String)","u":"parse(java.lang.String)"},{"p":"org.apache.kafka.common","c":"GroupState","l":"parse(String)","u":"parse(java.lang.String)"},{"p":"org.apache.kafka.common","c":"GroupType","l":"parse(String)","u":"parse(java.lang.String)"},{"p":"org.apache.kafka.streams.processor","c":"TaskId","l":"parse(String)","u":"parse(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"parseStoreType()"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"parseString(String)","u":"parseString(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"parseType(String, Object, ConfigDef.Type)","u":"parseType(java.lang.String,java.lang.Object,org.apache.kafka.common.config.ConfigDef.Type)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"PARTITION_ASSIGNMENT_STRATEGY_CONFIG"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceTask","l":"PARTITION_COUNT_CONFIG"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"PARTITION_KEY"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription.TaskOffset","l":"partition()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"partition()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"partition()"},{"p":"org.apache.kafka.clients.producer","c":"RecordMetadata","l":"partition()"},{"p":"org.apache.kafka.common","c":"PartitionInfo","l":"partition()"},{"p":"org.apache.kafka.common","c":"TopicIdPartition","l":"partition()"},{"p":"org.apache.kafka.common","c":"TopicPartition","l":"partition()"},{"p":"org.apache.kafka.common","c":"TopicPartitionInfo","l":"partition()"},{"p":"org.apache.kafka.common","c":"TopicPartitionReplica","l":"partition()"},{"p":"org.apache.kafka.streams.errors","c":"ErrorHandlerContext","l":"partition()"},{"p":"org.apache.kafka.streams","c":"KeyQueryMetadata","l":"partition()"},{"p":"org.apache.kafka.streams.processor.api","c":"RecordMetadata","l":"partition()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"partition()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"partition()"},{"p":"org.apache.kafka.streams.processor","c":"RecordContext","l":"partition()"},{"p":"org.apache.kafka.streams.processor","c":"TaskId","l":"partition()"},{"p":"org.apache.kafka.streams","c":"StoreQueryParameters","l":"partition()"},{"p":"org.apache.kafka.clients.producer","c":"Partitioner","l":"partition(String, Object, byte[], Object, byte[], Cluster)","u":"partition(java.lang.String,java.lang.Object,byte[],java.lang.Object,byte[],org.apache.kafka.common.Cluster)"},{"p":"org.apache.kafka.clients.producer","c":"RoundRobinPartitioner","l":"partition(String, Object, byte[], Object, byte[], 
Cluster)","u":"partition(java.lang.String,java.lang.Object,byte[],java.lang.Object,byte[],org.apache.kafka.common.Cluster)"},{"p":"org.apache.kafka.common","c":"Cluster","l":"partition(TopicPartition)","u":"partition(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"partitionAssignor()"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"PartitionAssignorException","l":"PartitionAssignorException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"PartitionAssignorException","l":"PartitionAssignorException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common","c":"Cluster","l":"partitionCountForTopic(String)","u":"partitionCountForTopic(java.lang.String)"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"PARTITIONER_CLASS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"PARTITIONER_IGNORE_KEYS_CONFIG"},{"p":"org.apache.kafka.common","c":"PartitionInfo","l":"PartitionInfo(String, int, Node, Node[], Node[])","u":"%3Cinit%3E(java.lang.String,int,org.apache.kafka.common.Node,org.apache.kafka.common.Node[],org.apache.kafka.common.Node[])"},{"p":"org.apache.kafka.common","c":"PartitionInfo","l":"PartitionInfo(String, int, Node, Node[], Node[], Node[])","u":"%3Cinit%3E(java.lang.String,int,org.apache.kafka.common.Node,org.apache.kafka.common.Node[],org.apache.kafka.common.Node[],org.apache.kafka.common.Node[])"},{"p":"org.apache.kafka.clients.admin","c":"DescribeProducersResult.PartitionProducerState","l":"PartitionProducerState(List)","u":"%3Cinit%3E(java.util.List)"},{"p":"org.apache.kafka.clients.admin","c":"PartitionReassignment","l":"PartitionReassignment(List, List, 
List)","u":"%3Cinit%3E(java.util.List,java.util.List,java.util.List)"},{"p":"org.apache.kafka.clients.admin","c":"AlterConsumerGroupOffsetsResult","l":"partitionResult(TopicPartition)","u":"partitionResult(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.clients.admin","c":"AlterShareGroupOffsetsResult","l":"partitionResult(TopicPartition)","u":"partitionResult(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.clients.admin","c":"AlterStreamsGroupOffsetsResult","l":"partitionResult(TopicPartition)","u":"partitionResult(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteConsumerGroupOffsetsResult","l":"partitionResult(TopicPartition)","u":"partitionResult(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteStreamsGroupOffsetsResult","l":"partitionResult(TopicPartition)","u":"partitionResult(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeProducersResult","l":"partitionResult(TopicPartition)","u":"partitionResult(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.clients.admin","c":"ListOffsetsResult","l":"partitionResult(TopicPartition)","u":"partitionResult(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.clients.admin","c":"ElectLeadersResult","l":"partitions()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberAssignment.TaskIds","l":"partitions()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubtopologyDescription.TopicInfo","l":"partitions()"},{"p":"org.apache.kafka.clients.admin","c":"TopicDescription","l":"partitions()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Assignment","l":"partitions()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecords","l":"partitions()"},{"p":"org.apache.kafka.clients.consumer","c":"InvalidOffsetException","l":"partitions()"},{"p":"org.apache.kafka.clients.consumer","c":"NoOffsetForPartitionException","l":"partitions()"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetOutOfRangeException","l":"partitions()"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"MemberAssignment","l":"partitions()"},{"p":"org.apache.kafka.streams.processor","c":"StreamPartitioner","l":"partitions(String, K, V, int)","u":"partitions(java.lang.String,K,V,int)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"partitionsFor(String)","u":"partitionsFor(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"partitionsFor(String)","u":"partitionsFor(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"partitionsFor(String)","u":"partitionsFor(java.lang.String)"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"partitionsFor(String)","u":"partitionsFor(java.lang.String)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"partitionsFor(String)","u":"partitionsFor(java.lang.String)"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"partitionsFor(String)","u":"partitionsFor(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"partitionsFor(String, Duration)","u":"partitionsFor(java.lang.String,java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"partitionsFor(String, Duration)","u":"partitionsFor(java.lang.String,java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"partitionsFor(String, 
Duration)","u":"partitionsFor(java.lang.String,java.time.Duration)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"partitionsForException"},{"p":"org.apache.kafka.common","c":"Cluster","l":"partitionsForNode(int)"},{"p":"org.apache.kafka.common","c":"Cluster","l":"partitionsForTopic(String)","u":"partitionsForTopic(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeTopicsOptions","l":"partitionSizeLimitPerResponse()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeTopicsOptions","l":"partitionSizeLimitPerResponse(int)"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupOffsetsResult","l":"partitionsToOffsetAndMetadata()"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupOffsetsResult","l":"partitionsToOffsetAndMetadata(String)","u":"partitionsToOffsetAndMetadata(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"ListShareGroupOffsetsResult","l":"partitionsToOffsetAndMetadata(String)","u":"partitionsToOffsetAndMetadata(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"ListStreamsGroupOffsetsResult","l":"partitionsToOffsetAndMetadata(String)","u":"partitionsToOffsetAndMetadata(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigType","l":"PASSWORD"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Type","l":"PASSWORD"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialUpsertion","l":"password()"},{"p":"org.apache.kafka.common.security.plain","c":"PlainAuthenticateCallback","l":"password()"},{"p":"org.apache.kafka.clients.consumer","c":"SubscriptionPattern","l":"pattern()"},{"p":"org.apache.kafka.common.acl","c":"AclBinding","l":"pattern()"},{"p":"org.apache.kafka.common.acl","c":"AclBindingFilter","l":"patternFilter()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePattern","l":"patternType()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePatternFilter","l":"patternType()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"pause()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"pause(Collection)","u":"pause(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"pause(Collection)","u":"pause(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"pause(Collection)","u":"pause(java.util.Collection)"},{"p":"org.apache.kafka.connect.sink","c":"SinkTaskContext","l":"pause(TopicPartition...)","u":"pause(org.apache.kafka.common.TopicPartition...)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"paused()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"paused()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"paused()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"peek(ForeachAction)","u":"peek(org.apache.kafka.streams.kstream.ForeachAction)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"peek(ForeachAction, Named)","u":"peek(org.apache.kafka.streams.kstream.ForeachAction,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.state","c":"KeyValueIterator","l":"peekNextKey()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"PENDING_ERROR"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"PENDING_SHUTDOWN"},{"p":"org.apache.kafka.common.metrics.stats","c":"Percentile","l":"percentile()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Percentile","l":"Percentile(MetricName, 
double)","u":"%3Cinit%3E(org.apache.kafka.common.MetricName,double)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Percentiles","l":"Percentiles(int, double, double, Percentiles.BucketSizing, Percentile...)","u":"%3Cinit%3E(int,double,double,org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing,org.apache.kafka.common.metrics.stats.Percentile...)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Percentiles","l":"Percentiles(int, double, Percentiles.BucketSizing, Percentile...)","u":"%3Cinit%3E(int,double,org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing,org.apache.kafka.common.metrics.stats.Percentile...)"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntry","l":"permissionType()"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntryFilter","l":"permissionType()"},{"p":"org.apache.kafka.streams.processor","c":"StateStore","l":"persistent()"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"persistentKeyValueStore(String)","u":"persistentKeyValueStore(java.lang.String)"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"persistentSessionStore(String, Duration)","u":"persistentSessionStore(java.lang.String,java.time.Duration)"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"persistentTimestampedKeyValueStore(String)","u":"persistentTimestampedKeyValueStore(java.lang.String)"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"persistentTimestampedWindowStore(String, Duration, Duration, boolean)","u":"persistentTimestampedWindowStore(java.lang.String,java.time.Duration,java.time.Duration,boolean)"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"persistentVersionedKeyValueStore(String, Duration)","u":"persistentVersionedKeyValueStore(java.lang.String,java.time.Duration)"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"persistentVersionedKeyValueStore(String, Duration, Duration)","u":"persistentVersionedKeyValueStore(java.lang.String,java.time.Duration,java.time.Duration)"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"persistentWindowStore(String, Duration, Duration, boolean)","u":"persistentWindowStore(java.lang.String,java.time.Duration,java.time.Duration,boolean)"},{"p":"org.apache.kafka.streams","c":"TestInputTopic","l":"pipeInput(K, V)","u":"pipeInput(K,V)"},{"p":"org.apache.kafka.streams","c":"TestInputTopic","l":"pipeInput(K, V, Instant)","u":"pipeInput(K,V,java.time.Instant)"},{"p":"org.apache.kafka.streams","c":"TestInputTopic","l":"pipeInput(K, V, long)","u":"pipeInput(K,V,long)"},{"p":"org.apache.kafka.streams","c":"TestInputTopic","l":"pipeInput(TestRecord)","u":"pipeInput(org.apache.kafka.streams.test.TestRecord)"},{"p":"org.apache.kafka.streams","c":"TestInputTopic","l":"pipeInput(V)"},{"p":"org.apache.kafka.streams","c":"TestInputTopic","l":"pipeInput(V, Instant)","u":"pipeInput(V,java.time.Instant)"},{"p":"org.apache.kafka.streams","c":"TestInputTopic","l":"pipeKeyValueList(List>)","u":"pipeKeyValueList(java.util.List)"},{"p":"org.apache.kafka.streams","c":"TestInputTopic","l":"pipeKeyValueList(List>, Instant, Duration)","u":"pipeKeyValueList(java.util.List,java.time.Instant,java.time.Duration)"},{"p":"org.apache.kafka.streams","c":"TestInputTopic","l":"pipeRecordList(List>)","u":"pipeRecordList(java.util.List)"},{"p":"org.apache.kafka.streams","c":"TestInputTopic","l":"pipeValueList(List)","u":"pipeValueList(java.util.List)"},{"p":"org.apache.kafka.streams","c":"TestInputTopic","l":"pipeValueList(List, Instant, 
Duration)","u":"pipeValueList(java.util.List,java.time.Instant,java.time.Duration)"},{"p":"org.apache.kafka.common.security.plain","c":"PlainAuthenticateCallback","l":"PlainAuthenticateCallback(char[])","u":"%3Cinit%3E(char[])"},{"p":"org.apache.kafka.common.security.plain","c":"PlainLoginModule","l":"PlainLoginModule()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.auth","c":"SecurityProtocol","l":"PLAINTEXT"},{"p":"org.apache.kafka.common.security.auth","c":"PlaintextAuthenticationContext","l":"PlaintextAuthenticationContext(InetAddress, String)","u":"%3Cinit%3E(java.net.InetAddress,java.lang.String)"},{"p":"org.apache.kafka.connect.connector","c":"ConnectorContext","l":"pluginMetrics()"},{"p":"org.apache.kafka.connect.sink","c":"SinkTaskContext","l":"pluginMetrics()"},{"p":"org.apache.kafka.connect.source","c":"SourceTaskContext","l":"pluginMetrics()"},{"p":"org.apache.kafka.common.errors","c":"PolicyViolationException","l":"PolicyViolationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"PolicyViolationException","l":"PolicyViolationException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.connect.source","c":"SourceTask.TransactionBoundary","l":"POLL"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"POLL_MS_CONFIG"},{"p":"org.apache.kafka.connect.source","c":"SourceTask","l":"poll()"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceTask","l":"poll()"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceTask","l":"poll()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceTask","l":"poll()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"poll(Duration)","u":"poll(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"poll(Duration)","u":"poll(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"poll(Duration)","u":"poll(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"poll(Duration)","u":"poll(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"poll(Duration)","u":"poll(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"poll(Duration)","u":"poll(java.time.Duration)"},{"p":"org.apache.kafka.clients.admin","c":"RaftVoterEndpoint","l":"port()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription.Endpoint","l":"port()"},{"p":"org.apache.kafka.common","c":"Endpoint","l":"port()"},{"p":"org.apache.kafka.common","c":"Node","l":"port()"},{"p":"org.apache.kafka.streams.state","c":"HostInfo","l":"port()"},{"p":"org.apache.kafka.streams","c":"StreamsMetadata","l":"port()"},{"p":"org.apache.kafka.streams.query","c":"PositionBound","l":"position()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"position(TopicPartition)","u":"position(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"position(TopicPartition)","u":"position(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"position(TopicPartition)","u":"position(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"position(TopicPartition, Duration)","u":"position(org.apache.kafka.common.TopicPartition,java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"position(TopicPartition, 
Duration)","u":"position(org.apache.kafka.common.TopicPartition,java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"position(TopicPartition, Duration)","u":"position(org.apache.kafka.common.TopicPartition,java.time.Duration)"},{"p":"org.apache.kafka.common.errors","c":"PositionOutOfRangeException","l":"PositionOutOfRangeException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"PositionOutOfRangeException","l":"PositionOutOfRangeException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"PREALLOCATE_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"PREALLOCATE_DOC"},{"p":"org.apache.kafka.connect.sink","c":"SinkTask","l":"preCommit(Map)","u":"preCommit(java.util.Map)"},{"p":"org.apache.kafka.streams","c":"TopologyDescription.Node","l":"predecessors()"},{"p":"org.apache.kafka.common","c":"ElectionType","l":"PREFERRED"},{"p":"org.apache.kafka.common.errors","c":"PreferredLeaderNotAvailableException","l":"PreferredLeaderNotAvailableException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"PreferredLeaderNotAvailableException","l":"PreferredLeaderNotAvailableException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.resource","c":"PatternType","l":"PREFIXED"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyKeyValueStore","l":"prefixScan(P, PS)","u":"prefixScan(P,PS)"},{"p":"org.apache.kafka.clients.admin","c":"TransactionState","l":"PREPARE_ABORT"},{"p":"org.apache.kafka.clients.admin","c":"TransactionState","l":"PREPARE_COMMIT"},{"p":"org.apache.kafka.clients.admin","c":"TransactionState","l":"PREPARE_EPOCH_FENCE"},{"p":"org.apache.kafka.clients.producer","c":"PreparedTxnState","l":"PreparedTxnState()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.producer","c":"PreparedTxnState","l":"PreparedTxnState(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common","c":"ClassicGroupState","l":"PREPARING_REBALANCE"},{"p":"org.apache.kafka.common","c":"ConsumerGroupState","l":"PREPARING_REBALANCE"},{"p":"org.apache.kafka.common","c":"GroupState","l":"PREPARING_REBALANCE"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsState","l":"previousActiveTasks()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsState","l":"previousStandbyTasks()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsState","l":"prevTasksByLag(String)","u":"prevTasksByLag(java.lang.String)"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntry","l":"principal()"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntryFilter","l":"principal()"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizableRequestContext","l":"principal()"},{"p":"org.apache.kafka.common.errors","c":"PrincipalDeserializationException","l":"PrincipalDeserializationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"PrincipalDeserializationException","l":"PrincipalDeserializationException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerToken","l":"principalName()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"print(Printed)","u":"print(org.apache.kafka.streams.kstream.Printed)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"PROBING_REBALANCE_INTERVAL_MS_CONFIG"},{"p":"org.apache.kafka.streams.processor.assignment","c":"AssignmentConfigs","l":"probingRebalanceIntervalMs()"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyProcessor","l":"process(FixedKeyRecord)","u":"process(org.apache.kafka.streams.processor.api.FixedKeyRecord)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"process(ProcessorSupplier, Named, String...)","u":"process(org.apache.kafka.streams.processor.api.ProcessorSupplier,org.apache.kafka.streams.kstream.Named,java.lang.String...)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"process(ProcessorSupplier, String...)","u":"process(org.apache.kafka.streams.processor.api.ProcessorSupplier,java.lang.String...)"},{"p":"org.apache.kafka.streams.kstream","c":"ForeachProcessor","l":"process(Record)","u":"process(org.apache.kafka.streams.processor.api.Record)"},{"p":"org.apache.kafka.streams.processor.api","c":"Processor","l":"process(Record)","u":"process(org.apache.kafka.streams.processor.api.Record)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"processId()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment","l":"processId()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsState","l":"processId()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"ProcessId","l":"ProcessId(UUID)","u":"%3Cinit%3E(java.util.UUID)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"PROCESSING_EXCEPTION_HANDLER_CLASS_DOC"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"PROCESSING_GUARANTEE_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"PROCESSING_THREADS_ENABLED"},{"p":"org.apache.kafka.streams","c":"TopologyConfig.TaskConfig","l":"processingExceptionHandler"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"processingExceptionHandler()"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"processingExceptionHandlerSupplier"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"processingThreadsEnabled(Map)","u":"processingThreadsEnabled(java.util.Map)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"PROCESSOR_WRAPPER_CLASS_CONFIG"},{"p":"org.apache.kafka.streams","c":"TopologyDescription.GlobalStore","l":"processor()"},{"p":"org.apache.kafka.streams.errors","c":"ErrorHandlerContext","l":"processorNodeId()"},{"p":"org.apache.kafka.streams.errors","c":"ProcessorStateException","l":"ProcessorStateException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"ProcessorStateException","l":"ProcessorStateException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"ProcessorStateException","l":"ProcessorStateException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"processValues(FixedKeyProcessorSupplier, Named, 
String...)","u":"processValues(org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier,org.apache.kafka.streams.kstream.Named,java.lang.String...)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"processValues(FixedKeyProcessorSupplier, String...)","u":"processValues(org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier,java.lang.String...)"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaType","l":"PRODUCE"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"producedTopicNames()"},{"p":"org.apache.kafka.connect.connector.policy","c":"ConnectorClientConfigRequest.ClientType","l":"PRODUCER"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"PRODUCER_CLIENT_PREFIX"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"PRODUCER_METRIC_GROUP_NAME"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"PRODUCER_PREFIX"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageManager.IndexType","l":"PRODUCER_SNAPSHOT"},{"p":"org.apache.kafka.streams","c":"ThreadMetadata","l":"producerClientIds()"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"producerConfig()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"ProducerConfig(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"ProducerConfig(Properties)","u":"%3Cinit%3E(java.util.Properties)"},{"p":"org.apache.kafka.clients.admin","c":"AbortTransactionSpec","l":"producerEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"ProducerState","l":"producerEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionDescription","l":"producerEpoch()"},{"p":"org.apache.kafka.common.errors","c":"ProducerFencedException","l":"ProducerFencedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"AbortTransactionSpec","l":"producerId()"},{"p":"org.apache.kafka.clients.admin","c":"ProducerState","l":"producerId()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionDescription","l":"producerId()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionListing","l":"producerId()"},{"p":"org.apache.kafka.clients.producer","c":"PreparedTxnState","l":"producerId()"},{"p":"org.apache.kafka.clients.admin","c":"FenceProducersResult","l":"producerId(String)","u":"producerId(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"ClientInstanceIds","l":"producerInstanceIds()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"producerPrefix(String)","u":"producerPrefix(java.lang.String)"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"ProducerRecord(String, Integer, K, V)","u":"%3Cinit%3E(java.lang.String,java.lang.Integer,K,V)"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"ProducerRecord(String, Integer, K, V, Iterable
)","u":"%3Cinit%3E(java.lang.String,java.lang.Integer,K,V,java.lang.Iterable)"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"ProducerRecord(String, Integer, Long, K, V)","u":"%3Cinit%3E(java.lang.String,java.lang.Integer,java.lang.Long,K,V)"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"ProducerRecord(String, Integer, Long, K, V, Iterable
)","u":"%3Cinit%3E(java.lang.String,java.lang.Integer,java.lang.Long,K,V,java.lang.Iterable)"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"ProducerRecord(String, K, V)","u":"%3Cinit%3E(java.lang.String,K,V)"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"ProducerRecord(String, V)","u":"%3Cinit%3E(java.lang.String,V)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"LogSegmentData","l":"producerSnapshotIndex()"},{"p":"org.apache.kafka.clients.admin","c":"ProducerState","l":"ProducerState(long, int, int, long, OptionalInt, OptionalLong)","u":"%3Cinit%3E(long,int,int,long,java.util.OptionalInt,java.util.OptionalLong)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"productionExceptionHandler()"},{"p":"org.apache.kafka.connect.data","c":"SchemaProjector","l":"project(Schema, Object, Schema)","u":"project(org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema)"},{"p":"org.apache.kafka.streams.processor","c":"StandbyUpdateListener.SuspendReason","l":"PROMOTED"},{"p":"org.apache.kafka.clients.admin","c":"ClassicGroupDescription","l":"protocol()"},{"p":"org.apache.kafka.clients.admin","c":"GroupListing","l":"protocol()"},{"p":"org.apache.kafka.clients.admin","c":"ClassicGroupDescription","l":"protocolData()"},{"p":"org.apache.kafka.clients.admin","c":"ListGroupsOptions","l":"protocolTypes()"},{"p":"org.apache.kafka.streams.processor","c":"Punctuator","l":"punctuate(long)"},{"p":"org.apache.kafka.streams.state","c":"VersionedKeyValueStore","l":"PUT_RETURN_CODE_NOT_PUT"},{"p":"org.apache.kafka.streams.state","c":"VersionedKeyValueStore","l":"PUT_RETURN_CODE_VALID_TO_UNDEFINED"},{"p":"org.apache.kafka.streams.state","c":"VersionedBytesStore","l":"put(Bytes, byte[], long)","u":"put(org.apache.kafka.common.utils.Bytes,byte[],long)"},{"p":"org.apache.kafka.connect.sink","c":"SinkTask","l":"put(Collection)","u":"put(java.util.Collection)"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkTask","l":"put(Collection)","u":"put(java.util.Collection)"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkTask","l":"put(Collection)","u":"put(java.util.Collection)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"put(Field, Object)","u":"put(org.apache.kafka.connect.data.Field,java.lang.Object)"},{"p":"org.apache.kafka.streams.state","c":"KeyValueStore","l":"put(K, V)","u":"put(K,V)"},{"p":"org.apache.kafka.streams.state","c":"VersionedKeyValueStore","l":"put(K, V, long)","u":"put(K,V,long)"},{"p":"org.apache.kafka.streams.state","c":"WindowStore","l":"put(K, V, long)","u":"put(K,V,long)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"put(String, Object)","u":"put(java.lang.String,java.lang.Object)"},{"p":"org.apache.kafka.streams.state","c":"SessionStore","l":"put(Windowed, AGG)","u":"put(org.apache.kafka.streams.kstream.Windowed,AGG)"},{"p":"org.apache.kafka.streams.state","c":"KeyValueStore","l":"putAll(List>)","u":"putAll(java.util.List)"},{"p":"org.apache.kafka.streams.state","c":"KeyValueStore","l":"putIfAbsent(K, V)","u":"putIfAbsent(K,V)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadataManager","l":"putRemotePartitionDeleteMetadata(RemotePartitionDeleteMetadata)","u":"putRemotePartitionDeleteMetadata(org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteMetadata)"},{"p":"org.apache.kafka.streams.processor","c":"StateStore","l":"query(Query, 
PositionBound, QueryConfig)","u":"query(org.apache.kafka.streams.query.Query,org.apache.kafka.streams.query.PositionBound,org.apache.kafka.streams.query.QueryConfig)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"query(StateQueryRequest)","u":"query(org.apache.kafka.streams.query.StateQueryRequest)"},{"p":"org.apache.kafka.streams.kstream","c":"GlobalKTable","l":"queryableStoreName()"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"queryableStoreName()"},{"p":"org.apache.kafka.streams","c":"StoreQueryParameters","l":"queryableStoreType()"},{"p":"org.apache.kafka.streams.state","c":"QueryableStoreTypes","l":"QueryableStoreTypes()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.query","c":"QueryConfig","l":"QueryConfig(boolean)","u":"%3Cinit%3E(boolean)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"queryMetadataForKey(String, K, Serializer)","u":"queryMetadataForKey(java.lang.String,K,org.apache.kafka.common.serialization.Serializer)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"queryMetadataForKey(String, K, StreamPartitioner)","u":"queryMetadataForKey(java.lang.String,K,org.apache.kafka.streams.processor.StreamPartitioner)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeMetadataQuorumResult","l":"quorumInfo()"},{"p":"org.apache.kafka.common.metrics","c":"MetricConfig","l":"quota()"},{"p":"org.apache.kafka.common.metrics","c":"Quota","l":"Quota(double, boolean)","u":"%3Cinit%3E(double,boolean)"},{"p":"org.apache.kafka.common.metrics","c":"MetricConfig","l":"quota(Quota)","u":"quota(org.apache.kafka.common.metrics.Quota)"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaCallback","l":"quotaLimit(ClientQuotaType, Map)","u":"quotaLimit(org.apache.kafka.server.quota.ClientQuotaType,java.util.Map)"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaCallback","l":"quotaMetricTags(ClientQuotaType, KafkaPrincipal, String)","u":"quotaMetricTags(org.apache.kafka.server.quota.ClientQuotaType,org.apache.kafka.common.security.auth.KafkaPrincipal,java.lang.String)"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaCallback","l":"quotaResetRequired(ClientQuotaType)","u":"quotaResetRequired(org.apache.kafka.server.quota.ClientQuotaType)"},{"p":"org.apache.kafka.common.metrics","c":"QuotaViolationException","l":"QuotaViolationException(KafkaMetric, double, 
double)","u":"%3Cinit%3E(org.apache.kafka.common.metrics.KafkaMetric,double,double)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_DOC"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RACK_AWARE_ASSIGNMENT_STRATEGY_DOC"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RACK_AWARE_ASSIGNMENT_STRATEGY_NONE"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RACK_AWARE_ASSIGNMENT_TAGS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_DOC"},{"p":"org.apache.kafka.common","c":"Node","l":"rack()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"AssignmentConfigs","l":"rackAwareAssignmentStrategy()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"AssignmentConfigs","l":"rackAwareAssignmentTags()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"AssignmentConfigs","l":"rackAwareNonOverlapCost()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"AssignmentConfigs","l":"rackAwareTrafficCost()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"rackId()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Subscription","l":"rackId()"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"MemberSubscription","l":"rackId()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsState","l":"rackId()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskTopicPartition","l":"rackIds()"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"SubscribedTopicDescriber","l":"racksForPartition(Uuid, int)","u":"racksForPartition(org.apache.kafka.common.Uuid,int)"},{"p":"org.apache.kafka.clients.admin","c":"RaftVoterEndpoint","l":"RaftVoterEndpoint(String, String, int)","u":"%3Cinit%3E(java.lang.String,java.lang.String,int)"},{"p":"org.apache.kafka.connect.connector","c":"ConnectorContext","l":"raiseError(Exception)","u":"raiseError(java.lang.Exception)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"ProcessId","l":"randomProcessId()"},{"p":"org.apache.kafka.common","c":"Uuid","l":"randomUuid()"},{"p":"org.apache.kafka.clients.consumer","c":"RangeAssignor","l":"RANGE_ASSIGNOR_NAME"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyKeyValueStore","l":"range(K, K)","u":"range(K,K)"},{"p":"org.apache.kafka.clients.consumer","c":"RangeAssignor","l":"RangeAssignor()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Rate","l":"Rate()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Rate","l":"Rate(SampledStat)","u":"%3Cinit%3E(org.apache.kafka.common.metrics.stats.SampledStat)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Rate","l":"Rate(TimeUnit)","u":"%3Cinit%3E(java.util.concurrent.TimeUnit)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Rate","l":"Rate(TimeUnit, 
SampledStat)","u":"%3Cinit%3E(java.util.concurrent.TimeUnit,org.apache.kafka.common.metrics.stats.SampledStat)"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"rawKey(K)"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"rawValue(V)"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"READ"},{"p":"org.apache.kafka.common","c":"IsolationLevel","l":"READ_COMMITTED"},{"p":"org.apache.kafka.common","c":"IsolationLevel","l":"READ_UNCOMMITTED"},{"p":"org.apache.kafka.streams","c":"TestOutputTopic","l":"readKeyValue()"},{"p":"org.apache.kafka.streams","c":"TestOutputTopic","l":"readKeyValuesToList()"},{"p":"org.apache.kafka.streams","c":"TestOutputTopic","l":"readKeyValuesToMap()"},{"p":"org.apache.kafka.streams","c":"TestOutputTopic","l":"readRecord()"},{"p":"org.apache.kafka.tools.api","c":"RecordReader","l":"readRecords(InputStream)","u":"readRecords(java.io.InputStream)"},{"p":"org.apache.kafka.streams","c":"TestOutputTopic","l":"readRecordsToList()"},{"p":"org.apache.kafka.streams","c":"TestOutputTopic","l":"readValue()"},{"p":"org.apache.kafka.streams","c":"TestOutputTopic","l":"readValuesToList()"},{"p":"org.apache.kafka.clients.admin","c":"RemoveMembersFromConsumerGroupOptions","l":"reason()"},{"p":"org.apache.kafka.clients.admin","c":"RemoveMembersFromConsumerGroupOptions","l":"reason(String)","u":"reason(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"ReassignmentInProgressException","l":"ReassignmentInProgressException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"ReassignmentInProgressException","l":"ReassignmentInProgressException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"ListPartitionReassignmentsResult","l":"reassignments()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"rebalance(Collection)","u":"rebalance(java.util.Collection)"},{"p":"org.apache.kafka.common.errors","c":"RebalanceInProgressException","l":"RebalanceInProgressException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"RebalanceInProgressException","l":"RebalanceInProgressException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"RebalanceInProgressException","l":"RebalanceInProgressException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"RebalanceInProgressException","l":"RebalanceInProgressException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"REBALANCING"},{"p":"org.apache.kafka.common.errors","c":"RebootstrapRequiredException","l":"RebootstrapRequiredException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"RebootstrapRequiredException","l":"RebootstrapRequiredException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"RECEIVE_BUFFER_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"RECEIVE_BUFFER_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"RECEIVE_BUFFER_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RECEIVE_BUFFER_CONFIG"},{"p":"org.apache.kafka.common.config","c":"ConfigValue","l":"recommendedValues()"},{"p":"org.apache.kafka.common.config","c":"ConfigValue","l":"recommendedValues(List)","u":"recommendedValues(java.util.List)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"recommender"},{"p":"org.apache.kafka.common","c":"ConsumerGroupState","l":"RECONCILING"},{"p":"org.apache.kafka.common","c":"GroupState","l":"RECONCILING"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"RECONFIGURABLE_CONFIGS"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"RECONFIGURABLE_CONFIGS"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"reconfigurableConfigs()"},{"p":"org.apache.kafka.common.metrics","c":"MetricsReporter","l":"reconfigurableConfigs()"},{"p":"org.apache.kafka.common","c":"Reconfigurable","l":"reconfigurableConfigs()"},{"p":"org.apache.kafka.common.security.auth","c":"SslEngineFactory","l":"reconfigurableConfigs()"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"reconfigure(Map)","u":"reconfigure(java.util.Map)"},{"p":"org.apache.kafka.common.metrics","c":"MetricsReporter","l":"reconfigure(Map)","u":"reconfigure(java.util.Map)"},{"p":"org.apache.kafka.common","c":"Reconfigurable","l":"reconfigure(Map)","u":"reconfigure(java.util.Map)"},{"p":"org.apache.kafka.connect.connector","c":"Connector","l":"reconfigure(Map)","u":"reconfigure(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkConnector","l":"reconfigure(Map)","u":"reconfigure(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceConnector","l":"reconfigure(Map)","u":"reconfigure(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"RECONNECT_BACKOFF_MAX_MS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"RECONNECT_BACKOFF_MAX_MS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"RECONNECT_BACKOFF_MAX_MS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RECONNECT_BACKOFF_MAX_MS_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"RECONNECT_BACKOFF_MS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"RECONNECT_BACKOFF_MS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"RECONNECT_BACKOFF_MS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RECONNECT_BACKOFF_MS_CONFIG"},{"p":"org.apache.kafka.common.metrics","c":"Sensor","l":"record()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext.CapturedForward","l":"record()"},{"p":"org.apache.kafka.common.metrics","c":"Sensor","l":"record(double)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram","l":"record(double)"},{"p":"org.apache.kafka.common.metrics","c":"Sensor","l":"record(double, long)","u":"record(double,long)"},{"p":"org.apache.kafka.common.metrics","c":"Sensor","l":"record(double, long, boolean)","u":"record(double,long,boolean)"},{"p":"org.apache.kafka.streams.processor.api","c":"Record","l":"Record(K, V, 
long)","u":"%3Cinit%3E(K,V,long)"},{"p":"org.apache.kafka.streams.processor.api","c":"Record","l":"Record(K, V, long, Headers)","u":"%3Cinit%3E(K,V,long,org.apache.kafka.common.header.Headers)"},{"p":"org.apache.kafka.common.metrics","c":"Stat","l":"record(MetricConfig, double, long)","u":"record(org.apache.kafka.common.metrics.MetricConfig,double,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"CumulativeCount","l":"record(MetricConfig, double, long)","u":"record(org.apache.kafka.common.metrics.MetricConfig,double,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"CumulativeSum","l":"record(MetricConfig, double, long)","u":"record(org.apache.kafka.common.metrics.MetricConfig,double,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Meter","l":"record(MetricConfig, double, long)","u":"record(org.apache.kafka.common.metrics.MetricConfig,double,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Rate","l":"record(MetricConfig, double, long)","u":"record(org.apache.kafka.common.metrics.MetricConfig,double,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"SampledStat","l":"record(MetricConfig, double, long)","u":"record(org.apache.kafka.common.metrics.MetricConfig,double,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"TokenBucket","l":"record(MetricConfig, double, long)","u":"record(org.apache.kafka.common.metrics.MetricConfig,double,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Value","l":"record(MetricConfig, double, long)","u":"record(org.apache.kafka.common.metrics.MetricConfig,double,long)"},{"p":"org.apache.kafka.common.errors","c":"RecordBatchTooLargeException","l":"RecordBatchTooLargeException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"RecordBatchTooLargeException","l":"RecordBatchTooLargeException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"RecordBatchTooLargeException","l":"RecordBatchTooLargeException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"RecordBatchTooLargeException","l":"RecordBatchTooLargeException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"recordCollector()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"recordCollector()"},{"p":"org.apache.kafka.common.errors","c":"RecordDeserializationException","l":"RecordDeserializationException(RecordDeserializationException.DeserializationExceptionOrigin, TopicPartition, long, long, TimestampType, ByteBuffer, ByteBuffer, Headers, String, Throwable)","u":"%3Cinit%3E(org.apache.kafka.common.errors.RecordDeserializationException.DeserializationExceptionOrigin,org.apache.kafka.common.TopicPartition,long,long,org.apache.kafka.common.record.TimestampType,java.nio.ByteBuffer,java.nio.ByteBuffer,org.apache.kafka.common.header.Headers,java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"RecordDeserializationException","l":"RecordDeserializationException(TopicPartition, long, String, 
Throwable)","u":"%3Cinit%3E(org.apache.kafka.common.TopicPartition,long,java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.metrics","c":"MetricConfig","l":"recordLevel()"},{"p":"org.apache.kafka.common.metrics","c":"MetricConfig","l":"recordLevel(Sensor.RecordingLevel)","u":"recordLevel(org.apache.kafka.common.metrics.Sensor.RecordingLevel)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"recordMetadata()"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessingContext","l":"recordMetadata()"},{"p":"org.apache.kafka.streams.processor","c":"StateStoreContext","l":"recordMetadata()"},{"p":"org.apache.kafka.clients.producer","c":"RecordMetadata","l":"RecordMetadata(TopicPartition, long, int, long, int, int)","u":"%3Cinit%3E(org.apache.kafka.common.TopicPartition,long,int,long,int,int)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecords","l":"records(String)","u":"records(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecords","l":"records(TopicPartition)","u":"records(org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.common.errors","c":"RecordTooLargeException","l":"RecordTooLargeException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"RecordTooLargeException","l":"RecordTooLargeException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"RecordTooLargeException","l":"RecordTooLargeException(String, Map)","u":"%3Cinit%3E(java.lang.String,java.util.Map)"},{"p":"org.apache.kafka.common.errors","c":"RecordTooLargeException","l":"RecordTooLargeException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"RecordTooLargeException","l":"RecordTooLargeException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"RecordTooLargeException","l":"recordTooLargePartitions()"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedStream","l":"reduce(Reducer)","u":"reduce(org.apache.kafka.streams.kstream.Reducer)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedKStream","l":"reduce(Reducer)","u":"reduce(org.apache.kafka.streams.kstream.Reducer)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedKStream","l":"reduce(Reducer)","u":"reduce(org.apache.kafka.streams.kstream.Reducer)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedStream","l":"reduce(Reducer, Materialized>)","u":"reduce(org.apache.kafka.streams.kstream.Reducer,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedKStream","l":"reduce(Reducer, Materialized>)","u":"reduce(org.apache.kafka.streams.kstream.Reducer,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedKStream","l":"reduce(Reducer, Materialized>)","u":"reduce(org.apache.kafka.streams.kstream.Reducer,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedKStream","l":"reduce(Reducer, Named)","u":"reduce(org.apache.kafka.streams.kstream.Reducer,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedKStream","l":"reduce(Reducer, Named)","u":"reduce(org.apache.kafka.streams.kstream.Reducer,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedStream","l":"reduce(Reducer, Named, 
Materialized>)","u":"reduce(org.apache.kafka.streams.kstream.Reducer,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedKStream","l":"reduce(Reducer, Named, Materialized>)","u":"reduce(org.apache.kafka.streams.kstream.Reducer,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedKStream","l":"reduce(Reducer, Named, Materialized>)","u":"reduce(org.apache.kafka.streams.kstream.Reducer,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedTable","l":"reduce(Reducer, Reducer)","u":"reduce(org.apache.kafka.streams.kstream.Reducer,org.apache.kafka.streams.kstream.Reducer)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedTable","l":"reduce(Reducer, Reducer, Materialized>)","u":"reduce(org.apache.kafka.streams.kstream.Reducer,org.apache.kafka.streams.kstream.Reducer,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedTable","l":"reduce(Reducer, Reducer, Named, Materialized>)","u":"reduce(org.apache.kafka.streams.kstream.Reducer,org.apache.kafka.streams.kstream.Reducer,org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"REFERENCE_CONTAINER_PARTITION_ASSIGNOR"},{"p":"org.apache.kafka.common.errors","c":"RefreshRetriableException","l":"RefreshRetriableException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"RefreshRetriableException","l":"RefreshRetriableException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"RefreshRetriableException","l":"RefreshRetriableException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"RefreshRetriableException","l":"RefreshRetriableException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.connect.rest","c":"ConnectRestExtension","l":"register(ConnectRestExtensionContext)","u":"register(org.apache.kafka.connect.rest.ConnectRestExtensionContext)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"register(StateStore, StateRestoreCallback)","u":"register(org.apache.kafka.streams.processor.StateStore,org.apache.kafka.streams.processor.StateRestoreCallback)"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"register(StateStore, StateRestoreCallback)","u":"register(org.apache.kafka.streams.processor.StateStore,org.apache.kafka.streams.processor.StateRestoreCallback)"},{"p":"org.apache.kafka.streams.processor","c":"StateStoreContext","l":"register(StateStore, StateRestoreCallback)","u":"register(org.apache.kafka.streams.processor.StateStore,org.apache.kafka.streams.processor.StateRestoreCallback)"},{"p":"org.apache.kafka.streams.processor","c":"StateStoreContext","l":"register(StateStore, StateRestoreCallback, 
CommitCallback)","u":"register(org.apache.kafka.streams.processor.StateStore,org.apache.kafka.streams.processor.StateRestoreCallback,org.apache.kafka.streams.processor.CommitCallback)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"registerMetricForSubscription(KafkaMetric)","u":"registerMetricForSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"registerMetricForSubscription(KafkaMetric)","u":"registerMetricForSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"registerMetricForSubscription(KafkaMetric)","u":"registerMetricForSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"registerMetricForSubscription(KafkaMetric)","u":"registerMetricForSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"registerMetricForSubscription(KafkaMetric)","u":"registerMetricForSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"registerMetricForSubscription(KafkaMetric)","u":"registerMetricForSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"registerMetricForSubscription(KafkaMetric)","u":"registerMetricForSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"registerMetricForSubscription(KafkaMetric)","u":"registerMetricForSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"registerMetricForSubscription(KafkaMetric)","u":"registerMetricForSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"registerMetricForSubscription(KafkaMetric)","u":"registerMetricForSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"registerMetricForSubscription(KafkaMetric)","u":"registerMetricForSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"registerMetricForSubscription(KafkaMetric)","u":"registerMetricForSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.consumer","c":"AcknowledgeType","l":"REJECT"},{"p":"org.apache.kafka.clients.consumer","c":"AcknowledgeType","l":"RELEASE"},{"p":"org.apache.kafka.clients.consumer","c":"CloseOptions.GroupMembershipOperation","l":"REMAIN_IN_GROUP"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_COPY_BYTES_PER_SEC_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_COPY_LAG_BYTES_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_COPY_LAG_SEGMENTS_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_COPY_REQUESTS_PER_SEC_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_DELETE_LAG_BYTES_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_DELETE_LAG_SEGMENTS_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_DELETE_REQUESTS_PER_SEC_METRI
C"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_FETCH_BYTES_PER_SEC_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_FETCH_REQUESTS_PER_SEC_METRIC"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"REMOTE_LOG_COPY_DISABLE_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"REMOTE_LOG_COPY_DISABLE_DOC"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"REMOTE_LOG_DELETE_ON_DISABLE_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"REMOTE_LOG_DELETE_ON_DISABLE_DOC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_LOG_MANAGER_TASKS_AVG_IDLE_PERCENT_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_LOG_METADATA_COUNT_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_LOG_READER_AVG_IDLE_PERCENT_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_LOG_READER_FETCH_RATE_AND_TIME_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_LOG_READER_TASK_QUEUE_SIZE_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_LOG_SIZE_BYTES_METRIC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_LOG_SIZE_COMPUTATION_TIME_METRIC"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"REMOTE_LOG_STORAGE_ENABLE_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"REMOTE_LOG_STORAGE_ENABLE_DOC"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"REMOTE_STORAGE_THREAD_POOL_METRICS"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClient","l":"remoteConsumerOffsets(String, String, Duration)","u":"remoteConsumerOffsets(java.lang.String,java.lang.String,java.time.Duration)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"remoteLogSegmentId()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadataUpdate","l":"remoteLogSegmentId()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentId","l":"RemoteLogSegmentId(TopicIdPartition, Uuid)","u":"%3Cinit%3E(org.apache.kafka.common.TopicIdPartition,org.apache.kafka.common.Uuid)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"RemoteLogSegmentMetadata(RemoteLogSegmentId, long, long, long, int, long, int, Map)","u":"%3Cinit%3E(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId,long,long,long,int,long,int,java.util.Map)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"RemoteLogSegmentMetadata(RemoteLogSegmentId, long, long, long, int, long, int, Map, boolean)","u":"%3Cinit%3E(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId,long,long,long,int,long,int,java.util.Map,boolean)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"RemoteLogSegmentMetadata(RemoteLogSegmentId, long, long, long, int, long, int, Optional, RemoteLogSegmentState, 
Map)","u":"%3Cinit%3E(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId,long,long,long,int,long,int,java.util.Optional,org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState,java.util.Map)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"RemoteLogSegmentMetadata(RemoteLogSegmentId, long, long, long, int, long, int, Optional, RemoteLogSegmentState, Map, boolean)","u":"%3Cinit%3E(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId,long,long,long,int,long,int,java.util.Optional,org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState,java.util.Map,boolean)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadataManager","l":"remoteLogSegmentMetadata(TopicIdPartition, int, long)","u":"remoteLogSegmentMetadata(org.apache.kafka.common.TopicIdPartition,int,long)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadataUpdate","l":"RemoteLogSegmentMetadataUpdate(RemoteLogSegmentId, long, Optional, RemoteLogSegmentState, int)","u":"%3Cinit%3E(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId,long,java.util.Optional,org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState,int)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadataManager","l":"remoteLogSize(TopicIdPartition, int)","u":"remoteLogSize(org.apache.kafka.common.TopicIdPartition,int)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemotePartitionDeleteMetadata","l":"RemotePartitionDeleteMetadata(TopicIdPartition, RemotePartitionDeleteState, long, int)","u":"%3Cinit%3E(org.apache.kafka.common.TopicIdPartition,org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteState,long,int)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteResourceNotFoundException","l":"RemoteResourceNotFoundException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteResourceNotFoundException","l":"RemoteResourceNotFoundException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteResourceNotFoundException","l":"RemoteResourceNotFoundException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageException","l":"RemoteStorageException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageException","l":"RemoteStorageException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageException","l":"RemoteStorageException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageMetrics","l":"RemoteStorageMetrics()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClient","l":"remoteTopics()"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClient","l":"remoteTopics(String)","u":"remoteTopics(java.lang.String)"},{"p":"org.apache.kafka.common.header","c":"Headers","l":"remove(String)","u":"remove(java.lang.String)"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"remove(String)","u":"remove(java.lang.String)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"remove(String)","u":"remove(java.lang.String)"},{"p":"org.apache.kafka.streams.state","c":"SessionStore","l":"remove(Windowed)","u":"remove(org.apache.kafka.streams.kstream.Windowed)"},{"p":"org.apache.kafka.clients.admin","c":"RemoveMembersFromConsumerGroupOptions","l":"removeAll()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"removeMembersFromConsumerGroup(String, RemoveMembersFromConsumerGroupOptions)","u":"removeMembersFromConsumerGroup(java.lang.String,org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"removeMembersFromConsumerGroup(String, RemoveMembersFromConsumerGroupOptions)","u":"removeMembersFromConsumerGroup(java.lang.String,org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"removeMembersFromConsumerGroup(String, RemoveMembersFromConsumerGroupOptions)","u":"removeMembersFromConsumerGroup(java.lang.String,org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions)"},{"p":"org.apache.kafka.clients.admin","c":"RemoveMembersFromConsumerGroupOptions","l":"RemoveMembersFromConsumerGroupOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"RemoveMembersFromConsumerGroupOptions","l":"RemoveMembersFromConsumerGroupOptions(Collection)","u":"%3Cinit%3E(java.util.Collection)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"removeMetric(MetricName)","u":"removeMetric(org.apache.kafka.common.MetricName)"},{"p":"org.apache.kafka.common.metrics","c":"PluginMetrics","l":"removeMetric(MetricName)","u":"removeMetric(org.apache.kafka.common.MetricName)"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaCallback","l":"removeQuota(ClientQuotaType, ClientQuotaEntity)","u":"removeQuota(org.apache.kafka.server.quota.ClientQuotaType,org.apache.kafka.server.quota.ClientQuotaEntity)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"removeRaftVoter(int, Uuid)","u":"removeRaftVoter(int,org.apache.kafka.common.Uuid)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"removeRaftVoter(int, Uuid, RemoveRaftVoterOptions)","u":"removeRaftVoter(int,org.apache.kafka.common.Uuid,org.apache.kafka.clients.admin.RemoveRaftVoterOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"removeRaftVoter(int, Uuid, RemoveRaftVoterOptions)","u":"removeRaftVoter(int,org.apache.kafka.common.Uuid,org.apache.kafka.clients.admin.RemoveRaftVoterOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"removeRaftVoter(int, Uuid, 
RemoveRaftVoterOptions)","u":"removeRaftVoter(int,org.apache.kafka.common.Uuid,org.apache.kafka.clients.admin.RemoveRaftVoterOptions)"},{"p":"org.apache.kafka.clients.admin","c":"RemoveRaftVoterOptions","l":"RemoveRaftVoterOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"removeReporter(MetricsReporter)","u":"removeReporter(org.apache.kafka.common.metrics.MetricsReporter)"},{"p":"org.apache.kafka.streams","c":"StreamsMetrics","l":"removeSensor(Sensor)","u":"removeSensor(org.apache.kafka.common.metrics.Sensor)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"removeSensor(String)","u":"removeSensor(java.lang.String)"},{"p":"org.apache.kafka.common.metrics","c":"PluginMetrics","l":"removeSensor(String)","u":"removeSensor(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"removeStreamThread()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"removeStreamThread(Duration)","u":"removeStreamThread(java.time.Duration)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment","l":"removeTask(KafkaStreamsAssignment.AssignedTask)","u":"removeTask(org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask)"},{"p":"org.apache.kafka.clients.admin","c":"PartitionReassignment","l":"removingReplicas()"},{"p":"org.apache.kafka.connect.header","c":"Header","l":"rename(String)","u":"rename(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"renewDelegationToken(byte[])"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"renewDelegationToken(byte[], RenewDelegationTokenOptions)","u":"renewDelegationToken(byte[],org.apache.kafka.clients.admin.RenewDelegationTokenOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"renewDelegationToken(byte[], RenewDelegationTokenOptions)","u":"renewDelegationToken(byte[],org.apache.kafka.clients.admin.RenewDelegationTokenOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"renewDelegationToken(byte[], 
RenewDelegationTokenOptions)","u":"renewDelegationToken(byte[],org.apache.kafka.clients.admin.RenewDelegationTokenOptions)"},{"p":"org.apache.kafka.clients.admin","c":"RenewDelegationTokenOptions","l":"RenewDelegationTokenOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"CreateDelegationTokenOptions","l":"renewers()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"renewers()"},{"p":"org.apache.kafka.clients.admin","c":"CreateDelegationTokenOptions","l":"renewers(List)","u":"renewers(java.util.List)"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"renewersAsString()"},{"p":"org.apache.kafka.clients.admin","c":"RenewDelegationTokenOptions","l":"renewTimePeriodMs()"},{"p":"org.apache.kafka.clients.admin","c":"RenewDelegationTokenOptions","l":"renewTimePeriodMs(long)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"REPARTITION_PURGE_INTERVAL_MS_CONFIG"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"repartition()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"repartition(Repartitioned)","u":"repartition(org.apache.kafka.streams.kstream.Repartitioned)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubtopologyDescription","l":"repartitionSinkTopics()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubtopologyDescription","l":"repartitionSourceTopics()"},{"p":"org.apache.kafka.streams.errors","c":"StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse","l":"REPLACE_THREAD"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo.ReplicaState","l":"replicaDirectoryId()"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo.ReplicaState","l":"replicaId()"},{"p":"org.apache.kafka.clients.admin","c":"ReplicaInfo","l":"ReplicaInfo(long, long, boolean)","u":"%3Cinit%3E(long,long,boolean)"},{"p":"org.apache.kafka.clients.admin","c":"LogDirDescription","l":"replicaInfos()"},{"p":"org.apache.kafka.common.errors","c":"ReplicaNotAvailableException","l":"ReplicaNotAvailableException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"ReplicaNotAvailableException","l":"ReplicaNotAvailableException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"ReplicaNotAvailableException","l":"ReplicaNotAvailableException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"PartitionReassignment","l":"replicas()"},{"p":"org.apache.kafka.common","c":"PartitionInfo","l":"replicas()"},{"p":"org.apache.kafka.common","c":"TopicPartitionInfo","l":"replicas()"},{"p":"org.apache.kafka.clients.admin","c":"NewTopic","l":"replicasAssignments()"},{"p":"org.apache.kafka.server.policy","c":"CreateTopicPolicy.RequestMetadata","l":"replicasAssignments()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"REPLICATION_FACTOR_CONFIG"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"REPLICATION_POLICY_CLASS"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"REPLICATION_POLICY_CLASS_DEFAULT"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"REPLICATION_POLICY_SEPARATOR"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"REPLICATION_POLICY_SEPARATOR_DEFAULT"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsResult.TopicMetadataAndConfig","l":"replicationFactor()"},{"p":"org.apache.kafka.clients.admin","c":"NewTopic","l":"replicationFactor()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubtopologyDescription.TopicInfo","l":"replicationFactor()"},{"p":"org.apache.kafka.server.policy","c":"CreateTopicPolicy.RequestMetadata","l":"replicationFactor()"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsResult","l":"replicationFactor(String)","u":"replicationFactor(java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"RemoteClusterUtils","l":"replicationHops(Map, String)","u":"replicationHops(java.util.Map,java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClient","l":"replicationHops(String)","u":"replicationHops(java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClient","l":"replicationPolicy()"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClientConfig","l":"replicationPolicy()"},{"p":"org.apache.kafka.connect.sink","c":"ErrantRecordReporter","l":"report(SinkRecord, Throwable)","u":"report(org.apache.kafka.connect.sink.SinkRecord,java.lang.Throwable)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"reporters()"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaType","l":"REQUEST"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"REQUEST_TIMEOUT_MS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"REQUEST_TIMEOUT_MS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"REQUEST_TIMEOUT_MS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"REQUEST_TIMEOUT_MS_CONFIG"},{"p":"org.apache.kafka.connect.sink","c":"SinkTaskContext","l":"requestCommit()"},{"p":"org.apache.kafka.common.config","c":"SslClientAuth","l":"REQUESTED"},{"p":"org.apache.kafka.server.policy","c":"AlterConfigPolicy.RequestMetadata","l":"RequestMetadata(ConfigResource, Map)","u":"%3Cinit%3E(org.apache.kafka.common.config.ConfigResource,java.util.Map)"},{"p":"org.apache.kafka.server.policy","c":"CreateTopicPolicy.RequestMetadata","l":"RequestMetadata(String, Integer, Short, Map>, 
Map)","u":"%3Cinit%3E(java.lang.String,java.lang.Integer,java.lang.Short,java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.connect.connector","c":"ConnectorContext","l":"requestTaskReconfiguration()"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizableRequestContext","l":"requestType()"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizableRequestContext","l":"requestVersion()"},{"p":"org.apache.kafka.streams.query","c":"StateQueryRequest","l":"requireActive()"},{"p":"org.apache.kafka.common.config","c":"SslClientAuth","l":"REQUIRED"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"required()"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupOffsetsOptions","l":"requireStable()"},{"p":"org.apache.kafka.clients.admin","c":"ListStreamsGroupOffsetsOptions","l":"requireStable()"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupOffsetsOptions","l":"requireStable(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"ListStreamsGroupOffsetsOptions","l":"requireStable(boolean)"},{"p":"org.apache.kafka.common","c":"Uuid","l":"RESERVED"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"resetCommit()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"resetCommit()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"resetForwards()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"resetForwards()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"resetShouldRebalance()"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"resolveDslStoreSuppliers()"},{"p":"org.apache.kafka.common.errors","c":"DuplicateResourceException","l":"resource()"},{"p":"org.apache.kafka.common.errors","c":"ResourceNotFoundException","l":"resource()"},{"p":"org.apache.kafka.server.policy","c":"AlterConfigPolicy.RequestMetadata","l":"resource()"},{"p":"org.apache.kafka.common.resource","c":"Resource","l":"Resource(ResourceType, String)","u":"%3Cinit%3E(org.apache.kafka.common.resource.ResourceType,java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"ResourceNotFoundException","l":"ResourceNotFoundException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"ResourceNotFoundException","l":"ResourceNotFoundException(String, String)","u":"%3Cinit%3E(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"ResourceNotFoundException","l":"ResourceNotFoundException(String, String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"ResourceNotFoundException","l":"ResourceNotFoundException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.server.authorizer","c":"Action","l":"resourcePattern()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePattern","l":"ResourcePattern(ResourceType, String, PatternType)","u":"%3Cinit%3E(org.apache.kafka.common.resource.ResourceType,java.lang.String,org.apache.kafka.common.resource.PatternType)"},{"p":"org.apache.kafka.common.resource","c":"ResourcePatternFilter","l":"ResourcePatternFilter(ResourceType, String, 
PatternType)","u":"%3Cinit%3E(org.apache.kafka.common.resource.ResourceType,java.lang.String,org.apache.kafka.common.resource.PatternType)"},{"p":"org.apache.kafka.server.authorizer","c":"Action","l":"resourceReferenceCount()"},{"p":"org.apache.kafka.common.resource","c":"Resource","l":"resourceType()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePattern","l":"resourceType()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePatternFilter","l":"resourceType()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RESTORE_CONSUMER_PREFIX"},{"p":"org.apache.kafka.streams.processor","c":"BatchingStateRestoreCallback","l":"restore(byte[], byte[])","u":"restore(byte[],byte[])"},{"p":"org.apache.kafka.streams.processor","c":"StateRestoreCallback","l":"restore(byte[], byte[])","u":"restore(byte[],byte[])"},{"p":"org.apache.kafka.streams.processor","c":"BatchingStateRestoreCallback","l":"restoreAll(Collection>)","u":"restoreAll(java.util.Collection)"},{"p":"org.apache.kafka.streams","c":"ThreadMetadata","l":"restoreConsumerClientId()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"restoreConsumerPrefix(String)","u":"restoreConsumerPrefix(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"TerminateTransactionResult","l":"result()"},{"p":"org.apache.kafka.streams.query","c":"MultiVersionedKeyQuery","l":"resultOrder()"},{"p":"org.apache.kafka.streams.query","c":"RangeQuery","l":"resultOrder()"},{"p":"org.apache.kafka.streams.query","c":"TimestampedRangeQuery","l":"resultOrder()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"resume()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"resume(Collection)","u":"resume(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"resume(Collection)","u":"resume(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"resume(Collection)","u":"resume(java.util.Collection)"},{"p":"org.apache.kafka.connect.sink","c":"SinkTaskContext","l":"resume(TopicPartition...)","u":"resume(org.apache.kafka.common.TopicPartition...)"},{"p":"org.apache.kafka.streams.state","c":"DslWindowParams","l":"retainDuplicates()"},{"p":"org.apache.kafka.streams.state","c":"WindowBytesStoreSupplier","l":"retainDuplicates()"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"retainLatest()"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"retainLatest()"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"retainLatest(String)","u":"retainLatest(java.lang.String)"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"retainLatest(String)","u":"retainLatest(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"RETENTION_BYTES_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"RETENTION_BYTES_DOC"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"RETENTION_MS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"RETENTION_MS_DOC"},{"p":"org.apache.kafka.streams.state","c":"DslSessionParams","l":"retentionPeriod()"},{"p":"org.apache.kafka.streams.state","c":"DslWindowParams","l":"retentionPeriod()"},{"p":"org.apache.kafka.streams.state","c":"SessionBytesStoreSupplier","l":"retentionPeriod()"},{"p":"org.apache.kafka.streams.state","c":"WindowBytesStoreSupplier","l":"retentionPeriod()"},{"p":"org.apache.kafka.clients.consumer","c":"RetriableCommitFailedException","l":"RetriableCommitFailedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apac
he.kafka.clients.consumer","c":"RetriableCommitFailedException","l":"RetriableCommitFailedException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.consumer","c":"RetriableCommitFailedException","l":"RetriableCommitFailedException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"RetriableException","l":"RetriableException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"RetriableException","l":"RetriableException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.errors","c":"RetriableException","l":"RetriableException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"RetriableException","l":"RetriableException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.connect.errors","c":"RetriableException","l":"RetriableException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"RetriableException","l":"RetriableException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.connect.errors","c":"RetriableException","l":"RetriableException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"RETRIES_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"RETRIES_CONFIG"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"ClientCredentialsJwtRetriever","l":"retrieve()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"DefaultJwtRetriever","l":"retrieve()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"FileJwtRetriever","l":"retrieve()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"JwtBearerJwtRetriever","l":"retrieve()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"JwtRetriever","l":"retrieve()"},{"p":"org.apache.kafka.streams.errors","c":"ProductionExceptionHandler.ProductionExceptionHandlerResponse","l":"RETRY"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"RETRY_BACKOFF_MAX_MS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"RETRY_BACKOFF_MAX_MS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"RETRY_BACKOFF_MAX_MS_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"RETRY_BACKOFF_MS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"RETRY_BACKOFF_MS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"RETRY_BACKOFF_MS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"RETRY_BACKOFF_MS_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"CreatePartitionsOptions","l":"retryOnQuotaViolation(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsOptions","l":"retryOnQuotaViolation(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteTopicsOptions","l":"retryOnQuotaViolation(boolean)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"REUSE_KTABLE_SOURCE_TOPICS"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyKeyValueStore","l":"reverseAll()"},{"p":"org.apache.kafka.streams.state","c":"ReadOnlyKeyValueStore","l":"reverseRange(K, 
K)","u":"reverseRange(K,K)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized.StoreType","l":"ROCKS_DB"},{"p":"org.apache.kafka.streams.state","c":"BuiltInDslStoreSuppliers","l":"ROCKS_DB"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"ROCKS_DB"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"ROCKSDB_CONFIG_SETTER_CLASS_CONFIG"},{"p":"org.apache.kafka.streams.state","c":"BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers","l":"RocksDBDslStoreSuppliers()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.consumer","c":"RoundRobinAssignor","l":"ROUNDROBIN_ASSIGNOR_NAME"},{"p":"org.apache.kafka.clients.consumer","c":"RoundRobinAssignor","l":"RoundRobinAssignor()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.producer","c":"RoundRobinPartitioner","l":"RoundRobinPartitioner()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"RUNNING"},{"p":"org.apache.kafka.clients.admin","c":"FeatureUpdate.UpgradeType","l":"SAFE_DOWNGRADE"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialUpsertion","l":"salt()"},{"p":"org.apache.kafka.common.security.scram","c":"ScramCredential","l":"salt()"},{"p":"org.apache.kafka.common.metrics.stats","c":"SampledStat","l":"SampledStat(double)","u":"%3Cinit%3E(double)"},{"p":"org.apache.kafka.common.metrics","c":"MetricConfig","l":"samples()"},{"p":"org.apache.kafka.common.metrics","c":"MetricConfig","l":"samples(int)"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_CLIENT_CALLBACK_HANDLER_CLASS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_CLIENT_CALLBACK_HANDLER_CLASS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_JAAS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_JAAS_CONFIG_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_KERBEROS_KINIT_CMD"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_KERBEROS_KINIT_CMD_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_KERBEROS_SERVICE_NAME"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_KERBEROS_SERVICE_NAME_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_KERBEROS_TICKET_RENEW_JITTER"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_KERBEROS_TICKET_RENEW_JITTER_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_CALLBACK_HANDLER_CLASS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_CALLBACK_HANDLER_CLASS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_CLASS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_CLASS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_CONNECT_TIMEOUT_MS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_CONNECT_TIMEOUT_MS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_READ_TIMEOUT_MS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_READ_TIMEOUT_MS_DOC"},{"p":"org.apache.kafka.common.config",
"c":"SaslConfigs","l":"SASL_LOGIN_REFRESH_BUFFER_SECONDS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_REFRESH_BUFFER_SECONDS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_REFRESH_WINDOW_FACTOR"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_REFRESH_WINDOW_FACTOR_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_REFRESH_WINDOW_JITTER"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_REFRESH_WINDOW_JITTER_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_RETRY_BACKOFF_MAX_MS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_RETRY_BACKOFF_MAX_MS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_RETRY_BACKOFF_MS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_LOGIN_RETRY_BACKOFF_MS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_MECHANISM"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_MECHANISM_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_ALGORITHM"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_ALGORITHM_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_FILE"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_FILE_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE"},{"p":"org.apache.kafka.common.confi
g","c":"SaslConfigs","l":"SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_EXPECTED_AUDIENCE"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_EXPECTED_AUDIENCE_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_EXPECTED_ISSUER"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_EXPECTED_ISSUER_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_HEADER_URLENCODE"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_HEADER_URLENCODE_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_JWKS_ENDPOINT_URL"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_JWKS_ENDPOINT_URL_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_SCOPE"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_SCOPE_CLAIM_NAME"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_SCOPE_CLAIM_NAME_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_SCOPE_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_SUB_CLAIM_NAME"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_SUB_CLAIM_NAME_DOC"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL_DOC"},{"p":"org.apache.kafka.common.security.auth","c":"SecurityProtocol","l":"SASL_PLAINTEXT"},{"p":"org.apache.kafka.common.security.auth","c":"SecurityProtocol","l":"SASL_SSL"},{"p":"or
g.apache.kafka.common.security.auth","c":"SaslAuthenticationContext","l":"SaslAuthenticationContext(SaslServer, SecurityProtocol, InetAddress, String)","u":"%3Cinit%3E(javax.security.sasl.SaslServer,org.apache.kafka.common.security.auth.SecurityProtocol,java.net.InetAddress,java.lang.String)"},{"p":"org.apache.kafka.common.security.auth","c":"SaslAuthenticationContext","l":"SaslAuthenticationContext(SaslServer, SecurityProtocol, InetAddress, String, Optional)","u":"%3Cinit%3E(javax.security.sasl.SaslServer,org.apache.kafka.common.security.auth.SecurityProtocol,java.net.InetAddress,java.lang.String,java.util.Optional)"},{"p":"org.apache.kafka.common.errors","c":"SaslAuthenticationException","l":"SaslAuthenticationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"SaslAuthenticationException","l":"SaslAuthenticationException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.config","c":"SaslConfigs","l":"SaslConfigs()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.auth","c":"SaslExtensions","l":"SaslExtensions(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.common.security.auth","c":"SaslExtensionsCallback","l":"SaslExtensionsCallback()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.data","c":"Decimal","l":"SCALE_FIELD"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"schedule(Duration, PunctuationType, Punctuator)","u":"schedule(java.time.Duration,org.apache.kafka.streams.processor.PunctuationType,org.apache.kafka.streams.processor.Punctuator)"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessingContext","l":"schedule(Duration, PunctuationType, Punctuator)","u":"schedule(java.time.Duration,org.apache.kafka.streams.processor.PunctuationType,org.apache.kafka.streams.processor.Punctuator)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"schedule(Duration, PunctuationType, Punctuator)","u":"schedule(java.time.Duration,org.apache.kafka.streams.processor.PunctuationType,org.apache.kafka.streams.processor.Punctuator)"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"schedule(Duration, PunctuationType, 
Punctuator)","u":"schedule(java.time.Duration,org.apache.kafka.streams.processor.PunctuationType,org.apache.kafka.streams.processor.Punctuator)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"scheduledPunctuators()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"scheduledPunctuators()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"scheduleNopPollTask()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"schedulePollTask(Runnable)","u":"schedulePollTask(java.lang.Runnable)"},{"p":"org.apache.kafka.connect.data","c":"Date","l":"SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"Time","l":"SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"Timestamp","l":"SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"schema()"},{"p":"org.apache.kafka.connect.data","c":"Field","l":"schema()"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"schema()"},{"p":"org.apache.kafka.connect.data","c":"SchemaAndValue","l":"schema()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"schema()"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"schema()"},{"p":"org.apache.kafka.connect.header","c":"Header","l":"schema()"},{"p":"org.apache.kafka.connect.data","c":"Decimal","l":"schema(int)"},{"p":"org.apache.kafka.connect.data","c":"SchemaAndValue","l":"SchemaAndValue(Schema, Object)","u":"%3Cinit%3E(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"SchemaBuilder(Schema.Type)","u":"%3Cinit%3E(org.apache.kafka.connect.data.Schema.Type)"},{"p":"org.apache.kafka.connect.errors","c":"SchemaBuilderException","l":"SchemaBuilderException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.errors","c":"SchemaBuilderException","l":"SchemaBuilderException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.connect.errors","c":"SchemaBuilderException","l":"SchemaBuilderException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.connect.data","c":"SchemaProjector","l":"SchemaProjector()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.errors","c":"SchemaProjectorException","l":"SchemaProjectorException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.errors","c":"SchemaProjectorException","l":"SchemaProjectorException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.connect.errors","c":"SchemaProjectorException","l":"SchemaProjectorException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceConnector","l":"SchemaSourceConnector()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceTask","l":"SchemaSourceTask()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"schemaType(Class)","u":"schemaType(java.lang.Class)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginCallbackHandler","l":"SCOPE_CONFIG"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerLoginCallbackHandler","l":"SCOPE_DOC"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerToken","l":"scope()"},{"p":"org.apache.kafka.clients.admin","c":"ScramMechanism","l":"SCRAM_SHA_256"},{"p":"org.apache.kafka.clients.admin","c":"ScramMechanism","l":"SCRAM_SHA_512"},{"p":"org.apache.kafka.common.security.scram","c":"ScramCredentialCallback","l":"scramCredential()"},{"p":"org.apache.kafka.common.security.scram","c":"ScramCredential","l":"ScramCredential(byte[], byte[], byte[], int)","u":"%3Cinit%3E(byte[],byte[],byte[],int)"},{"p":"org.apache.kafka.common.security.scram","c":"ScramCredentialCallback","l":"scramCredential(ScramCredential)","u":"scramCredential(org.apache.kafka.common.security.scram.ScramCredential)"},{"p":"org.apache.kafka.common.security.scram","c":"ScramCredentialCallback","l":"ScramCredentialCallback()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"ScramCredentialInfo","l":"ScramCredentialInfo(ScramMechanism, int)","u":"%3Cinit%3E(org.apache.kafka.clients.admin.ScramMechanism,int)"},{"p":"org.apache.kafka.common.security.scram","c":"ScramExtensionsCallback","l":"ScramExtensionsCallback()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.scram","c":"ScramLoginModule","l":"ScramLoginModule()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"SECURITY_PROTOCOL_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"SECURITY_PROTOCOL_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"SECURITY_PROVIDERS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"SECURITY_PROVIDERS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"SECURITY_PROVIDERS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SecurityConfig","l":"SECURITY_PROVIDERS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SecurityConfig","l":"SECURITY_PROVIDERS_DOC"},{"p":"org.apache.kafka.common.config","c":"SecurityConfig","l":"SecurityConfig()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"SecurityDisabledException","l":"SecurityDisabledException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"SecurityDisabledException","l":"SecurityDisabledException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common","c":"Endpoint","l":"securityProtocol()"},{"p":"org.apache.kafka.common.security.auth","c":"AuthenticationContext","l":"securityProtocol()"},{"p":"org.apache.kafka.common.security.auth","c":"PlaintextAuthenticationContext","l":"securityProtocol()"},{"p":"org.apache.kafka.common.security.auth","c":"SaslAuthenticationContext","l":"securityProtocol()"},{"p":"org.apache.kafka.common.security.auth","c":"SslAuthenticationContext","l":"securityProtocol()"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizableRequestContext","l":"securityProtocol()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"seek(TopicPartition, long)","u":"seek(org.apache.kafka.common.TopicPartition,long)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"seek(TopicPartition, long)","u":"seek(org.apache.kafka.common.TopicPartition,long)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"seek(TopicPartition, long)","u":"seek(org.apache.kafka.common.TopicPartition,long)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"seek(TopicPartition, OffsetAndMetadata)","u":"seek(org.apache.kafka.common.TopicPartition,org.apache.kafka.clients.consumer.OffsetAndMetadata)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"seek(TopicPartition, OffsetAndMetadata)","u":"seek(org.apache.kafka.common.TopicPartition,org.apache.kafka.clients.consumer.OffsetAndMetadata)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"seek(TopicPartition, OffsetAndMetadata)","u":"seek(org.apache.kafka.common.TopicPartition,org.apache.kafka.clients.consumer.OffsetAndMetadata)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"seekToBeginning(Collection)","u":"seekToBeginning(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"seekToBeginning(Collection)","u":"seekToBeginning(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"seekToBeginning(Collection)","u":"seekToBeginning(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"seekToEnd(Collection)","u":"seekToEnd(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"seekToEnd(Collection)","u":"seekToEnd(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"seekToEnd(Collection)","u":"seekToEnd(java.util.Collection)"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"SEGMENT_BYTES_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"SEGMENT_BYTES_DOC"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"SEGMENT_INDEX_BYTES_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"SEGMENT_INDEX_BYTES_DOC"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"SEGMENT_JITTER_MS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"SEGMENT_JITTER_MS_DOC"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"SEGMENT_MS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"SEGMENT_MS_DOC"},{"p":"org.apache.kafka.streams.state","c":"SessionBytesStoreSupplier","l":"segmentIntervalMs()"},{"p":"org.apache.kafka.streams.state","c":"WindowBytesStoreSupplier","l":"segmentIntervalMs()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"segmentLeaderEpochs()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"Remo
teLogSegmentMetadata","l":"segmentSizeInBytes()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"selectKey(KeyValueMapper)","u":"selectKey(org.apache.kafka.streams.kstream.KeyValueMapper)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"selectKey(KeyValueMapper, Named)","u":"selectKey(org.apache.kafka.streams.kstream.KeyValueMapper,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"SEND_BUFFER_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"SEND_BUFFER_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"SEND_BUFFER_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"SEND_BUFFER_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"send(ProducerRecord)","u":"send(org.apache.kafka.clients.producer.ProducerRecord)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"send(ProducerRecord)","u":"send(org.apache.kafka.clients.producer.ProducerRecord)"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"send(ProducerRecord)","u":"send(org.apache.kafka.clients.producer.ProducerRecord)"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"send(ProducerRecord, Callback)","u":"send(org.apache.kafka.clients.producer.ProducerRecord,org.apache.kafka.clients.producer.Callback)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"send(ProducerRecord, Callback)","u":"send(org.apache.kafka.clients.producer.ProducerRecord,org.apache.kafka.clients.producer.Callback)"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"send(ProducerRecord, Callback)","u":"send(org.apache.kafka.clients.producer.ProducerRecord,org.apache.kafka.clients.producer.Callback)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"sendException"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"sendOffsetsToTransaction(Map, ConsumerGroupMetadata)","u":"sendOffsetsToTransaction(java.util.Map,org.apache.kafka.clients.consumer.ConsumerGroupMetadata)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"sendOffsetsToTransaction(Map, ConsumerGroupMetadata)","u":"sendOffsetsToTransaction(java.util.Map,org.apache.kafka.clients.consumer.ConsumerGroupMetadata)"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"sendOffsetsToTransaction(Map, ConsumerGroupMetadata)","u":"sendOffsetsToTransaction(java.util.Map,org.apache.kafka.clients.consumer.ConsumerGroupMetadata)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"sendOffsetsToTransactionException"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"sensor(String)","u":"sensor(java.lang.String)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"sensor(String, MetricConfig, long, Sensor...)","u":"sensor(java.lang.String,org.apache.kafka.common.metrics.MetricConfig,long,org.apache.kafka.common.metrics.Sensor...)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"sensor(String, MetricConfig, long, Sensor.RecordingLevel, Sensor...)","u":"sensor(java.lang.String,org.apache.kafka.common.metrics.MetricConfig,long,org.apache.kafka.common.metrics.Sensor.RecordingLevel,org.apache.kafka.common.metrics.Sensor...)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"sensor(String, MetricConfig, 
Sensor...)","u":"sensor(java.lang.String,org.apache.kafka.common.metrics.MetricConfig,org.apache.kafka.common.metrics.Sensor...)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"sensor(String, MetricConfig, Sensor.RecordingLevel, Sensor...)","u":"sensor(java.lang.String,org.apache.kafka.common.metrics.MetricConfig,org.apache.kafka.common.metrics.Sensor.RecordingLevel,org.apache.kafka.common.metrics.Sensor...)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"sensor(String, Sensor...)","u":"sensor(java.lang.String,org.apache.kafka.common.metrics.Sensor...)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"sensor(String, Sensor.RecordingLevel)","u":"sensor(java.lang.String,org.apache.kafka.common.metrics.Sensor.RecordingLevel)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"sensor(String, Sensor.RecordingLevel, Sensor...)","u":"sensor(java.lang.String,org.apache.kafka.common.metrics.Sensor.RecordingLevel,org.apache.kafka.common.metrics.Sensor...)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"sentOffsets()"},{"p":"org.apache.kafka.connect.mirror","c":"DefaultReplicationPolicy","l":"SEPARATOR_CONFIG"},{"p":"org.apache.kafka.connect.mirror","c":"DefaultReplicationPolicy","l":"SEPARATOR_DEFAULT"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"serdeFrom(Class)","u":"serdeFrom(java.lang.Class)"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"serdeFrom(Serializer, Deserializer)","u":"serdeFrom(org.apache.kafka.common.serialization.Serializer,org.apache.kafka.common.serialization.Deserializer)"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"Serdes()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"SerializationException","l":"SerializationException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"SerializationException","l":"SerializationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"SerializationException","l":"SerializationException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"SerializationException","l":"SerializationException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common.security.auth","c":"KafkaPrincipalSerde","l":"serialize(KafkaPrincipal)","u":"serialize(org.apache.kafka.common.security.auth.KafkaPrincipal)"},{"p":"org.apache.kafka.common.serialization","c":"BooleanSerializer","l":"serialize(String, Boolean)","u":"serialize(java.lang.String,java.lang.Boolean)"},{"p":"org.apache.kafka.common.serialization","c":"ByteArraySerializer","l":"serialize(String, byte[])","u":"serialize(java.lang.String,byte[])"},{"p":"org.apache.kafka.common.serialization","c":"ByteBufferSerializer","l":"serialize(String, ByteBuffer)","u":"serialize(java.lang.String,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.common.serialization","c":"BytesSerializer","l":"serialize(String, Bytes)","u":"serialize(java.lang.String,org.apache.kafka.common.utils.Bytes)"},{"p":"org.apache.kafka.common.serialization","c":"DoubleSerializer","l":"serialize(String, Double)","u":"serialize(java.lang.String,java.lang.Double)"},{"p":"org.apache.kafka.common.serialization","c":"FloatSerializer","l":"serialize(String, Float)","u":"serialize(java.lang.String,java.lang.Float)"},{"p":"org.apache.kafka.common.serialization","c":"Serializer","l":"serialize(String, Headers, 
T)","u":"serialize(java.lang.String,org.apache.kafka.common.header.Headers,T)"},{"p":"org.apache.kafka.common.serialization","c":"IntegerSerializer","l":"serialize(String, Integer)","u":"serialize(java.lang.String,java.lang.Integer)"},{"p":"org.apache.kafka.common.serialization","c":"ListSerializer","l":"serialize(String, List)","u":"serialize(java.lang.String,java.util.List)"},{"p":"org.apache.kafka.common.serialization","c":"LongSerializer","l":"serialize(String, Long)","u":"serialize(java.lang.String,java.lang.Long)"},{"p":"org.apache.kafka.common.serialization","c":"ShortSerializer","l":"serialize(String, Short)","u":"serialize(java.lang.String,java.lang.Short)"},{"p":"org.apache.kafka.common.serialization","c":"StringSerializer","l":"serialize(String, String)","u":"serialize(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.common.serialization","c":"Serializer","l":"serialize(String, T)","u":"serialize(java.lang.String,T)"},{"p":"org.apache.kafka.common.serialization","c":"UUIDSerializer","l":"serialize(String, UUID)","u":"serialize(java.lang.String,java.util.UUID)"},{"p":"org.apache.kafka.common.serialization","c":"VoidSerializer","l":"serialize(String, Void)","u":"serialize(java.lang.String,java.lang.Void)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedSerializer","l":"serialize(String, Windowed)","u":"serialize(java.lang.String,org.apache.kafka.streams.kstream.Windowed)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedSerializer","l":"serialize(String, Windowed)","u":"serialize(java.lang.String,org.apache.kafka.streams.kstream.Windowed)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedSerializer","l":"serializeBaseKey(String, Windowed)","u":"serializeBaseKey(java.lang.String,org.apache.kafka.streams.kstream.Windowed)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedSerializer","l":"serializeBaseKey(String, 
Windowed)","u":"serializeBaseKey(java.lang.String,org.apache.kafka.streams.kstream.Windowed)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"serializedKeySize()"},{"p":"org.apache.kafka.clients.producer","c":"RecordMetadata","l":"serializedKeySize()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"serializedValueSize()"},{"p":"org.apache.kafka.clients.producer","c":"RecordMetadata","l":"serializedValueSize()"},{"p":"org.apache.kafka.common.serialization","c":"Serde","l":"serializer()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.WrapperSerde","l":"serializer()"},{"p":"org.apache.kafka.common.security.auth","c":"SaslAuthenticationContext","l":"server()"},{"p":"org.apache.kafka.common.security.scram","c":"ScramCredential","l":"serverKey()"},{"p":"org.apache.kafka.common.security.auth","c":"Login","l":"serviceName()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"SESSION_TIMEOUT_MS_CONFIG"},{"p":"org.apache.kafka.common.security.auth","c":"SslAuthenticationContext","l":"session()"},{"p":"org.apache.kafka.streams.state","c":"QueryableStoreTypes","l":"sessionStore()"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized.StoreType","l":"sessionStore(DslSessionParams)","u":"sessionStore(org.apache.kafka.streams.state.DslSessionParams)"},{"p":"org.apache.kafka.streams.state","c":"BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers","l":"sessionStore(DslSessionParams)","u":"sessionStore(org.apache.kafka.streams.state.DslSessionParams)"},{"p":"org.apache.kafka.streams.state","c":"BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers","l":"sessionStore(DslSessionParams)","u":"sessionStore(org.apache.kafka.streams.state.DslSessionParams)"},{"p":"org.apache.kafka.streams.state","c":"DslStoreSuppliers","l":"sessionStore(DslSessionParams)","u":"sessionStore(org.apache.kafka.streams.state.DslSessionParams)"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"sessionStoreBuilder(SessionBytesStoreSupplier, Serde, 
Serde)","u":"sessionStoreBuilder(org.apache.kafka.streams.state.SessionBytesStoreSupplier,org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedDeserializer","l":"SessionWindowedDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedDeserializer","l":"SessionWindowedDeserializer(Deserializer)","u":"%3Cinit%3E(org.apache.kafka.common.serialization.Deserializer)"},{"p":"org.apache.kafka.streams.kstream","c":"WindowedSerdes.SessionWindowedSerde","l":"SessionWindowedSerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.kstream","c":"WindowedSerdes.SessionWindowedSerde","l":"SessionWindowedSerde(Serde)","u":"%3Cinit%3E(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"WindowedSerdes","l":"sessionWindowedSerdeFrom(Class)","u":"sessionWindowedSerdeFrom(java.lang.Class)"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedSerializer","l":"SessionWindowedSerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedSerializer","l":"SessionWindowedSerializer(Serializer)","u":"%3Cinit%3E(org.apache.kafka.common.serialization.Serializer)"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigOp.OpType","l":"SET"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"setAcknowledgementCommitCallback(AcknowledgementCommitCallback)","u":"setAcknowledgementCommitCallback(org.apache.kafka.clients.consumer.AcknowledgementCommitCallback)"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"setAcknowledgementCommitCallback(AcknowledgementCommitCallback)","u":"setAcknowledgementCommitCallback(org.apache.kafka.clients.consumer.AcknowledgementCommitCallback)"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"setAcknowledgementCommitCallback(AcknowledgementCommitCallback)","u":"setAcknowledgementCommitCallback(org.apache.kafka.clients.consumer.AcknowledgementCommitCallback)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"setClientInstanceId(Uuid)","u":"setClientInstanceId(org.apache.kafka.common.Uuid)"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"setClientInstanceId(Uuid)","u":"setClientInstanceId(org.apache.kafka.common.Uuid)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"setClientInstanceId(Uuid)","u":"setClientInstanceId(org.apache.kafka.common.Uuid)"},{"p":"org.apache.kafka.clients.admin","c":"AddRaftVoterOptions","l":"setClusterId(Optional)","u":"setClusterId(java.util.Optional)"},{"p":"org.apache.kafka.clients.admin","c":"RemoveRaftVoterOptions","l":"setClusterId(Optional)","u":"setClusterId(java.util.Optional)"},{"p":"org.apache.kafka.streams.state","c":"RocksDBConfigSetter","l":"setConfig(String, Options, 
Map)","u":"setConfig(java.lang.String,org.rocksdb.Options,java.util.Map)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"setCurrentStreamTimeMs(long)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"setCurrentStreamTimeMs(long)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"setCurrentSystemTimeMs(long)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"setCurrentSystemTimeMs(long)"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"setExpiryTimestamp(long)"},{"p":"org.apache.kafka.streams.query","c":"StateQueryResult","l":"setGlobalResult(QueryResult)","u":"setGlobalResult(org.apache.kafka.streams.query.QueryResult)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"setGlobalStateRestoreListener(StateRestoreListener)","u":"setGlobalStateRestoreListener(org.apache.kafka.streams.processor.StateRestoreListener)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Subscription","l":"setGroupInstanceId(Optional)","u":"setGroupInstanceId(java.util.Optional)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"setHeaders(Headers)","u":"setHeaders(org.apache.kafka.common.header.Headers)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedDeserializer","l":"setIsChangelogTopic(boolean)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"setMaxPollRecords(long)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"setMockMetrics(MetricName, Metric)","u":"setMockMetrics(org.apache.kafka.common.MetricName,org.apache.kafka.common.Metric)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"setOffset(long)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"setOffsetsException(KafkaException)","u":"setOffsetsException(org.apache.kafka.common.KafkaException)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"setPartition(int)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"setPollException(KafkaException)","u":"setPollException(org.apache.kafka.common.KafkaException)"},{"p":"org.apache.kafka.streams.query","c":"QueryResult","l":"setPosition(Position)","u":"setPosition(org.apache.kafka.streams.query.Position)"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"setRecordMetadata(String, int, long)","u":"setRecordMetadata(java.lang.String,int,long)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"setRecordMetadata(String, int, long, Headers, 
long)","u":"setRecordMetadata(java.lang.String,int,long,org.apache.kafka.common.header.Headers,long)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"setRecordTimestamp(long)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"setStandbyUpdateListener(StandbyUpdateListener)","u":"setStandbyUpdateListener(org.apache.kafka.streams.processor.StandbyUpdateListener)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"setStateListener(KafkaStreams.StateListener)","u":"setStateListener(org.apache.kafka.streams.KafkaStreams.StateListener)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsException","l":"setTaskId(TaskId)","u":"setTaskId(org.apache.kafka.streams.processor.TaskId)"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"setTopic(String)","u":"setTopic(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"setUncaughtExceptionHandler(StreamsUncaughtExceptionHandler)","u":"setUncaughtExceptionHandler(org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler)"},{"p":"org.apache.kafka.common","c":"GroupType","l":"SHARE"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"SHARE_ACKNOWLEDGEMENT_MODE_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"ShareGroupDescription","l":"ShareGroupDescription(String, Collection, GroupState, Node, int, int)","u":"%3Cinit%3E(java.lang.String,java.util.Collection,org.apache.kafka.common.GroupState,org.apache.kafka.common.Node,int,int)"},{"p":"org.apache.kafka.clients.admin","c":"ShareGroupDescription","l":"ShareGroupDescription(String, Collection, GroupState, Node, int, int, Set)","u":"%3Cinit%3E(java.lang.String,java.util.Collection,org.apache.kafka.common.GroupState,org.apache.kafka.common.Node,int,int,java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"ShareMemberAssignment","l":"ShareMemberAssignment(Set)","u":"%3Cinit%3E(java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"ShareMemberDescription","l":"ShareMemberDescription(String, String, String, ShareMemberAssignment, 
int)","u":"%3Cinit%3E(java.lang.String,java.lang.String,java.lang.String,org.apache.kafka.clients.admin.ShareMemberAssignment,int)"},{"p":"org.apache.kafka.common.errors","c":"ShareSessionLimitReachedException","l":"ShareSessionLimitReachedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"ShareSessionNotFoundException","l":"ShareSessionNotFoundException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigType","l":"SHORT"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Type","l":"SHORT"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Width","l":"SHORT"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"Short()"},{"p":"org.apache.kafka.common.serialization","c":"ShortDeserializer","l":"ShortDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.ShortSerde","l":"ShortSerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"ShortSerializer","l":"ShortSerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.auth","c":"SslEngineFactory","l":"shouldBeRebuilt(Map)","u":"shouldBeRebuilt(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"ListTopicsOptions","l":"shouldListInternal()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"shouldRebalance()"},{"p":"org.apache.kafka.common.metrics","c":"Sensor","l":"shouldRecord()"},{"p":"org.apache.kafka.common.metrics","c":"Sensor.RecordingLevel","l":"shouldRecord(int)"},{"p":"org.apache.kafka.clients.admin","c":"CreatePartitionsOptions","l":"shouldRetryOnQuotaViolation()"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsOptions","l":"shouldRetryOnQuotaViolation()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteTopicsOptions","l":"shouldRetryOnQuotaViolation()"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigsOptions","l":"shouldValidateOnly()"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsOptions","l":"shouldValidateOnly()"},{"p":"org.apache.kafka.streams.errors","c":"StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse","l":"SHUTDOWN_APPLICATION"},{"p":"org.apache.kafka.streams.errors","c":"StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse","l":"SHUTDOWN_CLIENT"},{"p":"org.apache.kafka.streams.kstream","c":"Suppressed.BufferConfig","l":"shutDownWhenFull()"},{"p":"org.apache.kafka.connect.storage","c":"SimpleHeaderConverter","l":"SimpleHeaderConverter()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.metrics.stats","c":"SimpleRate","l":"SimpleRate()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"SINGLE_STORE_SELF_JOIN"},{"p":"org.apache.kafka.connect.health","c":"ConnectorType","l":"SINK"},{"p":"org.apache.kafka.connect.sink","c":"SinkConnector","l":"SinkConnector()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.sink","c":"SinkRecord","l":"SinkRecord(String, int, Schema, Object, Schema, Object, long)","u":"%3Cinit%3E(java.lang.String,int,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object,long)"},{"p":"org.apache.kafka.connect.sink","c":"SinkRecord","l":"SinkRecord(String, int, Schema, Object, Schema, Object, long, Long, 
TimestampType)","u":"%3Cinit%3E(java.lang.String,int,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object,long,java.lang.Long,org.apache.kafka.common.record.TimestampType)"},{"p":"org.apache.kafka.connect.sink","c":"SinkRecord","l":"SinkRecord(String, int, Schema, Object, Schema, Object, long, Long, TimestampType, Iterable
)","u":"%3Cinit%3E(java.lang.String,int,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object,long,java.lang.Long,org.apache.kafka.common.record.TimestampType,java.lang.Iterable)"},{"p":"org.apache.kafka.connect.sink","c":"SinkRecord","l":"SinkRecord(String, int, Schema, Object, Schema, Object, long, Long, TimestampType, Iterable
, String, Integer, long)","u":"%3Cinit%3E(java.lang.String,int,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object,long,java.lang.Long,org.apache.kafka.common.record.TimestampType,java.lang.Iterable,java.lang.String,java.lang.Integer,long)"},{"p":"org.apache.kafka.connect.sink","c":"SinkTask","l":"SinkTask()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"ReplicaInfo","l":"size()"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"size()"},{"p":"org.apache.kafka.connect.header","c":"Headers","l":"size()"},{"p":"org.apache.kafka.streams.kstream","c":"JoinWindows","l":"size()"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindows","l":"size()"},{"p":"org.apache.kafka.streams.kstream","c":"UnlimitedWindows","l":"size()"},{"p":"org.apache.kafka.streams.kstream","c":"Windows","l":"size()"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindows","l":"sizeMs"},{"p":"org.apache.kafka.streams.query","c":"KeyQuery","l":"skipCache()"},{"p":"org.apache.kafka.streams.query","c":"TimestampedKeyQuery","l":"skipCache()"},{"p":"org.apache.kafka.common.errors","c":"SnapshotNotFoundException","l":"SnapshotNotFoundException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"SnapshotNotFoundException","l":"SnapshotNotFoundException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"AdminClientConfig","l":"SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG"},{"p":"org.apache.kafka.connect.health","c":"ConnectorType","l":"SOURCE"},{"p":"org.apache.kafka.connect.mirror","c":"IdentityReplicationPolicy","l":"SOURCE_CLUSTER_ALIAS_CONFIG"},{"p":"org.apache.kafka.connect.mirror","c":"Heartbeat","l":"SOURCE_CLUSTER_ALIAS_KEY"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSynonym","l":"source()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry","l":"source()"},{"p":"org.apache.kafka.connect.mirror","c":"SourceAndTarget","l":"source()"},{"p":"org.apache.kafka.streams","c":"TopologyDescription.GlobalStore","l":"source()"},{"p":"org.apache.kafka.connect.mirror","c":"SourceAndTarget","l":"SourceAndTarget(String, 
String)","u":"%3Cinit%3E(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"Heartbeat","l":"sourceClusterAlias()"},{"p":"org.apache.kafka.connect.source","c":"SourceConnector","l":"SourceConnector()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.source","c":"SourceRecord","l":"sourceOffset()"},{"p":"org.apache.kafka.connect.source","c":"SourceRecord","l":"sourcePartition()"},{"p":"org.apache.kafka.streams.errors","c":"ErrorHandlerContext","l":"sourceRawKey()"},{"p":"org.apache.kafka.streams.processor","c":"RecordContext","l":"sourceRawKey()"},{"p":"org.apache.kafka.streams.errors","c":"ErrorHandlerContext","l":"sourceRawValue()"},{"p":"org.apache.kafka.streams.processor","c":"RecordContext","l":"sourceRawValue()"},{"p":"org.apache.kafka.connect.source","c":"SourceRecord","l":"SourceRecord(Map, Map, String, Integer, Schema, Object)","u":"%3Cinit%3E(java.util.Map,java.util.Map,java.lang.String,java.lang.Integer,org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.source","c":"SourceRecord","l":"SourceRecord(Map, Map, String, Integer, Schema, Object, Schema, Object)","u":"%3Cinit%3E(java.util.Map,java.util.Map,java.lang.String,java.lang.Integer,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.source","c":"SourceRecord","l":"SourceRecord(Map, Map, String, Integer, Schema, Object, Schema, Object, Long)","u":"%3Cinit%3E(java.util.Map,java.util.Map,java.lang.String,java.lang.Integer,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object,java.lang.Long)"},{"p":"org.apache.kafka.connect.source","c":"SourceRecord","l":"SourceRecord(Map, Map, String, Integer, Schema, Object, Schema, Object, Long, Iterable
)","u":"%3Cinit%3E(java.util.Map,java.util.Map,java.lang.String,java.lang.Integer,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object,java.lang.Long,java.lang.Iterable)"},{"p":"org.apache.kafka.connect.source","c":"SourceRecord","l":"SourceRecord(Map, Map, String, Schema, Object)","u":"%3Cinit%3E(java.util.Map,java.util.Map,java.lang.String,org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.source","c":"SourceRecord","l":"SourceRecord(Map, Map, String, Schema, Object, Schema, Object)","u":"%3Cinit%3E(java.util.Map,java.util.Map,java.lang.String,org.apache.kafka.connect.data.Schema,java.lang.Object,org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.source","c":"SourceTask","l":"SourceTask()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubtopologyDescription","l":"sourceTopics()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"split()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"split(Named)","u":"split(org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.common.security.auth","c":"SecurityProtocol","l":"SSL"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_CIPHER_SUITES_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_CIPHER_SUITES_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_ENABLED_PROTOCOLS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_ENABLED_PROTOCOLS_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_ENGINE_FACTORY_CLASS_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_ENGINE_FACTORY_CLASS_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_KEY_PASSWORD_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_KEY_PASSWORD_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_KEYMANAGER_ALGORITHM_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_KEYMANAGER_ALGORITHM_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_KEYSTORE_CERTIFICATE_CHAIN_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_KEYSTORE_CERTIFICATE_CHAIN_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_KEYSTORE_KEY_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_KEYSTORE_KEY_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_KEYSTORE_LOCATION_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_KEYSTORE_LOCATION_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_KEYSTORE_PASSWORD_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_KEYSTORE_PASSWORD_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_KEYSTORE_TYPE_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_KEYSTORE_TYPE_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_PROTOCOL_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_PROTOCOL_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_PROVIDER_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_PROVIDER_DOC"},{"p":"org.apa
che.kafka.common.config","c":"SslConfigs","l":"SSL_SECURE_RANDOM_IMPLEMENTATION_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_SECURE_RANDOM_IMPLEMENTATION_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_TRUSTMANAGER_ALGORITHM_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_TRUSTMANAGER_ALGORITHM_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_TRUSTSTORE_CERTIFICATES_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_TRUSTSTORE_CERTIFICATES_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_TRUSTSTORE_LOCATION_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_TRUSTSTORE_LOCATION_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_TRUSTSTORE_PASSWORD_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_TRUSTSTORE_PASSWORD_DOC"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_TRUSTSTORE_TYPE_CONFIG"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SSL_TRUSTSTORE_TYPE_DOC"},{"p":"org.apache.kafka.common.security.auth","c":"SslAuthenticationContext","l":"SslAuthenticationContext(SSLSession, InetAddress, String)","u":"%3Cinit%3E(javax.net.ssl.SSLSession,java.net.InetAddress,java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"SslAuthenticationException","l":"SslAuthenticationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"SslAuthenticationException","l":"SslAuthenticationException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.config","c":"SslConfigs","l":"SslConfigs()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.security.auth","c":"SaslAuthenticationContext","l":"sslSession()"},{"p":"org.apache.kafka.common","c":"ClassicGroupState","l":"STABLE"},{"p":"org.apache.kafka.common","c":"ConsumerGroupState","l":"STABLE"},{"p":"org.apache.kafka.common","c":"GroupState","l":"STABLE"},{"p":"org.apache.kafka.common.errors","c":"StaleBrokerEpochException","l":"StaleBrokerEpochException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"StaleBrokerEpochException","l":"StaleBrokerEpochException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"StaleMemberEpochException","l":"StaleMemberEpochException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"StoreQueryParameters","l":"staleStoresEnabled()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment.AssignedTask.Type","l":"STANDBY"},{"p":"org.apache.kafka.streams","c":"KeyQueryMetadata","l":"standbyHosts()"},{"p":"org.apache.kafka.streams","c":"StreamsMetadata","l":"standbyStateStoreNames()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberAssignment","l":"standbyTasks()"},{"p":"org.apache.kafka.streams","c":"ThreadMetadata","l":"standbyTasks()"},{"p":"org.apache.kafka.streams","c":"StreamsMetadata","l":"standbyTopicPartitions()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"start()"},{"p":"org.apache.kafka.streams.kstream","c":"Window","l":"start()"},{"p":"org.apache.kafka.server.authorizer","c":"Authorizer","l":"start(AuthorizerServerInfo)","u":"start(org.apache.kafka.server.authorizer.AuthorizerServerInfo)"},{"p":"org.apache.kafka.connect.connector","c":"Connector","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.connect.connector","c":"Task","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.connect.sink","c":"SinkTask","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.connect.source","c":"SourceTask","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"MockConnector","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkConnector","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkTask","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceConnector","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceTask","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceConnector","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceTask","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkConnector","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkTask","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceConnector","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceTask","l":"start(Map)","u":"start(java.util.Map)"},{"p":"org.apache.kafka.streams.kstream","c":"UnlimitedWindows","l":"startMs"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"startOffset()"},{"p":"org.apache.kafka.streams.kstream","c":"UnlimitedWindows","l":"startOn(Instant)","u":"startOn(java.time.Instant)"},{"p":"org.apache.kafka.streams.kstream","c":"Window","l":"startTime()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerToken","l":"startTimeMs()"},{"p":"org.apache.kafka.common.metrics","c":"CompoundStat.NamedMeasurable","l":"stat()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"STATE_CLEANUP_DELAY_MS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"STATE_DIR_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"STATE_UPDATER_ENABLED"},{"p":"org.apache.ka
fka.clients.admin","c":"ClassicGroupDescription","l":"state()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"state()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupListing","l":"state()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionDescription","l":"state()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionListing","l":"state()"},{"p":"org.apache.kafka.connect.health","c":"AbstractState","l":"state()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"state()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadataUpdate","l":"state()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemotePartitionDeleteMetadata","l":"state()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"state()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubtopologyDescription","l":"stateChangelogTopics()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"stateDir()"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessingContext","l":"stateDir()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"stateDir()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"stateDir()"},{"p":"org.apache.kafka.streams.processor","c":"StateStoreContext","l":"stateDir()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsState","l":"statefulTasksToLagSums()"},{"p":"org.apache.kafka.streams.query","c":"StateQueryResult","l":"StateQueryResult()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupsOptions","l":"states()"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"StateSerdes(String, Serde, Serde)","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"STATESTORE_CACHE_MAX_BYTES_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"STATESTORE_CACHE_MAX_BYTES_DOC"},{"p":"org.apache.kafka.streams.errors","c":"StateStoreMigratedException","l":"StateStoreMigratedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"StateStoreMigratedException","l":"StateStoreMigratedException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskInfo","l":"stateStoreNames()"},{"p":"org.apache.kafka.streams","c":"StreamsMetadata","l":"stateStoreNames()"},{"p":"org.apache.kafka.streams.errors","c":"StateStoreNotAvailableException","l":"StateStoreNotAvailableException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"StateStoreNotAvailableException","l":"StateStoreNotAvailableException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"stateUpdaterEnabled(Map)","u":"stateUpdaterEnabled(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSource","l":"STATIC_BROKER_CONFIG"},{"p":"org.apache.kafka.common.metrics","c":"CompoundStat","l":"stats()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Frequencies","l":"stats()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Meter","l":"stats()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Percentiles","l":"stats()"},{"p":"org.apache.kafka.clients.consumer","c":"StickyAssignor","l":"STICKY_ASSIGNOR_NAME"},{"p":"org.apache.kafka.clients.consumer","c":"StickyAssignor","l":"StickyAssignor()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.processor.assignment.assignors","c":"StickyTaskAssignor","l":"StickyTaskAssignor()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.processor.assignment.assignors","c":"StickyTaskAssignor","l":"StickyTaskAssignor(boolean)","u":"%3Cinit%3E(boolean)"},{"p":"org.apache.kafka.connect.connector","c":"Connector","l":"stop()"},{"p":"org.apache.kafka.connect.connector","c":"Task","l":"stop()"},{"p":"org.apache.kafka.connect.sink","c":"SinkTask","l":"stop()"},{"p":"org.apache.kafka.connect.source","c":"SourceTask","l":"stop()"},{"p":"org.apache.kafka.connect.tools","c":"MockConnector","l":"stop()"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkConnector","l":"stop()"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkTask","l":"stop()"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceConnector","l":"stop()"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceTask","l":"stop()"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceConnector","l":"stop()"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceTask","l":"stop()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkConnector","l":"stop()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkTask","l":"stop()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceConnector","l":"stop()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceTask","l":"stop()"},{"p":"org.apache.kafka.streams.query","c":"FailureReason","l":"STORE_EXCEPTION"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"store(StoreQueryParameters)","u":"store(org.apache.kafka.streams.StoreQueryParameters)"},{"p":"org.apache.kafka.common.security.scram","c":"ScramCredential","l":"storedKey()"},{"p":"org.apache.kafka.streams","c":"StoreQueryParameters","l":"storeName()"},{"p":"org.apache.kafka.streams.processor","c":"ConnectedStoreProvider","l":"stores()"},{"p":"org.apache.kafka.streams","c":"TopologyDescription.Processor","l":"stores()"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"Stores()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"storeType"},{"p":"org.apache.kafka.streams.processor","c":"PunctuationType","l":"STREAM_TIME"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"stream(Collection)","u":"stream(java.util.Collection)"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"stream(Collection, Consumed)","u":"stream(java.util.Collection,org.apache.kafka.streams.kstream.Consumed)"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"stream(Pattern)","u":"stream(java.util.regex.Pattern)"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"stream(Pattern, 
Consumed)","u":"stream(java.util.regex.Pattern,org.apache.kafka.streams.kstream.Consumed)"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"stream(String)","u":"stream(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"stream(String, Consumed)","u":"stream(java.lang.String,org.apache.kafka.streams.kstream.Consumed)"},{"p":"org.apache.kafka.streams.kstream","c":"Produced","l":"streamPartitioner(StreamPartitioner)","u":"streamPartitioner(org.apache.kafka.streams.processor.StreamPartitioner)"},{"p":"org.apache.kafka.streams.kstream","c":"Repartitioned","l":"streamPartitioner(StreamPartitioner)","u":"streamPartitioner(org.apache.kafka.streams.processor.StreamPartitioner)"},{"p":"org.apache.kafka.common","c":"GroupType","l":"STREAMS"},{"p":"org.apache.kafka.streams","c":"GroupProtocol","l":"STREAMS"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"StreamsBuilder()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"StreamsBuilder(TopologyConfig)","u":"%3Cinit%3E(org.apache.kafka.streams.TopologyConfig)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"StreamsConfig(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsException","l":"StreamsException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsException","l":"StreamsException(String, TaskId)","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.streams.processor.TaskId)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsException","l":"StreamsException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsException","l":"StreamsException(String, Throwable, TaskId)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable,org.apache.kafka.streams.processor.TaskId)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsException","l":"StreamsException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsException","l":"StreamsException(Throwable, TaskId)","u":"%3Cinit%3E(java.lang.Throwable,org.apache.kafka.streams.processor.TaskId)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupDescription","l":"StreamsGroupDescription(String, int, int, int, Collection, Collection, GroupState, Node, Set)","u":"%3Cinit%3E(java.lang.String,int,int,int,java.util.Collection,java.util.Collection,org.apache.kafka.common.GroupState,org.apache.kafka.common.Node,java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberAssignment","l":"StreamsGroupMemberAssignment(List, List, List)","u":"%3Cinit%3E(java.util.List,java.util.List,java.util.List)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"StreamsGroupMemberDescription(String, int, Optional, Optional, String, String, int, String, Optional, Map, List, List, StreamsGroupMemberAssignment, StreamsGroupMemberAssignment, boolean)","u":"%3Cinit%3E(java.lang.String,int,java.util.Optional,java.util.Optional,java.lang.String,java.lang.String,int,java.lang.String,java.util.Optional,java.util.Map,java.util.List,java.util.List,org.apache.kafka.clients.admin.StreamsGroupMemberAssignment,org.apache.kafka.clients.admin.StreamsGroupMemberAssignment,boolean)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubtopologyDescription","l":"StreamsGroupSubtopologyDescription(String, List, List, Map, 
Map)","u":"%3Cinit%3E(java.lang.String,java.util.List,java.util.List,java.util.Map,java.util.Map)"},{"p":"org.apache.kafka.common.errors","c":"StreamsInvalidTopologyEpochException","l":"StreamsInvalidTopologyEpochException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"StreamsInvalidTopologyException","l":"StreamsInvalidTopologyException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams","l":"streamsMetadataForStore(String)","u":"streamsMetadataForStore(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsNotStartedException","l":"StreamsNotStartedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsNotStartedException","l":"StreamsNotStartedException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsRebalancingException","l":"StreamsRebalancingException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsRebalancingException","l":"StreamsRebalancingException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsStoppedException","l":"StreamsStoppedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsStoppedException","l":"StreamsStoppedException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"StreamsTopologyFencedException","l":"StreamsTopologyFencedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilter","l":"strict()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigType","l":"STRING"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Type","l":"STRING"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"STRING"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"STRING_SCHEMA"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"string()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"String()"},{"p":"org.apache.kafka.connect.storage","c":"StringConverter","l":"StringConverter()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.storage","c":"StringConverterConfig","l":"StringConverterConfig(Map)","u":"%3Cinit%3E(java.util.Map)"},{"p":"org.apache.kafka.tools.api","c":"StringDecoder","l":"StringDecoder()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"StringDeserializer","l":"StringDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.StringSerde","l":"StringSerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"StringSerializer","l":"StringSerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"STRUCT"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"struct()"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"Struct(Schema)","u":"%3Cinit%3E(org.apache.kafka.connect.data.Schema)"},{"p":"org.apache.kafka.common.security.auth","c":"Login","l":"subject()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"subscribe(Collection)","u":"subscribe(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"subscribe(Collection)","u":"subscribe(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l
":"subscribe(Collection)","u":"subscribe(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"subscribe(Collection)","u":"subscribe(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"subscribe(Collection)","u":"subscribe(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"subscribe(Collection)","u":"subscribe(java.util.Collection)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"subscribe(Collection, ConsumerRebalanceListener)","u":"subscribe(java.util.Collection,org.apache.kafka.clients.consumer.ConsumerRebalanceListener)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"subscribe(Collection, ConsumerRebalanceListener)","u":"subscribe(java.util.Collection,org.apache.kafka.clients.consumer.ConsumerRebalanceListener)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"subscribe(Collection, ConsumerRebalanceListener)","u":"subscribe(java.util.Collection,org.apache.kafka.clients.consumer.ConsumerRebalanceListener)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"subscribe(Pattern)","u":"subscribe(java.util.regex.Pattern)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"subscribe(Pattern)","u":"subscribe(java.util.regex.Pattern)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"subscribe(Pattern)","u":"subscribe(java.util.regex.Pattern)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"subscribe(Pattern, ConsumerRebalanceListener)","u":"subscribe(java.util.regex.Pattern,org.apache.kafka.clients.consumer.ConsumerRebalanceListener)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"subscribe(Pattern, ConsumerRebalanceListener)","u":"subscribe(java.util.regex.Pattern,org.apache.kafka.clients.consumer.ConsumerRebalanceListener)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"subscribe(Pattern, ConsumerRebalanceListener)","u":"subscribe(java.util.regex.Pattern,org.apache.kafka.clients.consumer.ConsumerRebalanceListener)"},{"p":"org.apache.kafka.common.config.provider","c":"ConfigProvider","l":"subscribe(String, Set, ConfigChangeCallback)","u":"subscribe(java.lang.String,java.util.Set,org.apache.kafka.common.config.ConfigChangeCallback)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"subscribe(SubscriptionPattern)","u":"subscribe(org.apache.kafka.clients.consumer.SubscriptionPattern)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"subscribe(SubscriptionPattern)","u":"subscribe(org.apache.kafka.clients.consumer.SubscriptionPattern)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"subscribe(SubscriptionPattern)","u":"subscribe(org.apache.kafka.clients.consumer.SubscriptionPattern)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"subscribe(SubscriptionPattern, ConsumerRebalanceListener)","u":"subscribe(org.apache.kafka.clients.consumer.SubscriptionPattern,org.apache.kafka.clients.consumer.ConsumerRebalanceListener)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"subscribe(SubscriptionPattern, ConsumerRebalanceListener)","u":"subscribe(org.apache.kafka.clients.consumer.SubscriptionPattern,org.apache.kafka.clients.consumer.ConsumerRebalanceListener)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"subscribe(SubscriptionPattern, 
ConsumerRebalanceListener)","u":"subscribe(org.apache.kafka.clients.consumer.SubscriptionPattern,org.apache.kafka.clients.consumer.ConsumerRebalanceListener)"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"MemberSubscription","l":"subscribedTopicIds()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"subscription()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"subscription()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"subscription()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"subscription()"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"subscription()"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"subscription()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Subscription","l":"Subscription(List)","u":"%3Cinit%3E(java.util.List)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Subscription","l":"Subscription(List, ByteBuffer)","u":"%3Cinit%3E(java.util.List,java.nio.ByteBuffer)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Subscription","l":"Subscription(List, ByteBuffer, List)","u":"%3Cinit%3E(java.util.List,java.nio.ByteBuffer,java.util.List)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Subscription","l":"Subscription(List, ByteBuffer, List, int, Optional)","u":"%3Cinit%3E(java.util.List,java.nio.ByteBuffer,java.util.List,int,java.util.Optional)"},{"p":"org.apache.kafka.clients.consumer","c":"SubscriptionPattern","l":"SubscriptionPattern(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"GroupSpec","l":"subscriptionType()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor","l":"subscriptionUserData(Set)","u":"subscriptionUserData(java.util.Set)"},{"p":"org.apache.kafka.clients.consumer","c":"CooperativeStickyAssignor","l":"subscriptionUserData(Set)","u":"subscriptionUserData(java.util.Set)"},{"p":"org.apache.kafka.clients.consumer","c":"StickyAssignor","l":"subscriptionUserData(Set)","u":"subscriptionUserData(java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupDescription","l":"subtopologies()"},{"p":"org.apache.kafka.streams","c":"TopologyDescription","l":"subtopologies()"},{"p":"org.apache.kafka.streams.processor","c":"TaskId","l":"subtopology()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberAssignment.TaskIds","l":"subtopologyId()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription.TaskOffset","l":"subtopologyId()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubtopologyDescription","l":"subtopologyId()"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigOp.OpType","l":"SUBTRACT"},{"p":"org.apache.kafka.server.authorizer","c":"AclCreateResult","l":"SUCCESS"},{"p":"org.apache.kafka.streams","c":"TopologyDescription.Node","l":"successors()"},{"p":"org.apache.kafka.connect.source","c":"ConnectorTransactionBoundaries","l":"SUPPORTED"},{"p":"org.apache.kafka.connect.source","c":"ExactlyOnceSupport","l":"SUPPORTED"},{"p":"org.apache.kafka.clients.admin","c":"FeatureMetadata","l":"supportedFeatures()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor","l":"supportedProtocols()"},{"p":"org.apache.kafka.clients.consumer","c":"CooperativeStickyAssignor","l":"supportedProtocols()"},{"p":"org.apache.kafka.clients.admin","c":"SupportedVersionRange","l":"SupportedVersionRange(sh
ort, short)","u":"%3Cinit%3E(short,short)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"suppress(Suppressed)","u":"suppress(org.apache.kafka.streams.kstream.Suppressed)"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry","l":"synonyms()"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"table(String)","u":"table(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"table(String, Consumed)","u":"table(java.lang.String,org.apache.kafka.streams.kstream.Consumed)"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"table(String, Consumed, Materialized>)","u":"table(java.lang.String,org.apache.kafka.streams.kstream.Consumed,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams","c":"StreamsBuilder","l":"table(String, Materialized>)","u":"table(java.lang.String,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.common","c":"MetricName","l":"tags()"},{"p":"org.apache.kafka.common","c":"MetricNameTemplate","l":"tags()"},{"p":"org.apache.kafka.common.metrics","c":"MetricConfig","l":"tags()"},{"p":"org.apache.kafka.common.metrics","c":"MetricConfig","l":"tags(Map)","u":"tags(java.util.Map)"},{"p":"org.apache.kafka.connect.mirror","c":"Heartbeat","l":"TARGET_CLUSTER_ALIAS_KEY"},{"p":"org.apache.kafka.connect.mirror","c":"SourceAndTarget","l":"target()"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"targetAssignment()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"targetAssignment()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"targetAssignmentEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"ShareGroupDescription","l":"targetAssignmentEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupDescription","l":"targetAssignmentEpoch()"},{"p":"org.apache.kafka.connect.mirror","c":"Heartbeat","l":"targetClusterAlias()"},{"p":"org.apache.kafka.clients.admin","c":"NewPartitionReassignment","l":"targetReplicas()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"TASK_ASSIGNOR_CLASS_CONFIG"},{"p":"org.apache.kafka.connect.tools","c":"MockConnector","l":"TASK_FAILURE"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"TASK_TIMEOUT_MS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"TASK_TIMEOUT_MS_DOC"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignor.TaskAssignment","l":"TaskAssignment(Collection)","u":"%3Cinit%3E(java.util.Collection)"},{"p":"org.apache.kafka.streams.errors","c":"TaskAssignmentException","l":"TaskAssignmentException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"TaskAssignmentException","l":"TaskAssignmentException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"TaskAssignmentException","l":"TaskAssignmentException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.connect.connector","c":"Connector","l":"taskClass()"},{"p":"org.apache.kafka.connect.tools","c":"MockConnector","l":"taskClass()"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkConnector","l":"taskClass()"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceConnector","l":"taskClass()"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceConnector","l":"taskClass()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkConnector","l":"taskClass()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceConnector","l":"taskClass()"},{"p":"org.apache.kafka.connect.connector","c":"Connector","l":"taskConfigs(int)"},{"p":"org.apache.kafka.connect.tools","c":"MockConnector","l":"taskConfigs(int)"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkConnector","l":"taskConfigs(int)"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceConnector","l":"taskConfigs(int)"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceConnector","l":"taskConfigs(int)"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkConnector","l":"taskConfigs(int)"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceConnector","l":"taskConfigs(int)"},{"p":"org.apache.kafka.streams.errors","c":"TaskCorruptedException","l":"TaskCorruptedException(Set)","u":"%3Cinit%3E(java.util.Set)"},{"p":"org.apache.kafka.streams.errors","c":"TaskCorruptedException","l":"TaskCorruptedException(Set, InvalidOffsetException)","u":"%3Cinit%3E(java.util.Set,org.apache.kafka.clients.consumer.InvalidOffsetException)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"taskEndOffsets()"},{"p":"org.apache.kafka.connect.health","c":"TaskState","l":"taskId()"},{"p":"org.apache.kafka.streams.errors","c":"ErrorHandlerContext","l":"taskId()"},{"p":"org.apache.kafka.streams.errors","c":"StreamsException","l":"taskId()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"taskId()"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessingContext","l":"taskId()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"taskId()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"taskId()"},{"p":"org.apache.kafka.streams.processor","c":"StateStoreContext","l":"taskId()"},{"p":"org.apache.kafka.streams","c":"TaskMetadata","l":"taskId()"},{"p":"org.apache.kafka.streams.processor","c":"TaskId","l":"TaskId(int, int)","u":"%3Cinit%3E(int,int)"},{"p":"org.apache.kafka.streams.processor","c":"TaskId","l":"TaskId(int, int, String)","u":"%3Cinit%3E(int,int,java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"TaskIdFormatException","l":"TaskIdFormatException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"TaskIdFormatException","l":"TaskIdFormatException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"TaskIdFormatException","l":"TaskIdFormatException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberAssignment.TaskIds","l":"TaskIds(String, 
List)","u":"%3Cinit%3E(java.lang.String,java.util.List)"},{"p":"org.apache.kafka.streams.errors","c":"TaskMigratedException","l":"TaskMigratedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"TaskMigratedException","l":"TaskMigratedException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription.TaskOffset","l":"TaskOffset(String, int, long)","u":"%3Cinit%3E(java.lang.String,int,long)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"taskOffsets()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment","l":"tasks()"},{"p":"org.apache.kafka.connect.health","c":"ConnectorHealth","l":"tasksState()"},{"p":"org.apache.kafka.connect.health","c":"TaskState","l":"TaskState(int, String, String, String)","u":"%3Cinit%3E(int,java.lang.String,java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.streams","c":"TopologyConfig.TaskConfig","l":"taskTimeoutMs"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"taskTimeoutMs"},{"p":"org.apache.kafka.common.errors","c":"TelemetryTooLargeException","l":"TelemetryTooLargeException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"TerminateTransactionOptions","l":"TerminateTransactionOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.kstream","c":"Predicate","l":"test(K, V)","u":"test(K,V)"},{"p":"org.apache.kafka.connect.transforms.predicates","c":"Predicate","l":"test(R)"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"TestRecord(ConsumerRecord)","u":"%3Cinit%3E(org.apache.kafka.clients.consumer.ConsumerRecord)"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"TestRecord(K, V)","u":"%3Cinit%3E(K,V)"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"TestRecord(K, V, Headers)","u":"%3Cinit%3E(K,V,org.apache.kafka.common.header.Headers)"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"TestRecord(K, V, Headers, Instant)","u":"%3Cinit%3E(K,V,org.apache.kafka.common.header.Headers,java.time.Instant)"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"TestRecord(K, V, Headers, Long)","u":"%3Cinit%3E(K,V,org.apache.kafka.common.header.Headers,java.lang.Long)"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"TestRecord(K, V, Instant)","u":"%3Cinit%3E(K,V,java.time.Instant)"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"TestRecord(ProducerRecord)","u":"%3Cinit%3E(org.apache.kafka.clients.producer.ProducerRecord)"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"TestRecord(V)","u":"%3Cinit%3E(V)"},{"p":"org.apache.kafka.common","c":"KafkaFuture","l":"thenApply(KafkaFuture.BaseFunction)","u":"thenApply(org.apache.kafka.common.KafkaFuture.BaseFunction)"},{"p":"org.apache.kafka.streams","c":"ThreadMetadata","l":"threadName()"},{"p":"org.apache.kafka.streams","c":"ThreadMetadata","l":"threadState()"},{"p":"org.apache.kafka.common.errors","c":"ThrottlingQuotaExceededException","l":"throttleTimeMs()"},{"p":"org.apache.kafka.common.errors","c":"ThrottlingQuotaExceededException","l":"ThrottlingQuotaExceededException(int, 
String)","u":"%3Cinit%3E(int,java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"ThrottlingQuotaExceededException","l":"ThrottlingQuotaExceededException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceTask","l":"THROUGHPUT_CONFIG"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceTask","l":"THROUGHPUT_CONFIG"},{"p":"org.apache.kafka.connect.data","c":"Time","l":"Time()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams","c":"TaskMetadata","l":"timeCurrentIdlingStarted()"},{"p":"org.apache.kafka.streams.kstream","c":"SlidingWindows","l":"timeDifferenceMs()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"LogSegmentData","l":"timeIndex()"},{"p":"org.apache.kafka.clients.consumer","c":"CloseOptions","l":"timeout()"},{"p":"org.apache.kafka.clients.consumer","c":"CloseOptions","l":"timeout(Duration)","u":"timeout(java.time.Duration)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.CloseOptions","l":"timeout(Duration)","u":"timeout(java.time.Duration)"},{"p":"org.apache.kafka.connect.sink","c":"SinkTaskContext","l":"timeout(long)"},{"p":"org.apache.kafka.common.errors","c":"TimeoutException","l":"TimeoutException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"TimeoutException","l":"TimeoutException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"TimeoutException","l":"TimeoutException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"TimeoutException","l":"TimeoutException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"AbstractOptions","l":"timeoutMs()"},{"p":"org.apache.kafka.clients.admin","c":"AbstractOptions","l":"timeoutMs(Integer)","u":"timeoutMs(java.lang.Integer)"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigsOptions","l":"timeoutMs(Integer)","u":"timeoutMs(java.lang.Integer)"},{"p":"org.apache.kafka.clients.admin","c":"CreateAclsOptions","l":"timeoutMs(Integer)","u":"timeoutMs(java.lang.Integer)"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsOptions","l":"timeoutMs(Integer)","u":"timeoutMs(java.lang.Integer)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteAclsOptions","l":"timeoutMs(Integer)","u":"timeoutMs(java.lang.Integer)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteTopicsOptions","l":"timeoutMs(Integer)","u":"timeoutMs(java.lang.Integer)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeAclsOptions","l":"timeoutMs(Integer)","u":"timeoutMs(java.lang.Integer)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeClusterOptions","l":"timeoutMs(Integer)","u":"timeoutMs(java.lang.Integer)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeConfigsOptions","l":"timeoutMs(Integer)","u":"timeoutMs(java.lang.Integer)"},{"p":"org.apache.kafka.clients.admin","c":"DescribeTopicsOptions","l":"timeoutMs(Integer)","u":"timeoutMs(java.lang.Integer)"},{"p":"org.apache.kafka.clients.admin","c":"ListTopicsOptions","l":"timeoutMs(Integer)","u":"timeoutMs(java.lang.Integer)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageManager.IndexType","l":"TIMESTAMP"},{"p":"org.apache.kafka.connect.mirror","c":"Heartbeat","l":"TIMESTAMP_KEY"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"TIMESTAMP_SIZE"},{"p":"org.apache.kafka.clients.admin","c":"ListOffsetsResult.ListOffsetsResultInfo","l":"timestamp()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"timestamp()"},{"p":"o
rg.apache.kafka.clients.consumer","c":"OffsetAndTimestamp","l":"timestamp()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"timestamp()"},{"p":"org.apache.kafka.clients.producer","c":"RecordMetadata","l":"timestamp()"},{"p":"org.apache.kafka.common.errors","c":"RecordDeserializationException","l":"timestamp()"},{"p":"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"timestamp()"},{"p":"org.apache.kafka.connect.mirror","c":"Heartbeat","l":"timestamp()"},{"p":"org.apache.kafka.streams.errors","c":"ErrorHandlerContext","l":"timestamp()"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyRecord","l":"timestamp()"},{"p":"org.apache.kafka.streams.processor.api","c":"Record","l":"timestamp()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext.CapturedForward","l":"timestamp()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"timestamp()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"timestamp()"},{"p":"org.apache.kafka.streams.processor","c":"RecordContext","l":"timestamp()"},{"p":"org.apache.kafka.streams.state","c":"ValueAndTimestamp","l":"timestamp()"},{"p":"org.apache.kafka.streams.state","c":"VersionedRecord","l":"timestamp()"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"timestamp()"},{"p":"org.apache.kafka.connect.data","c":"Timestamp","l":"Timestamp()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.state","c":"QueryableStoreTypes","l":"timestampedKeyValueStore()"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"timestampedKeyValueStoreBuilder(KeyValueBytesStoreSupplier, Serde, Serde)","u":"timestampedKeyValueStoreBuilder(org.apache.kafka.streams.state.KeyValueBytesStoreSupplier,org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.state","c":"QueryableStoreTypes","l":"timestampedWindowStore()"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"timestampedWindowStoreBuilder(WindowBytesStoreSupplier, Serde, Serde)","u":"timestampedWindowStoreBuilder(org.apache.kafka.streams.state.WindowBytesStoreSupplier,org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams","c":"TopologyConfig.TaskConfig","l":"timestampExtractor"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"timestampExtractorSupplier"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"timestampType()"},{"p":"org.apache.kafka.common.errors","c":"RecordDeserializationException","l":"timestampType()"},{"p":"org.apache.kafka.connect.sink","c":"SinkRecord","l":"timestampType()"},{"p":"org.apache.kafka.common.metrics","c":"MetricConfig","l":"timeWindow(long, TimeUnit)","u":"timeWindow(long,java.util.concurrent.TimeUnit)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedDeserializer","l":"TimeWindowedDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedDeserializer","l":"TimeWindowedDeserializer(Deserializer, Long)","u":"%3Cinit%3E(org.apache.kafka.common.serialization.Deserializer,java.lang.Long)"},{"p":"org.apache.kafka.streams.kstream","c":"WindowedSerdes.TimeWindowedSerde","l":"TimeWindowedSerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.kstream","c":"WindowedSerdes.TimeWindowedSerde","l":"TimeWindowedSerde(Serde, long)","u":"%3Cinit%3E(org.apache.kafka.common.serialization.Serde,long)"},{"p":"org.apache.kafka.streams.kstream","c":"WindowedSerdes","l":"timeWindowedSerdeFrom(Class, 
long)","u":"timeWindowedSerdeFrom(java.lang.Class,long)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedSerializer","l":"TimeWindowedSerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedSerializer","l":"TimeWindowedSerializer(Serializer)","u":"%3Cinit%3E(org.apache.kafka.common.serialization.Serializer)"},{"p":"org.apache.kafka.common.metrics","c":"MetricConfig","l":"timeWindowMs()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"to(String)","u":"to(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"to(String, Produced)","u":"to(java.lang.String,org.apache.kafka.streams.kstream.Produced)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"to(TopicNameExtractor)","u":"to(org.apache.kafka.streams.processor.TopicNameExtractor)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"to(TopicNameExtractor, Produced)","u":"to(org.apache.kafka.streams.processor.TopicNameExtractor,org.apache.kafka.streams.kstream.Produced)"},{"p":"org.apache.kafka.common.header","c":"Headers","l":"toArray()"},{"p":"org.apache.kafka.common","c":"Uuid","l":"toArray(List)","u":"toArray(java.util.List)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram.BinScheme","l":"toBin(double)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram.ConstantBinScheme","l":"toBin(double)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram.LinearBinScheme","l":"toBin(double)"},{"p":"org.apache.kafka.common","c":"KafkaFuture","l":"toCompletionStage()"},{"p":"org.apache.kafka.connect.storage","c":"Converter","l":"toConnectData(String, byte[])","u":"toConnectData(java.lang.String,byte[])"},{"p":"org.apache.kafka.connect.storage","c":"StringConverter","l":"toConnectData(String, byte[])","u":"toConnectData(java.lang.String,byte[])"},{"p":"org.apache.kafka.connect.storage","c":"Converter","l":"toConnectData(String, Headers, byte[])","u":"toConnectData(java.lang.String,org.apache.kafka.common.header.Headers,byte[])"},{"p":"org.apache.kafka.connect.storage","c":"HeaderConverter","l":"toConnectHeader(String, String, byte[])","u":"toConnectHeader(java.lang.String,java.lang.String,byte[])"},{"p":"org.apache.kafka.connect.storage","c":"SimpleHeaderConverter","l":"toConnectHeader(String, String, byte[])","u":"toConnectHeader(java.lang.String,java.lang.String,byte[])"},{"p":"org.apache.kafka.connect.storage","c":"StringConverter","l":"toConnectHeader(String, String, byte[])","u":"toConnectHeader(java.lang.String,java.lang.String,byte[])"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"toEnrichedRst()"},{"p":"org.apache.kafka.streams.kstream","c":"Printed","l":"toFile(String)","u":"toFile(java.lang.String)"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntry","l":"toFilter()"},{"p":"org.apache.kafka.common.acl","c":"AclBinding","l":"toFilter()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePattern","l":"toFilter()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"toHtml()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"toHtml(int, Function)","u":"toHtml(int,java.util.function.Function)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"toHtml(int, Function, 
Map)","u":"toHtml(int,java.util.function.Function,java.util.Map)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"toHtml(Map)","u":"toHtml(java.util.Map)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"toHtmlTable()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"toHtmlTable(Map)","u":"toHtmlTable(java.util.Map)"},{"p":"org.apache.kafka.common.metrics","c":"Metrics","l":"toHtmlTable(String, Iterable)","u":"toHtmlTable(java.lang.String,java.lang.Iterable)"},{"p":"org.apache.kafka.common.security.scram","c":"ScramLoginModule","l":"TOKEN_AUTH_CONFIG"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerExtensionsValidatorCallback","l":"token()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerTokenCallback","l":"token()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerValidatorCallback","l":"token()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerTokenCallback","l":"token(OAuthBearerToken)","u":"token(org.apache.kafka.common.security.oauthbearer.OAuthBearerToken)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerValidatorCallback","l":"token(OAuthBearerToken)","u":"token(org.apache.kafka.common.security.oauthbearer.OAuthBearerToken)"},{"p":"org.apache.kafka.common.security.auth","c":"KafkaPrincipal","l":"tokenAuthenticated()"},{"p":"org.apache.kafka.common.security.auth","c":"KafkaPrincipal","l":"tokenAuthenticated(boolean)"},{"p":"org.apache.kafka.common.metrics.stats","c":"TokenBucket","l":"TokenBucket()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.metrics.stats","c":"TokenBucket","l":"TokenBucket(TimeUnit)","u":"%3Cinit%3E(java.util.concurrent.TimeUnit)"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"tokenId()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"DelegationToken","l":"tokenInfo()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"TokenInformation(String, KafkaPrincipal, Collection, long, long, long)","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.common.security.auth.KafkaPrincipal,java.util.Collection,long,long,long)"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"TokenInformation(String, KafkaPrincipal, KafkaPrincipal, Collection, long, long, long)","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.common.security.auth.KafkaPrincipal,org.apache.kafka.common.security.auth.KafkaPrincipal,java.util.Collection,long,long,long)"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"tokenRequester()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"tokenRequesterAsString()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerValidatorCallback","l":"tokenValue()"},{"p":"org.apache.kafka.common","c":"Uuid","l":"toList(Uuid[])","u":"toList(org.apache.kafka.common.Uuid[])"},{"p":"org.apache.kafka.connect.data","c":"Decimal","l":"toLogical(Schema, byte[])","u":"toLogical(org.apache.kafka.connect.data.Schema,byte[])"},{"p":"org.apache.kafka.connect.data","c":"Date","l":"toLogical(Schema, int)","u":"toLogical(org.apache.kafka.connect.data.Schema,int)"},{"p":"org.apache.kafka.connect.data","c":"Time","l":"toLogical(Schema, int)","u":"toLogical(org.apache.kafka.connect.data.Schema,int)"},{"p":"org.apache.kafka.connect.data","c":"Timestamp","l":"toLogical(Schema, 
long)","u":"toLogical(org.apache.kafka.connect.data.Schema,long)"},{"p":"org.apache.kafka.common.config","c":"ConfigResource.Type","l":"TOPIC"},{"p":"org.apache.kafka.common.resource","c":"ResourceType","l":"TOPIC"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceTask","l":"TOPIC_CONFIG"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceTask","l":"TOPIC_CONFIG"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"TOPIC_KEY"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"TOPIC_PREFIX"},{"p":"org.apache.kafka.streams","c":"StreamsConfig.InternalConfig","l":"TOPIC_PREFIX_ALTERNATIVE"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"topic()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"topic()"},{"p":"org.apache.kafka.clients.producer","c":"RecordMetadata","l":"topic()"},{"p":"org.apache.kafka.common","c":"PartitionInfo","l":"topic()"},{"p":"org.apache.kafka.common","c":"TopicIdPartition","l":"topic()"},{"p":"org.apache.kafka.common","c":"TopicPartition","l":"topic()"},{"p":"org.apache.kafka.common","c":"TopicPartitionReplica","l":"topic()"},{"p":"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"topic()"},{"p":"org.apache.kafka.server.policy","c":"CreateTopicPolicy.RequestMetadata","l":"topic()"},{"p":"org.apache.kafka.streams.errors","c":"ErrorHandlerContext","l":"topic()"},{"p":"org.apache.kafka.streams.processor.api","c":"RecordMetadata","l":"topic()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"topic()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"topic()"},{"p":"org.apache.kafka.streams.processor","c":"RecordContext","l":"topic()"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"topic()"},{"p":"org.apache.kafka.streams","c":"TopologyDescription.Sink","l":"topic()"},{"p":"org.apache.kafka.common.errors","c":"TopicAuthorizationException","l":"TopicAuthorizationException(Set)","u":"%3Cinit%3E(java.util.Set)"},{"p":"org.apache.kafka.common.errors","c":"TopicAuthorizationException","l":"TopicAuthorizationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"TopicAuthorizationException","l":"TopicAuthorizationException(String, Set)","u":"%3Cinit%3E(java.lang.String,java.util.Set)"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"TopicConfig()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubtopologyDescription.TopicInfo","l":"topicConfigs()"},{"p":"org.apache.kafka.common.errors","c":"TopicDeletionDisabledException","l":"TopicDeletionDisabledException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"TopicDeletionDisabledException","l":"TopicDeletionDisabledException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"TopicDescription","l":"TopicDescription(String, boolean, List)","u":"%3Cinit%3E(java.lang.String,boolean,java.util.List)"},{"p":"org.apache.kafka.clients.admin","c":"TopicDescription","l":"TopicDescription(String, boolean, List, Set)","u":"%3Cinit%3E(java.lang.String,boolean,java.util.List,java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"TopicDescription","l":"TopicDescription(String, boolean, List, Set, 
Uuid)","u":"%3Cinit%3E(java.lang.String,boolean,java.util.List,java.util.Set,org.apache.kafka.common.Uuid)"},{"p":"org.apache.kafka.common.errors","c":"TopicExistsException","l":"TopicExistsException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"TopicExistsException","l":"TopicExistsException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsResult.TopicMetadataAndConfig","l":"topicId()"},{"p":"org.apache.kafka.clients.admin","c":"TopicDescription","l":"topicId()"},{"p":"org.apache.kafka.clients.admin","c":"TopicListing","l":"topicId()"},{"p":"org.apache.kafka.common","c":"TopicIdPartition","l":"topicId()"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsResult","l":"topicId(String)","u":"topicId(java.lang.String)"},{"p":"org.apache.kafka.common","c":"Cluster","l":"topicId(String)","u":"topicId(java.lang.String)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadata","l":"topicIdPartition()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentId","l":"topicIdPartition()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"topicIdPartition()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadataUpdate","l":"topicIdPartition()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemotePartitionDeleteMetadata","l":"topicIdPartition()"},{"p":"org.apache.kafka.common","c":"TopicIdPartition","l":"TopicIdPartition(Uuid, int, String)","u":"%3Cinit%3E(org.apache.kafka.common.Uuid,int,java.lang.String)"},{"p":"org.apache.kafka.common","c":"TopicIdPartition","l":"TopicIdPartition(Uuid, TopicPartition)","u":"%3Cinit%3E(org.apache.kafka.common.Uuid,org.apache.kafka.common.TopicPartition)"},{"p":"org.apache.kafka.common","c":"Cluster","l":"topicIds()"},{"p":"org.apache.kafka.common","c":"TopicCollection.TopicIdCollection","l":"topicIds()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteTopicsResult","l":"topicIdValues()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeTopicsResult","l":"topicIdValues()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubtopologyDescription.TopicInfo","l":"TopicInfo(int, int, Map)","u":"%3Cinit%3E(int,int,java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"TopicListing","l":"TopicListing(String, Uuid, boolean)","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.common.Uuid,boolean)"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsResult.TopicMetadataAndConfig","l":"TopicMetadataAndConfig(ApiException)","u":"%3Cinit%3E(org.apache.kafka.common.errors.ApiException)"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsResult.TopicMetadataAndConfig","l":"TopicMetadataAndConfig(Uuid, int, int, 
Config)","u":"%3Cinit%3E(org.apache.kafka.common.Uuid,int,int,org.apache.kafka.clients.admin.Config)"},{"p":"org.apache.kafka.common","c":"Cluster","l":"topicName(Uuid)","u":"topicName(org.apache.kafka.common.Uuid)"},{"p":"org.apache.kafka.streams","c":"TopologyDescription.Sink","l":"topicNameExtractor()"},{"p":"org.apache.kafka.common","c":"TopicCollection.TopicNameCollection","l":"topicNames()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteTopicsResult","l":"topicNameValues()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeTopicsResult","l":"topicNameValues()"},{"p":"org.apache.kafka.clients.admin","c":"AbortTransactionSpec","l":"topicPartition()"},{"p":"org.apache.kafka.common.errors","c":"RecordDeserializationException","l":"topicPartition()"},{"p":"org.apache.kafka.common","c":"TopicIdPartition","l":"topicPartition()"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"topicPartition()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskTopicPartition","l":"topicPartition()"},{"p":"org.apache.kafka.common","c":"TopicPartition","l":"TopicPartition(String, int)","u":"%3Cinit%3E(java.lang.String,int)"},{"p":"org.apache.kafka.common","c":"TopicPartitionInfo","l":"TopicPartitionInfo(int, Node, List, List)","u":"%3Cinit%3E(int,org.apache.kafka.common.Node,java.util.List,java.util.List)"},{"p":"org.apache.kafka.common","c":"TopicPartitionInfo","l":"TopicPartitionInfo(int, Node, List, List, List, List)","u":"%3Cinit%3E(int,org.apache.kafka.common.Node,java.util.List,java.util.List,java.util.List,java.util.List)"},{"p":"org.apache.kafka.common","c":"TopicPartitionReplica","l":"TopicPartitionReplica(String, int, int)","u":"%3Cinit%3E(java.lang.String,int,int)"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupOffsetsSpec","l":"topicPartitions()"},{"p":"org.apache.kafka.clients.admin","c":"ListShareGroupOffsetsSpec","l":"topicPartitions()"},{"p":"org.apache.kafka.clients.admin","c":"MemberAssignment","l":"topicPartitions()"},{"p":"org.apache.kafka.clients.admin","c":"ShareMemberAssignment","l":"topicPartitions()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionDescription","l":"topicPartitions()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskInfo","l":"topicPartitions()"},{"p":"org.apache.kafka.streams","c":"StreamsMetadata","l":"topicPartitions()"},{"p":"org.apache.kafka.streams","c":"TaskMetadata","l":"topicPartitions()"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupOffsetsSpec","l":"topicPartitions(Collection)","u":"topicPartitions(java.util.Collection)"},{"p":"org.apache.kafka.clients.admin","c":"ListShareGroupOffsetsSpec","l":"topicPartitions(Collection)","u":"topicPartitions(java.util.Collection)"},{"p":"org.apache.kafka.streams","c":"TopologyDescription.Source","l":"topicPattern()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"topicPrefix(String)","u":"topicPrefix(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"DeleteShareGroupOffsetsResult","l":"topicResult(String)","u":"topicResult(java.lang.String)"},{"p":"org.apache.kafka.connect.sink","c":"SinkConnector","l":"TOPICS_CONFIG"},{"p":"org.apache.kafka.connect.sink","c":"SinkTask","l":"TOPICS_CONFIG"},{"p":"org.apache.kafka.connect.sink","c":"SinkTask","l":"TOPICS_REGEX_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Subscription","l":"topics()"},{"p":"org.apache.kafka.common","c":"Cluster","l":"topics()"},{"p":"org.apache.kafka.streams","c":"TopologyDescription.Source","l":"topicSet()"},{"p":"org.apa
che.kafka.connect.mirror","c":"DefaultReplicationPolicy","l":"topicSource(String)","u":"topicSource(java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"IdentityReplicationPolicy","l":"topicSource(String)","u":"topicSource(java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"ReplicationPolicy","l":"topicSource(String)","u":"topicSource(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"TOPOLOGY_OPTIMIZATION_CONFIG"},{"p":"org.apache.kafka.streams","c":"Topology","l":"Topology()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams","c":"Topology","l":"Topology(TopologyConfig)","u":"%3Cinit%3E(org.apache.kafka.streams.TopologyConfig)"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"TopologyConfig(StreamsConfig)","u":"%3Cinit%3E(org.apache.kafka.streams.StreamsConfig)"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"TopologyConfig(String, StreamsConfig, Properties)","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.streams.StreamsConfig,java.util.Properties)"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupDescription","l":"topologyEpoch()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"topologyEpoch()"},{"p":"org.apache.kafka.streams.errors","c":"TopologyException","l":"TopologyException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"TopologyException","l":"TopologyException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"TopologyException","l":"TopologyException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"topologyName"},{"p":"org.apache.kafka.streams.processor","c":"TaskId","l":"topologyName()"},{"p":"org.apache.kafka.streams","c":"TopologyConfig","l":"topologyOverrides"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"TopologyTestDriver(Topology)","u":"%3Cinit%3E(org.apache.kafka.streams.Topology)"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"TopologyTestDriver(Topology, Instant)","u":"%3Cinit%3E(org.apache.kafka.streams.Topology,java.time.Instant)"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"TopologyTestDriver(Topology, Properties)","u":"%3Cinit%3E(org.apache.kafka.streams.Topology,java.util.Properties)"},{"p":"org.apache.kafka.streams","c":"TopologyTestDriver","l":"TopologyTestDriver(Topology, Properties, Instant)","u":"%3Cinit%3E(org.apache.kafka.streams.Topology,java.util.Properties,java.time.Instant)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"toRst()"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"toStream()"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"toStream(KeyValueMapper)","u":"toStream(org.apache.kafka.streams.kstream.KeyValueMapper)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"toStream(KeyValueMapper, 
Named)","u":"toStream(org.apache.kafka.streams.kstream.KeyValueMapper,org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"toStream(Named)","u":"toStream(org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.clients.admin","c":"AbortTransactionOptions","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"AbortTransactionSpec","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigOp","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ClassicGroupDescription","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ClientMetricsResourceListing","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"Config","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSynonym","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupListing","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeProducersOptions","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeProducersResult.PartitionProducerState","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeReplicaLogDirsResult.ReplicaLogDirInfo","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeTransactionsOptions","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"FeatureMetadata","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"FeatureUpdate","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"FenceProducersOptions","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"FinalizedVersionRange","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"GroupListing","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupOffsetsSpec","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ListOffsetsResult.ListOffsetsResultInfo","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ListShareGroupOffsetsSpec","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ListTopicsOptions","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ListTransactionsOptions","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"LogDirDescription","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"MemberAssignment","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"NewPartitions","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"NewTopic","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"PartitionReassignment","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ProducerState","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo.Node","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo.ReplicaState","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"RaftVoterEndpoint","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"RecordsToDelete","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ReplicaInfo","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ScramCredentialInfo","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ShareGroupDescription","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ShareMemberAssignment","l"
:"toString()"},{"p":"org.apache.kafka.clients.admin","c":"ShareMemberDescription","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupDescription","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberAssignment.TaskIds","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberAssignment","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription.Endpoint","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription.TaskOffset","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubtopologyDescription.TopicInfo","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupSubtopologyDescription","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"SupportedVersionRange","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"TerminateTransactionOptions","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"TopicDescription","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"TopicListing","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionDescription","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionListing","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionState","l":"toString()"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialsDescription","l":"toString()"},{"p":"org.apache.kafka.clients.consumer","c":"AcknowledgeType","l":"toString()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerGroupMetadata","l":"toString()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Assignment","l":"toString()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.GroupAssignment","l":"toString()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.GroupSubscription","l":"toString()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Subscription","l":"toString()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"toString()"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndMetadata","l":"toString()"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetAndTimestamp","l":"toString()"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetResetStrategy","l":"toString()"},{"p":"org.apache.kafka.clients.consumer","c":"SubscriptionPattern","l":"toString()"},{"p":"org.apache.kafka.clients.producer","c":"PreparedTxnState","l":"toString()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"toString()"},{"p":"org.apache.kafka.clients.producer","c":"RecordMetadata","l":"toString()"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntry","l":"toString()"},{"p":"org.apache.kafka.common.acl","c":"AccessControlEntryFilter","l":"toString()"},{"p":"org.apache.kafka.common.acl","c":"AclBinding","l":"toString()"},{"p":"org.apache.kafka.common.acl","c":"AclBindingFilter","l":"toString()"},{"p":"org.apache.kafka.common","c":"ClassicGroupState","l":"toString()"},{"p":"org.apache.kafka.common","c":"Cluster","l":"toString()"},{"p":"org.apache.kafka.common","c":"ClusterResource","l":"toString()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.CaseInsensitiveValidString","l":"toString()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.CompositeValidator","l":"toString()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Lambd
aValidator","l":"toString()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ListSize","l":"toString()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.NonEmptyString","l":"toString()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.NonEmptyStringWithoutControlChars","l":"toString()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.NonNullValidator","l":"toString()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Range","l":"toString()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ValidList","l":"toString()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ValidString","l":"toString()"},{"p":"org.apache.kafka.common.config","c":"ConfigResource","l":"toString()"},{"p":"org.apache.kafka.common.config","c":"ConfigValue","l":"toString()"},{"p":"org.apache.kafka.common.config","c":"SslClientAuth","l":"toString()"},{"p":"org.apache.kafka.common","c":"ConsumerGroupState","l":"toString()"},{"p":"org.apache.kafka.common","c":"Endpoint","l":"toString()"},{"p":"org.apache.kafka.common","c":"GroupState","l":"toString()"},{"p":"org.apache.kafka.common","c":"GroupType","l":"toString()"},{"p":"org.apache.kafka.common","c":"IsolationLevel","l":"toString()"},{"p":"org.apache.kafka.common","c":"MetricName","l":"toString()"},{"p":"org.apache.kafka.common","c":"MetricNameTemplate","l":"toString()"},{"p":"org.apache.kafka.common.metrics","c":"Quota","l":"toString()"},{"p":"org.apache.kafka.common.metrics","c":"QuotaViolationException","l":"toString()"},{"p":"org.apache.kafka.common.metrics.stats","c":"CumulativeSum","l":"toString()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Frequency","l":"toString()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram","l":"toString()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Meter","l":"toString()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Rate","l":"toString()"},{"p":"org.apache.kafka.common.metrics.stats","c":"SampledStat","l":"toString()"},{"p":"org.apache.kafka.common.metrics.stats","c":"TokenBucket","l":"toString()"},{"p":"org.apache.kafka.common","c":"Node","l":"toString()"},{"p":"org.apache.kafka.common","c":"PartitionInfo","l":"toString()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaAlteration.Op","l":"toString()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaAlteration","l":"toString()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaEntity","l":"toString()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilter","l":"toString()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaFilterComponent","l":"toString()"},{"p":"org.apache.kafka.common.resource","c":"Resource","l":"toString()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePattern","l":"toString()"},{"p":"org.apache.kafka.common.resource","c":"ResourcePatternFilter","l":"toString()"},{"p":"org.apache.kafka.common.security.auth","c":"KafkaPrincipal","l":"toString()"},{"p":"org.apache.kafka.common.security.auth","c":"SaslExtensions","l":"toString()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"DelegationToken","l":"toString()"},{"p":"org.apache.kafka.common.security.token.delegation","c":"TokenInformation","l":"toString()"},{"p":"org.apache.kafka.common","c":"TopicIdPartition","l":"toString()"},{"p":"org.apache.kafka.common","c":"TopicPartition","l":"toString()"},{"p":"org.apache.kafka.common","c":"TopicPartitionInfo","l":"toString()"},{"p":"org.apache.kafka.common","c":"TopicPartitionReplica","l":"toString()"},{"p":"org.apache.kafka.common","c":"Uuid","l":"toString()"},{"p":
"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"toString()"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"toString()"},{"p":"org.apache.kafka.connect.data","c":"Field","l":"toString()"},{"p":"org.apache.kafka.connect.data","c":"SchemaAndValue","l":"toString()"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"toString()"},{"p":"org.apache.kafka.connect.header","c":"ConnectHeaders","l":"toString()"},{"p":"org.apache.kafka.connect.health","c":"ConnectorHealth","l":"toString()"},{"p":"org.apache.kafka.connect.health","c":"ConnectorState","l":"toString()"},{"p":"org.apache.kafka.connect.health","c":"ConnectorType","l":"toString()"},{"p":"org.apache.kafka.connect.health","c":"TaskState","l":"toString()"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"toString()"},{"p":"org.apache.kafka.connect.mirror","c":"Heartbeat","l":"toString()"},{"p":"org.apache.kafka.connect.mirror","c":"SourceAndTarget","l":"toString()"},{"p":"org.apache.kafka.connect.sink","c":"SinkRecord","l":"toString()"},{"p":"org.apache.kafka.connect.source","c":"SourceRecord","l":"toString()"},{"p":"org.apache.kafka.connect.source","c":"SourceTask.TransactionBoundary","l":"toString()"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"GroupAssignment","l":"toString()"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"SubscriptionType","l":"toString()"},{"p":"org.apache.kafka.server.authorizer","c":"Action","l":"toString()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"LogSegmentData","l":"toString()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentId","l":"toString()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata.CustomMetadata","l":"toString()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata","l":"toString()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadataUpdate","l":"toString()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemotePartitionDeleteMetadata","l":"toString()"},{"p":"org.apache.kafka.server.policy","c":"AlterConfigPolicy.RequestMetadata","l":"toString()"},{"p":"org.apache.kafka.server.policy","c":"CreateTopicPolicy.RequestMetadata","l":"toString()"},{"p":"org.apache.kafka.streams","c":"KeyQueryMetadata","l":"toString()"},{"p":"org.apache.kafka.streams","c":"KeyValue","l":"toString()"},{"p":"org.apache.kafka.streams.kstream","c":"JoinWindows","l":"toString()"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindows","l":"toString()"},{"p":"org.apache.kafka.streams.kstream","c":"SlidingWindows","l":"toString()"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"toString()"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindows","l":"toString()"},{"p":"org.apache.kafka.streams.kstream","c":"UnlimitedWindows","l":"toString()"},{"p":"org.apache.kafka.streams.kstream","c":"Window","l":"toString()"},{"p":"org.apache.kafka.streams.kstream","c":"Windowed","l":"toString()"},{"p":"org.apache.kafka.streams","c":"LagInfo","l":"toString()"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyRecord","l":"toString()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext.CapturedForward","l":"toString()"},{"p":"org.apache.kafka.streams.processor.api","c":"Record","l":"toString()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"AssignmentConfigs","l":"toString()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment.AssignedTask","l":"toS
tring()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment","l":"toString()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"ProcessId","l":"toString()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext.CapturedForward","l":"toString()"},{"p":"org.apache.kafka.streams.processor","c":"TaskId","l":"toString()"},{"p":"org.apache.kafka.streams.processor","c":"To","l":"toString()"},{"p":"org.apache.kafka.streams.query","c":"Position","l":"toString()"},{"p":"org.apache.kafka.streams.query","c":"PositionBound","l":"toString()"},{"p":"org.apache.kafka.streams.query","c":"StateQueryResult","l":"toString()"},{"p":"org.apache.kafka.streams.query","c":"WindowKeyQuery","l":"toString()"},{"p":"org.apache.kafka.streams.query","c":"WindowRangeQuery","l":"toString()"},{"p":"org.apache.kafka.streams.state","c":"DslKeyValueParams","l":"toString()"},{"p":"org.apache.kafka.streams.state","c":"DslSessionParams","l":"toString()"},{"p":"org.apache.kafka.streams.state","c":"DslWindowParams","l":"toString()"},{"p":"org.apache.kafka.streams.state","c":"HostInfo","l":"toString()"},{"p":"org.apache.kafka.streams.state","c":"ValueAndTimestamp","l":"toString()"},{"p":"org.apache.kafka.streams.state","c":"VersionedRecord","l":"toString()"},{"p":"org.apache.kafka.streams","c":"StoreQueryParameters","l":"toString()"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"toString()"},{"p":"org.apache.kafka.streams","c":"TestInputTopic","l":"toString()"},{"p":"org.apache.kafka.streams","c":"TestOutputTopic","l":"toString()"},{"p":"org.apache.kafka.streams.kstream","c":"Printed","l":"toSysOut()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"toTable()"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"toTable(Materialized>)","u":"toTable(org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"toTable(Named)","u":"toTable(org.apache.kafka.streams.kstream.Named)"},{"p":"org.apache.kafka.streams.kstream","c":"KStream","l":"toTable(Named, 
Materialized>)","u":"toTable(org.apache.kafka.streams.kstream.Named,org.apache.kafka.streams.kstream.Materialized)"},{"p":"org.apache.kafka.clients.admin","c":"LogDirDescription","l":"totalBytes()"},{"p":"org.apache.kafka.clients.admin","c":"NewPartitions","l":"totalCount()"},{"p":"org.apache.kafka.streams.query","c":"MultiVersionedKeyQuery","l":"toTime()"},{"p":"org.apache.kafka.streams.query","c":"MultiVersionedKeyQuery","l":"toTime(Instant)","u":"toTime(java.time.Instant)"},{"p":"org.apache.kafka.common.metrics","c":"Sensor.RecordingLevel","l":"TRACE"},{"p":"org.apache.kafka.common.config","c":"LogLevelConfig","l":"TRACE_LOG_LEVEL"},{"p":"org.apache.kafka.connect.health","c":"AbstractState","l":"traceMessage()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageManager.IndexType","l":"TRANSACTION"},{"p":"org.apache.kafka.connect.source","c":"SourceTask","l":"TRANSACTION_BOUNDARY_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"TRANSACTION_TIMEOUT_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"TRANSACTION_TIMEOUT_DOC"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG"},{"p":"org.apache.kafka.common.errors","c":"TransactionAbortableException","l":"TransactionAbortableException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"TransactionAbortableException","l":"TransactionAbortableException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"transactionAborted()"},{"p":"org.apache.kafka.common.errors","c":"TransactionAbortedException","l":"TransactionAbortedException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"TransactionAbortedException","l":"TransactionAbortedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"TransactionAbortedException","l":"TransactionAbortedException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.resource","c":"ResourceType","l":"TRANSACTIONAL_ID"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"TRANSACTIONAL_ID_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"TRANSACTIONAL_ID_DOC"},{"p":"org.apache.kafka.clients.admin","c":"TransactionListing","l":"transactionalId()"},{"p":"org.apache.kafka.common.errors","c":"TransactionalIdAuthorizationException","l":"TransactionalIdAuthorizationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"TransactionalIdNotFoundException","l":"TransactionalIdNotFoundException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"transactionCommitted()"},{"p":"org.apache.kafka.connect.source","c":"SourceTaskContext","l":"transactionContext()"},{"p":"org.apache.kafka.common.errors","c":"TransactionCoordinatorFencedException","l":"TransactionCoordinatorFencedException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"TransactionCoordinatorFencedException","l":"TransactionCoordinatorFencedException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"TransactionDescription","l":"TransactionDescription(int, TransactionState, long, int, long, OptionalLong, 
Set)","u":"%3Cinit%3E(int,org.apache.kafka.clients.admin.TransactionState,long,int,long,java.util.OptionalLong,java.util.Set)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"LogSegmentData","l":"transactionIndex()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"transactionInFlight()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"transactionInitialized()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionListing","l":"TransactionListing(String, long, TransactionState)","u":"%3Cinit%3E(java.lang.String,long,org.apache.kafka.clients.admin.TransactionState)"},{"p":"org.apache.kafka.clients.admin","c":"TransactionDescription","l":"transactionStartTimeMs()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionDescription","l":"transactionTimeoutMs()"},{"p":"org.apache.kafka.streams.kstream","c":"Transformer","l":"transform(K, V)","u":"transform(K,V)"},{"p":"org.apache.kafka.streams.kstream","c":"ValueTransformerWithKey","l":"transform(K, V)","u":"transform(K,V)"},{"p":"org.apache.kafka.common.config","c":"ConfigTransformer","l":"transform(Map)","u":"transform(java.util.Map)"},{"p":"org.apache.kafka.streams.kstream","c":"ValueTransformer","l":"transform(V)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"transformValues(ValueTransformerWithKeySupplier, Materialized>, Named, String...)","u":"transformValues(org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier,org.apache.kafka.streams.kstream.Materialized,org.apache.kafka.streams.kstream.Named,java.lang.String...)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"transformValues(ValueTransformerWithKeySupplier, Materialized>, String...)","u":"transformValues(org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier,org.apache.kafka.streams.kstream.Materialized,java.lang.String...)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"transformValues(ValueTransformerWithKeySupplier, Named, String...)","u":"transformValues(org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier,org.apache.kafka.streams.kstream.Named,java.lang.String...)"},{"p":"org.apache.kafka.streams.kstream","c":"KTable","l":"transformValues(ValueTransformerWithKeySupplier, String...)","u":"transformValues(org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier,java.lang.String...)"},{"p":"org.apache.kafka.connect.mirror","c":"RemoteClusterUtils","l":"translateOffsets(Map, String, String, 
Duration)","u":"translateOffsets(java.util.Map,java.lang.String,java.lang.String,java.time.Duration)"},{"p":"org.apache.kafka.common.security.auth","c":"SslEngineFactory","l":"truststore()"},{"p":"org.apache.kafka.common.config","c":"ConfigData","l":"ttl()"},{"p":"org.apache.kafka.common.config","c":"ConfigTransformerResult","l":"ttls()"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"TWO_PHASE_COMMIT"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"type"},{"p":"org.apache.kafka.connect.storage","c":"ConverterConfig","l":"TYPE_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry","l":"type()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupDescription","l":"type()"},{"p":"org.apache.kafka.clients.admin","c":"ConsumerGroupListing","l":"type()"},{"p":"org.apache.kafka.clients.admin","c":"GroupListing","l":"type()"},{"p":"org.apache.kafka.clients.admin","c":"ScramMechanism","l":"type()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"type()"},{"p":"org.apache.kafka.common.config","c":"ConfigResource","l":"type()"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"type()"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"type()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"type()"},{"p":"org.apache.kafka.connect.health","c":"ConnectorHealth","l":"type()"},{"p":"org.apache.kafka.connect.storage","c":"ConverterConfig","l":"type()"},{"p":"org.apache.kafka.streams.kstream","c":"EmitStrategy","l":"type()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment.AssignedTask","l":"type()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"type(Schema.Type)","u":"type(org.apache.kafka.connect.data.Schema.Type)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"typeOf(String)","u":"typeOf(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupsOptions","l":"types()"},{"p":"org.apache.kafka.clients.admin","c":"ListGroupsOptions","l":"types()"},{"p":"org.apache.kafka.common.errors","c":"UnacceptableCredentialException","l":"UnacceptableCredentialException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnacceptableCredentialException","l":"UnacceptableCredentialException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common","c":"Cluster","l":"unauthorizedTopics()"},{"p":"org.apache.kafka.common.errors","c":"TopicAuthorizationException","l":"unauthorizedTopics()"},{"p":"org.apache.kafka.streams.state","c":"HostInfo","l":"unavailable()"},{"p":"org.apache.kafka.streams.kstream","c":"Suppressed.BufferConfig","l":"unbounded()"},{"p":"org.apache.kafka.streams.query","c":"PositionBound","l":"unbounded()"},{"p":"org.apache.kafka.common","c":"ElectionType","l":"UNCLEAN"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG"},{"p":"org.apache.kafka.common.config","c":"TopicConfig","l":"UNCLEAN_LEADER_ELECTION_ENABLE_DOC"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"uncommittedOffsets()"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"uncommittedRecords()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Rate","l":"unitName()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSource","l":"UNKNOWN"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigType","l":"UNKNOWN"},{"p":"org.apache.kafka.clients.admin","c":"EndpointType","l":"UNKNOWN"},{"p":"org.apache.kafka.clients.admin","c":"FeatureUpdate.UpgradeType","l":"UNKNOWN"},{"p":"org.apache.kafka.clients.admin","c":"ScramMechanism","l":"UNKNOWN"},{"p":"org.apache.kafka.clients.admin","c":"TransactionState","l":"UNKNOWN"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"UNKNOWN"},{"p":"org.apache.kafka.common.acl","c":"AclPermissionType","l":"UNKNOWN"},{"p":"org.apache.kafka.common","c":"ClassicGroupState","l":"UNKNOWN"},{"p":"org.apache.kafka.common.config","c":"ConfigResource.Type","l":"UNKNOWN"},{"p":"org.apache.kafka.common","c":"ConsumerGroupState","l":"UNKNOWN"},{"p":"org.apache.kafka.common","c":"GroupState","l":"UNKNOWN"},{"p":"org.apache.kafka.common","c":"GroupType","l":"UNKNOWN"},{"p":"org.apache.kafka.common.resource","c":"PatternType","l":"UNKNOWN"},{"p":"org.apache.kafka.common.resource","c":"ResourceType","l":"UNKNOWN"},{"p":"org.apache.kafka.connect.health","c":"ConnectorType","l":"UNKNOWN"},{"p":"org.apache.kafka.clients.producer","c":"RecordMetadata","l":"UNKNOWN_PARTITION"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignor.AssignmentError","l":"UNKNOWN_PROCESS_ID"},{"p":"org.apache.kafka.streams.query","c":"FailureReason","l":"UNKNOWN_QUERY_TYPE"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignor.AssignmentError","l":"UNKNOWN_TASK_ID"},{"p":"org.apache.kafka.common.errors","c":"UnknownControllerIdException","l":"UnknownControllerIdException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnknownLeaderEpochException","l":"UnknownLeaderEpochException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnknownLeaderEpochException","l":"UnknownLeaderEpochException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"UnknownMemberIdException","l":"UnknownMemberIdException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"UnknownMemberIdException","l":"UnknownMemberIdException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnknownMemberIdException","l":"UnknownMemberIdException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"UnknownMemberIdException","l":"UnknownMemberIdException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"UnknownProducerIdException","l":"UnknownProducerIdException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnknownServerException","l":"UnknownServerException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"UnknownServerException","l":"UnknownServerException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnknownServerException","l":"UnknownServerException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"UnknownServerException","l":"UnknownServerException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"UnknownStateStoreException","l":"UnknownStateStoreException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"UnknownStateStoreException","l":"UnknownStateStoreException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"UnknownSubscriptionIdException","l":"UnknownSubscriptionIdException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnknownTopicIdException","l":"UnknownTopicIdException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnknownTopicOrPartitionException","l":"UnknownTopicOrPartitionException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"UnknownTopicOrPartitionException","l":"UnknownTopicOrPartitionException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnknownTopicOrPartitionException","l":"UnknownTopicOrPartitionException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"UnknownTopicOrPartitionException","l":"UnknownTopicOrPartitionException(Throwable)","u":"%3Cinit%3E(java.lang.Throwable)"},{"p":"org.apache.kafka.streams.errors","c":"UnknownTopologyException","l":"UnknownTopologyException(String, String)","u":"%3Cinit%3E(java.lang.String,java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"UnknownTopologyException","l":"UnknownTopologyException(String, Throwable, String)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable,java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"unregisterBroker(int)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"unregisterBroker(int, UnregisterBrokerOptions)","u":"unregisterBroker(int,org.apache.kafka.clients.admin.UnregisterBrokerOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"unregisterBroker(int, UnregisterBrokerOptions)","u":"unregisterBroker(int,org.apache.kafka.clients.admin.UnregisterBrokerOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"unregisterBroker(int, 
UnregisterBrokerOptions)","u":"unregisterBroker(int,org.apache.kafka.clients.admin.UnregisterBrokerOptions)"},{"p":"org.apache.kafka.clients.admin","c":"UnregisterBrokerOptions","l":"UnregisterBrokerOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"unregisterMetricFromSubscription(KafkaMetric)","u":"unregisterMetricFromSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"unregisterMetricFromSubscription(KafkaMetric)","u":"unregisterMetricFromSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"unregisterMetricFromSubscription(KafkaMetric)","u":"unregisterMetricFromSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"unregisterMetricFromSubscription(KafkaMetric)","u":"unregisterMetricFromSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"unregisterMetricFromSubscription(KafkaMetric)","u":"unregisterMetricFromSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"unregisterMetricFromSubscription(KafkaMetric)","u":"unregisterMetricFromSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"unregisterMetricFromSubscription(KafkaMetric)","u":"unregisterMetricFromSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"unregisterMetricFromSubscription(KafkaMetric)","u":"unregisterMetricFromSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"unregisterMetricFromSubscription(KafkaMetric)","u":"unregisterMetricFromSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.producer","c":"KafkaProducer","l":"unregisterMetricFromSubscription(KafkaMetric)","u":"unregisterMetricFromSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.producer","c":"MockProducer","l":"unregisterMetricFromSubscription(KafkaMetric)","u":"unregisterMetricFromSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.clients.producer","c":"Producer","l":"unregisterMetricFromSubscription(KafkaMetric)","u":"unregisterMetricFromSubscription(org.apache.kafka.common.metrics.KafkaMetric)"},{"p":"org.apache.kafka.common.errors","c":"UnreleasedInstanceIdException","l":"UnreleasedInstanceIdException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"FeatureUpdate.UpgradeType","l":"UNSAFE_DOWNGRADE"},{"p":"org.apache.kafka.common.errors","c":"UnstableOffsetCommitException","l":"UnstableOffsetCommitException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"unsubscribe()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"unsubscribe()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"unsubscribe()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"unsubscribe()"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"unsubscribe()"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"unsubscribe()"},{"p":"org.apache.kafka.common.config.provider","c":"ConfigProvider"
,"l":"unsubscribe(String, Set, ConfigChangeCallback)","u":"unsubscribe(java.lang.String,java.util.Set,org.apache.kafka.common.config.ConfigChangeCallback)"},{"p":"org.apache.kafka.common.config.provider","c":"ConfigProvider","l":"unsubscribeAll()"},{"p":"org.apache.kafka.connect.source","c":"ConnectorTransactionBoundaries","l":"UNSUPPORTED"},{"p":"org.apache.kafka.connect.source","c":"ExactlyOnceSupport","l":"UNSUPPORTED"},{"p":"org.apache.kafka.common.errors","c":"UnsupportedAssignorException","l":"UnsupportedAssignorException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnsupportedByAuthenticationException","l":"UnsupportedByAuthenticationException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnsupportedByAuthenticationException","l":"UnsupportedByAuthenticationException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"UnsupportedCompressionTypeException","l":"UnsupportedCompressionTypeException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnsupportedCompressionTypeException","l":"UnsupportedCompressionTypeException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"UnsupportedEndpointTypeException","l":"UnsupportedEndpointTypeException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnsupportedForMessageFormatException","l":"UnsupportedForMessageFormatException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnsupportedForMessageFormatException","l":"UnsupportedForMessageFormatException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"UnsupportedSaslMechanismException","l":"UnsupportedSaslMechanismException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnsupportedSaslMechanismException","l":"UnsupportedSaslMechanismException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.common.errors","c":"UnsupportedVersionException","l":"UnsupportedVersionException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"UnsupportedVersionException","l":"UnsupportedVersionException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.streams.kstream","c":"Suppressed","l":"untilTimeLimit(Duration, 
Suppressed.BufferConfig)","u":"untilTimeLimit(java.time.Duration,org.apache.kafka.streams.kstream.Suppressed.BufferConfig)"},{"p":"org.apache.kafka.streams.kstream","c":"Suppressed","l":"untilWindowCloses(Suppressed.StrictBufferConfig)","u":"untilWindowCloses(org.apache.kafka.streams.kstream.Suppressed.StrictBufferConfig)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"unused()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"updateBeginningOffsets(Map)","u":"updateBeginningOffsets(java.util.Map)"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaCallback","l":"updateClusterMetadata(Cluster)","u":"updateClusterMetadata(org.apache.kafka.common.Cluster)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"updateDurationOffsets(Map)","u":"updateDurationOffsets(java.util.Map)"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"updateEndOffsets(Map)","u":"updateEndOffsets(java.util.Map)"},{"p":"org.apache.kafka.clients.admin","c":"Admin","l":"updateFeatures(Map, UpdateFeaturesOptions)","u":"updateFeatures(java.util.Map,org.apache.kafka.clients.admin.UpdateFeaturesOptions)"},{"p":"org.apache.kafka.clients.admin","c":"ForwardingAdmin","l":"updateFeatures(Map, UpdateFeaturesOptions)","u":"updateFeatures(java.util.Map,org.apache.kafka.clients.admin.UpdateFeaturesOptions)"},{"p":"org.apache.kafka.clients.admin","c":"KafkaAdminClient","l":"updateFeatures(Map, UpdateFeaturesOptions)","u":"updateFeatures(java.util.Map,org.apache.kafka.clients.admin.UpdateFeaturesOptions)"},{"p":"org.apache.kafka.clients.admin","c":"UpdateFeaturesOptions","l":"UpdateFeaturesOptions()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"updatePartitions(String, List)","u":"updatePartitions(java.lang.String,java.util.List)"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaCallback","l":"updateQuota(ClientQuotaType, ClientQuotaEntity, 
double)","u":"updateQuota(org.apache.kafka.server.quota.ClientQuotaType,org.apache.kafka.server.quota.ClientQuotaEntity,double)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogMetadataManager","l":"updateRemoteLogSegmentMetadata(RemoteLogSegmentMetadataUpdate)","u":"updateRemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate)"},{"p":"org.apache.kafka.clients.admin","c":"FeatureUpdate.UpgradeType","l":"UPGRADE"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_0100"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_0101"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_0102"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_0110"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_10"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_11"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_20"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_21"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_22"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_23"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_24"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_25"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_26"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_27"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_28"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_30"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_31"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_32"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_33"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_34"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_35"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_36"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_37"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_38"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_39"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_40"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"UPGRADE_FROM_CONFIG"},{"p":"org.apache.kafka.clients.admin","c":"MemberDescription","l":"upgraded()"},{"p":"org.apache.kafka.clients.admin","c":"FeatureUpdate","l":"upgradeType()"},{"p":"org.apache.kafka.streams.query","c":"TimestampedRangeQuery","l":"upperBound()"},{"p":"org.apache.kafka.common.metrics","c":"Quota","l":"upperBound(double)"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"UPSTREAM_OFFSET_KEY"},{"p":"org.apache.kafka.connect.mirror","c":"MirrorClient","l":"upstreamClusters()"},{"p":"org.apache.kafka.connect.mirror","c":"RemoteClusterUtils","l":"upstreamClusters(Map)","u":"upstreamClusters(java.util.Map)"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"upstreamOffset()"},{"p":"org.apache.kafka.connect.mirror","c":"DefaultReplicationPolicy","l":"upstreamTopic(String)","u":"upstreamTopic(java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"IdentityReplicationPolicy","l":"upstreamTopic(String)","u":"upstreamTopic(java.lang.String)"},{"p":"org.apache.kafka.connect.mirror","c":"ReplicationPolicy","l
":"upstreamTopic(String)","u":"upstreamTopic(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"LogDirDescription","l":"usableBytes()"},{"p":"org.apache.kafka.streams.processor","c":"UsePartitionTimeOnInvalidTimestamp","l":"UsePartitionTimeOnInvalidTimestamp()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.resource","c":"ResourceType","l":"USER"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaEntity.ConfigEntityType","l":"USER"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaEntity","l":"USER"},{"p":"org.apache.kafka.common.security.auth","c":"KafkaPrincipal","l":"USER_TYPE"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialAlteration","l":"user()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Assignment","l":"userData()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.Subscription","l":"userData()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberDescription","l":"userEndpoint()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeUserScramCredentialsResult","l":"users()"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialDeletion","l":"UserScramCredentialDeletion(String, ScramMechanism)","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.clients.admin.ScramMechanism)"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialsDescription","l":"UserScramCredentialsDescription(String, List)","u":"%3Cinit%3E(java.lang.String,java.util.List)"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialUpsertion","l":"UserScramCredentialUpsertion(String, ScramCredentialInfo, byte[])","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.clients.admin.ScramCredentialInfo,byte[])"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialUpsertion","l":"UserScramCredentialUpsertion(String, ScramCredentialInfo, byte[], byte[])","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.clients.admin.ScramCredentialInfo,byte[],byte[])"},{"p":"org.apache.kafka.clients.admin","c":"UserScramCredentialUpsertion","l":"UserScramCredentialUpsertion(String, ScramCredentialInfo, String)","u":"%3Cinit%3E(java.lang.String,org.apache.kafka.clients.admin.ScramCredentialInfo,java.lang.String)"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"UUID()"},{"p":"org.apache.kafka.common","c":"Uuid","l":"Uuid(long, 
long)","u":"%3Cinit%3E(long,long)"},{"p":"org.apache.kafka.common.serialization","c":"UUIDDeserializer","l":"UUIDDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.UUIDSerde","l":"UUIDSerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"UUIDSerializer","l":"UUIDSerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.config","c":"LogLevelConfig","l":"VALID_LOG_LEVELS"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupsResult","l":"valid()"},{"p":"org.apache.kafka.clients.admin","c":"ListGroupsResult","l":"valid()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerExtensionsValidatorCallback","l":"valid(String)","u":"valid(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Struct","l":"validate()"},{"p":"org.apache.kafka.server.policy","c":"AlterConfigPolicy","l":"validate(AlterConfigPolicy.RequestMetadata)","u":"validate(org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata)"},{"p":"org.apache.kafka.connect.connector.policy","c":"ConnectorClientConfigOverridePolicy","l":"validate(ConnectorClientConfigRequest)","u":"validate(org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest)"},{"p":"org.apache.kafka.server.policy","c":"CreateTopicPolicy","l":"validate(CreateTopicPolicy.RequestMetadata)","u":"validate(org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"validate(Map)","u":"validate(java.util.Map)"},{"p":"org.apache.kafka.connect.connector","c":"Connector","l":"validate(Map)","u":"validate(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkConnector","l":"validate(Map)","u":"validate(java.util.Map)"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceConnector","l":"validate(Map)","u":"validate(java.util.Map)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"BrokerJwtValidator","l":"validate(String)","u":"validate(java.lang.String)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"ClientJwtValidator","l":"validate(String)","u":"validate(java.lang.String)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"DefaultJwtValidator","l":"validate(String)","u":"validate(java.lang.String)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"JwtValidator","l":"validate(String)","u":"validate(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"validateAll(Map)","u":"validateAll(java.util.Map)"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerExtensionsValidatorCallback","l":"validatedExtensions()"},{"p":"org.apache.kafka.clients.admin","c":"AlterClientQuotasOptions","l":"validateOnly()"},{"p":"org.apache.kafka.clients.admin","c":"CreatePartitionsOptions","l":"validateOnly()"},{"p":"org.apache.kafka.clients.admin","c":"UpdateFeaturesOptions","l":"validateOnly()"},{"p":"org.apache.kafka.clients.admin","c":"AlterClientQuotasOptions","l":"validateOnly(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigsOptions","l":"validateOnly(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"CreatePartitionsOptions","l":"validateOnly(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsOptions","l":"validateOnly(boolean)"},{"p":"org.apache.kafka.clients.admin","c":"UpdateFeaturesOptions","l":"validateOnly(boolean)"},{"p":"org.apache.kafka.common.metrics","c":"JmxReporter","l":"validateReconfiguration(Map)","u":"validateReconfiguration(java.util.Map)"},{"p":"org.apache.kafka.comm
on.metrics","c":"MetricsReporter","l":"validateReconfiguration(Map)","u":"validateReconfiguration(java.util.Map)"},{"p":"org.apache.kafka.common","c":"Reconfigurable","l":"validateReconfiguration(Map)","u":"validateReconfiguration(java.util.Map)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignmentUtils","l":"validateTaskAssignment(ApplicationState, TaskAssignor.TaskAssignment)","u":"validateTaskAssignment(org.apache.kafka.streams.processor.assignment.ApplicationState,org.apache.kafka.streams.processor.assignment.TaskAssignor.TaskAssignment)"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"validateValue(Object)","u":"validateValue(java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"validateValue(Schema, Object)","u":"validateValue(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"validateValue(String, Schema, Object)","u":"validateValue(java.lang.String,org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"validator"},{"p":"org.apache.kafka.streams.state","c":"VersionedRecord","l":"validTo()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Recommender","l":"validValues(String, Map)","u":"validValues(java.lang.String,java.util.Map)"},{"p":"org.apache.kafka.common","c":"ElectionType","l":"value"},{"p":"org.apache.kafka.streams","c":"KeyValue","l":"value"},{"p":"org.apache.kafka.common.errors","c":"RecordDeserializationException.DeserializationExceptionOrigin","l":"VALUE"},{"p":"org.apache.kafka.connect.storage","c":"ConverterType","l":"VALUE"},{"p":"org.apache.kafka.streams.errors","c":"ProductionExceptionHandler.SerializationExceptionOrigin","l":"VALUE"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"VALUE_DESERIALIZER_CLASS_CONFIG"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerConfig","l":"VALUE_DESERIALIZER_CLASS_DOC"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"VALUE_SCHEMA_V0"},{"p":"org.apache.kafka.connect.mirror","c":"Heartbeat","l":"VALUE_SCHEMA_V0"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"VALUE_SERIALIZER_CLASS_CONFIG"},{"p":"org.apache.kafka.clients.producer","c":"ProducerConfig","l":"VALUE_SERIALIZER_CLASS_DOC"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSynonym","l":"value()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry","l":"value()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerRecord","l":"value()"},{"p":"org.apache.kafka.clients.producer","c":"ProducerRecord","l":"value()"},{"p":"org.apache.kafka.common.config","c":"ConfigValue","l":"value()"},{"p":"org.apache.kafka.common.header","c":"Header","l":"value()"},{"p":"org.apache.kafka.common.metrics","c":"QuotaViolationException","l":"value()"},{"p":"org.apache.kafka.common.quota","c":"ClientQuotaAlteration.Op","l":"value()"},{"p":"org.apache.kafka.common.security.oauthbearer","c":"OAuthBearerToken","l":"value()"},{"p":"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"value()"},{"p":"org.apache.kafka.connect.data","c":"SchemaAndValue","l":"value()"},{"p":"org.apache.kafka.connect.header","c":"Header","l":"value()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentMetadata.CustomMetadata","l":"value()"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyRecord","l":"value()"},{"p":"org.apache.kafka.streams.processor.api","c":"Record","l":"value()"},{"p":"org.apache.kafka.
streams.state","c":"ValueAndTimestamp","l":"value()"},{"p":"org.apache.kafka.streams.state","c":"VersionedRecord","l":"value()"},{"p":"org.apache.kafka.streams.test","c":"TestRecord","l":"value()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Value","l":"Value()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Histogram","l":"value(double)"},{"p":"org.apache.kafka.common.metrics","c":"Gauge","l":"value(MetricConfig, long)","u":"value(org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Percentiles","l":"value(MetricConfig, long, double)","u":"value(org.apache.kafka.common.metrics.MetricConfig,long,double)"},{"p":"org.apache.kafka.common.config","c":"ConfigValue","l":"value(Object)","u":"value(java.lang.Object)"},{"p":"org.apache.kafka.common.errors","c":"RecordDeserializationException","l":"valueBuffer()"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"valueDeserializer()"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"valueFrom(byte[])"},{"p":"org.apache.kafka.common","c":"ElectionType","l":"valueOf(byte)"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigOp.OpType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSource","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"EndpointType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"FeatureUpdate.UpgradeType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"ScramMechanism","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.clients.admin","c":"TransactionState","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"AcknowledgeType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"CloseOptions.GroupMembershipOperation","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.RebalanceProtocol","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"GroupProtocol","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetResetStrategy","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common.acl","c":"AclPermissionType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common","c":"ClassicGroupState","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Importance","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Type","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Width","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"ConfigResource.Type","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"SslClientAuth","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common","c":"
ConsumerGroupState","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common","c":"ElectionType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"RecordDeserializationException.DeserializationExceptionOrigin","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common","c":"GroupState","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common","c":"GroupType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common","c":"IsolationLevel","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common.metrics","c":"Sensor.RecordingLevel","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common.metrics.stats","c":"Percentiles.BucketSizing","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common.resource","c":"PatternType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common.resource","c":"ResourceType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common.security.auth","c":"SecurityProtocol","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.connect.connector.policy","c":"ConnectorClientConfigRequest.ClientType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.connect.health","c":"ConnectorType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.connect.source","c":"ConnectorTransactionBoundaries","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.connect.source","c":"ExactlyOnceSupport","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.connect.source","c":"SourceTask.TransactionBoundary","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.connect.storage","c":"ConverterType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"SubscriptionType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizationResult","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentState","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemotePartitionDeleteState","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageManager.IndexType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaEntity.ConfigEntityType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"DeserializationExceptionHandler.DeserializationHandlerResponse","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"ProcessingExceptionHandler.ProcessingHandlerResponse","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"ProductionExceptionHandler.ProductionExceptionHandlerResponse","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams.erro
rs","c":"ProductionExceptionHandler.SerializationExceptionOrigin","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams.errors","c":"StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"GroupProtocol","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"EmitStrategy.StrategyType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized.StoreType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment.AssignedTask.Type","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignor.AssignmentError","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams.processor","c":"PunctuationType","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams.processor","c":"StandbyUpdateListener.SuspendReason","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams.query","c":"FailureReason","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams.query","c":"ResultOrder","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.streams","c":"Topology.AutoOffsetReset","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"SslClientAuth","l":"VALUES"},{"p":"org.apache.kafka.clients.admin","c":"AlterClientQuotasResult","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigOp.OpType","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"AlterConfigsResult","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"AlterPartitionReassignmentsResult","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"AlterReplicaLogDirsResult","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"AlterUserScramCredentialsResult","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigSource","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"ConfigEntry.ConfigType","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"CreateAclsResult","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"CreatePartitionsResult","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"CreateTopicsResult","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteAclsResult.FilterResults","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"DeleteAclsResult","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeAclsResult","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeConfigsResult","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"DescribeReplicaLogDirsResult","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"EndpointType","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"FeatureUpdate.UpgradeType","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"ScramMechanism","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"TransactionState","l":"values()"},{"p":"org.apache.kafka.clients.admin","c":"UpdateFeaturesResult","l":"values()"},{"p":"org.apache.kafka.clients.consumer","c":"AcknowledgeType","l":"values()"},{"p":"org.apache.kaf
ka.clients.consumer","c":"CloseOptions.GroupMembershipOperation","l":"values()"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor.RebalanceProtocol","l":"values()"},{"p":"org.apache.kafka.clients.consumer","c":"GroupProtocol","l":"values()"},{"p":"org.apache.kafka.clients.consumer","c":"OffsetResetStrategy","l":"values()"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"values()"},{"p":"org.apache.kafka.common.acl","c":"AclPermissionType","l":"values()"},{"p":"org.apache.kafka.common","c":"ClassicGroupState","l":"values()"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"values()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Importance","l":"values()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Type","l":"values()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Width","l":"values()"},{"p":"org.apache.kafka.common.config","c":"ConfigResource.Type","l":"values()"},{"p":"org.apache.kafka.common.config","c":"SslClientAuth","l":"values()"},{"p":"org.apache.kafka.common","c":"ConsumerGroupState","l":"values()"},{"p":"org.apache.kafka.common","c":"ElectionType","l":"values()"},{"p":"org.apache.kafka.common.errors","c":"RecordDeserializationException.DeserializationExceptionOrigin","l":"values()"},{"p":"org.apache.kafka.common","c":"GroupState","l":"values()"},{"p":"org.apache.kafka.common","c":"GroupType","l":"values()"},{"p":"org.apache.kafka.common","c":"IsolationLevel","l":"values()"},{"p":"org.apache.kafka.common.metrics","c":"Sensor.RecordingLevel","l":"values()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Percentiles.BucketSizing","l":"values()"},{"p":"org.apache.kafka.common.resource","c":"PatternType","l":"values()"},{"p":"org.apache.kafka.common.resource","c":"ResourceType","l":"values()"},{"p":"org.apache.kafka.common.security.auth","c":"SecurityProtocol","l":"values()"},{"p":"org.apache.kafka.connect.connector.policy","c":"ConnectorClientConfigRequest.ClientType","l":"values()"},{"p":"org.apache.kafka.connect.data","c":"Schema.Type","l":"values()"},{"p":"org.apache.kafka.connect.health","c":"ConnectorType","l":"values()"},{"p":"org.apache.kafka.connect.source","c":"ConnectorTransactionBoundaries","l":"values()"},{"p":"org.apache.kafka.connect.source","c":"ExactlyOnceSupport","l":"values()"},{"p":"org.apache.kafka.connect.source","c":"SourceTask.TransactionBoundary","l":"values()"},{"p":"org.apache.kafka.connect.storage","c":"ConverterType","l":"values()"},{"p":"org.apache.kafka.coordinator.group.api.assignor","c":"SubscriptionType","l":"values()"},{"p":"org.apache.kafka.server.authorizer","c":"AuthorizationResult","l":"values()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteLogSegmentState","l":"values()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemotePartitionDeleteState","l":"values()"},{"p":"org.apache.kafka.server.log.remote.storage","c":"RemoteStorageManager.IndexType","l":"values()"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaEntity.ConfigEntityType","l":"values()"},{"p":"org.apache.kafka.server.quota","c":"ClientQuotaType","l":"values()"},{"p":"org.apache.kafka.streams.errors","c":"DeserializationExceptionHandler.DeserializationHandlerResponse","l":"values()"},{"p":"org.apache.kafka.streams.errors","c":"ProcessingExceptionHandler.ProcessingHandlerResponse","l":"values()"},{"p":"org.apache.kafka.streams.errors","c":"ProductionExceptionHandler.ProductionExceptionHandlerResponse","l":"values()"},{"p":"org.apache.kafka.streams.errors","c":"ProductionExceptionHan
dler.SerializationExceptionOrigin","l":"values()"},{"p":"org.apache.kafka.streams.errors","c":"StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse","l":"values()"},{"p":"org.apache.kafka.streams","c":"GroupProtocol","l":"values()"},{"p":"org.apache.kafka.streams","c":"KafkaStreams.State","l":"values()"},{"p":"org.apache.kafka.streams.kstream","c":"EmitStrategy.StrategyType","l":"values()"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized.StoreType","l":"values()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment.AssignedTask.Type","l":"values()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignor.AssignmentError","l":"values()"},{"p":"org.apache.kafka.streams.processor","c":"PunctuationType","l":"values()"},{"p":"org.apache.kafka.streams.processor","c":"StandbyUpdateListener.SuspendReason","l":"values()"},{"p":"org.apache.kafka.streams.query","c":"FailureReason","l":"values()"},{"p":"org.apache.kafka.streams.query","c":"ResultOrder","l":"values()"},{"p":"org.apache.kafka.streams","c":"Topology.AutoOffsetReset","l":"values()"},{"p":"org.apache.kafka.connect.data","c":"Values","l":"Values()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.connector","c":"ConnectRecord","l":"valueSchema()"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"valueSchema()"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"valueSchema()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"valueSchema()"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"valueSerde()"},{"p":"org.apache.kafka.streams.processor.api","c":"MockProcessorContext","l":"valueSerde()"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessingContext","l":"valueSerde()"},{"p":"org.apache.kafka.streams.processor","c":"MockProcessorContext","l":"valueSerde()"},{"p":"org.apache.kafka.streams.processor","c":"ProcessorContext","l":"valueSerde()"},{"p":"org.apache.kafka.streams.processor","c":"StateStoreContext","l":"valueSerde()"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"valueSerde()"},{"p":"org.apache.kafka.streams.kstream","c":"Grouped","l":"valueSerde(Serde)","u":"valueSerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Produced","l":"valueSerde(Serde)","u":"valueSerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"valueSerde(Serde)","u":"valueSerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"valueSerializer()"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"valuesWithPrefixAllOrNothing(String)","u":"valuesWithPrefixAllOrNothing(java.lang.String)"},{"p":"org.apache.kafka.common.config","c":"AbstractConfig","l":"valuesWithPrefixOverride(String)","u":"valuesWithPrefixOverride(java.lang.String)"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkConnector","l":"VerifiableSinkConnector()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkTask","l":"VerifiableSinkTask()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceConnector","l":"VerifiableSourceConnector()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceTask","l":"VerifiableSourceTask()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"verifyTopologyOptimizationConfigs(String)","u":"verifyTopologyOptimizationConfigs(java.lang.String)"},{"p":"org.apache.kaf
ka.connect.mirror","c":"Checkpoint","l":"VERSION"},{"p":"org.apache.kafka.connect.mirror","c":"Heartbeat","l":"VERSION"},{"p":"org.apache.kafka.connect.mirror","c":"Checkpoint","l":"VERSION_KEY"},{"p":"org.apache.kafka.connect.mirror","c":"Heartbeat","l":"VERSION_KEY"},{"p":"org.apache.kafka.clients.consumer","c":"ConsumerPartitionAssignor","l":"version()"},{"p":"org.apache.kafka.connect.components","c":"Versioned","l":"version()"},{"p":"org.apache.kafka.connect.connector","c":"Task","l":"version()"},{"p":"org.apache.kafka.connect.data","c":"ConnectSchema","l":"version()"},{"p":"org.apache.kafka.connect.data","c":"Schema","l":"version()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"version()"},{"p":"org.apache.kafka.connect.storage","c":"SimpleHeaderConverter","l":"version()"},{"p":"org.apache.kafka.connect.storage","c":"StringConverter","l":"version()"},{"p":"org.apache.kafka.connect.tools","c":"MockConnector","l":"version()"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkConnector","l":"version()"},{"p":"org.apache.kafka.connect.tools","c":"MockSinkTask","l":"version()"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceConnector","l":"version()"},{"p":"org.apache.kafka.connect.tools","c":"MockSourceTask","l":"version()"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceConnector","l":"version()"},{"p":"org.apache.kafka.connect.tools","c":"SchemaSourceTask","l":"version()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkConnector","l":"version()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSinkTask","l":"version()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceConnector","l":"version()"},{"p":"org.apache.kafka.connect.tools","c":"VerifiableSourceTask","l":"version()"},{"p":"org.apache.kafka.connect.data","c":"SchemaBuilder","l":"version(Integer)","u":"version(java.lang.Integer)"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"versionedKeyValueStoreBuilder(VersionedBytesStoreSupplier, Serde, Serde)","u":"versionedKeyValueStoreBuilder(org.apache.kafka.streams.state.VersionedBytesStoreSupplier,org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.state","c":"VersionedRecord","l":"VersionedRecord(V, long)","u":"%3Cinit%3E(V,long)"},{"p":"org.apache.kafka.streams.state","c":"VersionedRecord","l":"VersionedRecord(V, long, long)","u":"%3Cinit%3E(V,long,long)"},{"p":"org.apache.kafka.common.config","c":"ConfigValue","l":"visible()"},{"p":"org.apache.kafka.common.config","c":"ConfigValue","l":"visible(boolean)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.Recommender","l":"visible(String, Map)","u":"visible(java.lang.String,java.util.Map)"},{"p":"org.apache.kafka.common.serialization","c":"Serdes","l":"Void()"},{"p":"org.apache.kafka.common.serialization","c":"VoidDeserializer","l":"VoidDeserializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.VoidSerde","l":"VoidSerde()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.serialization","c":"VoidSerializer","l":"VoidSerializer()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.errors","c":"VoterNotFoundException","l":"VoterNotFoundException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"org.apache.kafka.common.errors","c":"VoterNotFoundException","l":"VoterNotFoundException(String, 
Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"org.apache.kafka.clients.admin","c":"QuorumInfo","l":"voters()"},{"p":"org.apache.kafka.clients.consumer","c":"Consumer","l":"wakeup()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaConsumer","l":"wakeup()"},{"p":"org.apache.kafka.clients.consumer","c":"KafkaShareConsumer","l":"wakeup()"},{"p":"org.apache.kafka.clients.consumer","c":"MockConsumer","l":"wakeup()"},{"p":"org.apache.kafka.clients.consumer","c":"MockShareConsumer","l":"wakeup()"},{"p":"org.apache.kafka.clients.consumer","c":"ShareConsumer","l":"wakeup()"},{"p":"org.apache.kafka.common.errors","c":"WakeupException","l":"WakeupException()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.processor","c":"PunctuationType","l":"WALL_CLOCK_TIME"},{"p":"org.apache.kafka.streams.processor","c":"WallclockTimestampExtractor","l":"WallclockTimestampExtractor()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.clients.admin","c":"StreamsGroupMemberAssignment","l":"warmupTasks()"},{"p":"org.apache.kafka.common.config","c":"LogLevelConfig","l":"WARN_LOG_LEVEL"},{"p":"org.apache.kafka.common","c":"KafkaFuture","l":"whenComplete(KafkaFuture.BiConsumer)","u":"whenComplete(org.apache.kafka.common.KafkaFuture.BiConsumer)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.ConfigKey","l":"width"},{"p":"org.apache.kafka.common.resource","c":"ResourcePattern","l":"WILDCARD_RESOURCE"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedDeserializer","l":"WINDOW_SIZE_MS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"WINDOW_SIZE_MS_CONFIG"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG"},{"p":"org.apache.kafka.streams.kstream","c":"Windowed","l":"window()"},{"p":"org.apache.kafka.streams.kstream","c":"Window","l":"Window(long, long)","u":"%3Cinit%3E(long,long)"},{"p":"org.apache.kafka.streams","c":"StreamsConfig","l":"WINDOWED_INNER_CLASS_SERDE"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedDeserializer","l":"WINDOWED_INNER_DESERIALIZER_CLASS"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedDeserializer","l":"WINDOWED_INNER_DESERIALIZER_CLASS"},{"p":"org.apache.kafka.streams.kstream","c":"SessionWindowedSerializer","l":"WINDOWED_INNER_SERIALIZER_CLASS"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindowedSerializer","l":"WINDOWED_INNER_SERIALIZER_CLASS"},{"p":"org.apache.kafka.streams.kstream","c":"Windowed","l":"Windowed(K, 
Window)","u":"%3Cinit%3E(K,org.apache.kafka.streams.kstream.Window)"},{"p":"org.apache.kafka.streams.kstream","c":"CogroupedKStream","l":"windowedBy(SessionWindows)","u":"windowedBy(org.apache.kafka.streams.kstream.SessionWindows)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedStream","l":"windowedBy(SessionWindows)","u":"windowedBy(org.apache.kafka.streams.kstream.SessionWindows)"},{"p":"org.apache.kafka.streams.kstream","c":"CogroupedKStream","l":"windowedBy(SlidingWindows)","u":"windowedBy(org.apache.kafka.streams.kstream.SlidingWindows)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedStream","l":"windowedBy(SlidingWindows)","u":"windowedBy(org.apache.kafka.streams.kstream.SlidingWindows)"},{"p":"org.apache.kafka.streams.kstream","c":"CogroupedKStream","l":"windowedBy(Windows)","u":"windowedBy(org.apache.kafka.streams.kstream.Windows)"},{"p":"org.apache.kafka.streams.kstream","c":"KGroupedStream","l":"windowedBy(Windows)","u":"windowedBy(org.apache.kafka.streams.kstream.Windows)"},{"p":"org.apache.kafka.common.metrics.stats","c":"WindowedCount","l":"WindowedCount()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.kstream","c":"WindowedSerdes","l":"WindowedSerdes()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.common.metrics.stats","c":"WindowedSum","l":"WindowedSum()","u":"%3Cinit%3E()"},{"p":"org.apache.kafka.streams.kstream","c":"JoinWindows","l":"windowsFor(long)"},{"p":"org.apache.kafka.streams.kstream","c":"TimeWindows","l":"windowsFor(long)"},{"p":"org.apache.kafka.streams.kstream","c":"UnlimitedWindows","l":"windowsFor(long)"},{"p":"org.apache.kafka.streams.kstream","c":"Windows","l":"windowsFor(long)"},{"p":"org.apache.kafka.streams.state","c":"DslWindowParams","l":"windowSize()"},{"p":"org.apache.kafka.streams.state","c":"WindowBytesStoreSupplier","l":"windowSize()"},{"p":"org.apache.kafka.common.metrics.stats","c":"Rate","l":"windowSize(MetricConfig, long)","u":"windowSize(org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.common.metrics.stats","c":"SimpleRate","l":"windowSize(MetricConfig, long)","u":"windowSize(org.apache.kafka.common.metrics.MetricConfig,long)"},{"p":"org.apache.kafka.streams.state","c":"QueryableStoreTypes","l":"windowStore()"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized.StoreType","l":"windowStore(DslWindowParams)","u":"windowStore(org.apache.kafka.streams.state.DslWindowParams)"},{"p":"org.apache.kafka.streams.state","c":"BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers","l":"windowStore(DslWindowParams)","u":"windowStore(org.apache.kafka.streams.state.DslWindowParams)"},{"p":"org.apache.kafka.streams.state","c":"BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers","l":"windowStore(DslWindowParams)","u":"windowStore(org.apache.kafka.streams.state.DslWindowParams)"},{"p":"org.apache.kafka.streams.state","c":"DslStoreSuppliers","l":"windowStore(DslWindowParams)","u":"windowStore(org.apache.kafka.streams.state.DslWindowParams)"},{"p":"org.apache.kafka.streams.state","c":"Stores","l":"windowStoreBuilder(WindowBytesStoreSupplier, Serde, Serde)","u":"windowStoreBuilder(org.apache.kafka.streams.state.WindowBytesStoreSupplier,org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"with(AutoOffsetReset)","u":"with(org.apache.kafka.streams.AutoOffsetReset)"},{"p":"org.apache.kafka.common.config","c":"ConfigDef.LambdaValidator","l":"with(BiConsumer, 
Supplier)","u":"with(java.util.function.BiConsumer,java.util.function.Supplier)"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"with(DslStoreSuppliers)","u":"with(org.apache.kafka.streams.state.DslStoreSuppliers)"},{"p":"org.apache.kafka.connect.header","c":"Header","l":"with(Schema, Object)","u":"with(org.apache.kafka.connect.data.Schema,java.lang.Object)"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"with(Serde, Serde)","u":"with(org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Grouped","l":"with(Serde, Serde)","u":"with(org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized","l":"with(Serde, Serde)","u":"with(org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Produced","l":"with(Serde, Serde)","u":"with(org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Repartitioned","l":"with(Serde, Serde)","u":"with(org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Produced","l":"with(Serde, Serde, StreamPartitioner)","u":"with(org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde,org.apache.kafka.streams.processor.StreamPartitioner)"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"with(Serde, Serde, TimestampExtractor, AutoOffsetReset)","u":"with(org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde,org.apache.kafka.streams.processor.TimestampExtractor,org.apache.kafka.streams.AutoOffsetReset)"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"with(Serde, Serde, TimestampExtractor, Topology.AutoOffsetReset)","u":"with(org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde,org.apache.kafka.streams.processor.TimestampExtractor,org.apache.kafka.streams.Topology.AutoOffsetReset)"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"with(Serde, Serde, Serde)","u":"with(org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"with(Serde, Serde, Serde)","u":"with(org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"with(Serde, Serde, Serde, String)","u":"with(org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde,java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"with(Serde, Serde, Serde, String, Duration)","u":"with(org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde,java.lang.String,java.time.Duration)"},{"p":"org.apache.kafka.streams.kstream","c":"TableJoined","l":"with(StreamPartitioner, StreamPartitioner)","u":"with(org.apache.kafka.streams.processor.StreamPartitioner,org.apache.kafka.streams.processor.StreamPartitioner)"},{"p":"org.apache.kafka.streams.kstream","c":"Grouped","l":"with(String, Serde, 
Serde)","u":"with(java.lang.String,org.apache.kafka.common.serialization.Serde,org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"with(TimestampExtractor)","u":"with(org.apache.kafka.streams.processor.TimestampExtractor)"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"with(Topology.AutoOffsetReset)","u":"with(org.apache.kafka.streams.Topology.AutoOffsetReset)"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"with(WindowBytesStoreSupplier, WindowBytesStoreSupplier)","u":"with(org.apache.kafka.streams.state.WindowBytesStoreSupplier,org.apache.kafka.streams.state.WindowBytesStoreSupplier)"},{"p":"org.apache.kafka.streams.query","c":"StateQueryRequest","l":"withAllPartitions()"},{"p":"org.apache.kafka.streams.query","c":"RangeQuery","l":"withAscendingKeys()"},{"p":"org.apache.kafka.streams.query","c":"TimestampedRangeQuery","l":"withAscendingKeys()"},{"p":"org.apache.kafka.streams.query","c":"MultiVersionedKeyQuery","l":"withAscendingTimestamps()"},{"p":"org.apache.kafka.streams.state","c":"StateSerdes","l":"withBuiltinTypes(String, Class, Class)","u":"withBuiltinTypes(java.lang.String,java.lang.Class,java.lang.Class)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized","l":"withCachingDisabled()"},{"p":"org.apache.kafka.streams.state","c":"StoreBuilder","l":"withCachingDisabled()"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized","l":"withCachingEnabled()"},{"p":"org.apache.kafka.streams.state","c":"StoreBuilder","l":"withCachingEnabled()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"withClientSaslSupport()"},{"p":"org.apache.kafka.common.config","c":"ConfigDef","l":"withClientSslSupport()"},{"p":"org.apache.kafka.streams.query","c":"Position","l":"withComponent(String, int, long)","u":"withComponent(java.lang.String,int,long)"},{"p":"org.apache.kafka.streams.kstream","c":"Branched","l":"withConsumer(Consumer>, String)","u":"withConsumer(java.util.function.Consumer,java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Branched","l":"withConsumer(Consumer>)","u":"withConsumer(java.util.function.Consumer)"},{"p":"org.apache.kafka.streams.query","c":"RangeQuery","l":"withDescendingKeys()"},{"p":"org.apache.kafka.streams.query","c":"TimestampedRangeQuery","l":"withDescendingKeys()"},{"p":"org.apache.kafka.streams.query","c":"MultiVersionedKeyQuery","l":"withDescendingTimestamps()"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"withDslStoreSuppliers(DslStoreSuppliers)","u":"withDslStoreSuppliers(org.apache.kafka.streams.state.DslStoreSuppliers)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"KafkaStreamsAssignment","l":"withFollowupRebalance(Instant)","u":"withFollowupRebalance(java.time.Instant)"},{"p":"org.apache.kafka.streams.kstream","c":"Branched","l":"withFunction(Function, ? extends KStream>)","u":"withFunction(java.util.function.Function)"},{"p":"org.apache.kafka.streams.kstream","c":"Branched","l":"withFunction(Function, ? 
extends KStream>, String)","u":"withFunction(java.util.function.Function,java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"withGracePeriod(Duration)","u":"withGracePeriod(java.time.Duration)"},{"p":"org.apache.kafka.clients.consumer","c":"CloseOptions","l":"withGroupMembershipOperation(CloseOptions.GroupMembershipOperation)","u":"withGroupMembershipOperation(org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation)"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyRecord","l":"withHeaders(Headers)","u":"withHeaders(org.apache.kafka.common.header.Headers)"},{"p":"org.apache.kafka.streams.processor.api","c":"Record","l":"withHeaders(Headers)","u":"withHeaders(org.apache.kafka.common.header.Headers)"},{"p":"org.apache.kafka.streams.query","c":"KeyQuery","l":"withKey(K)"},{"p":"org.apache.kafka.streams.query","c":"MultiVersionedKeyQuery","l":"withKey(K)"},{"p":"org.apache.kafka.streams.query","c":"TimestampedKeyQuery","l":"withKey(K)"},{"p":"org.apache.kafka.streams.query","c":"VersionedKeyQuery","l":"withKey(K)"},{"p":"org.apache.kafka.streams.query","c":"WindowRangeQuery","l":"withKey(K)"},{"p":"org.apache.kafka.streams.processor.api","c":"Record","l":"withKey(NewK)"},{"p":"org.apache.kafka.streams.query","c":"WindowKeyQuery","l":"withKeyAndWindowStartRange(K, Instant, Instant)","u":"withKeyAndWindowStartRange(K,java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"withKeySerde(Serde)","u":"withKeySerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Grouped","l":"withKeySerde(Serde)","u":"withKeySerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"withKeySerde(Serde)","u":"withKeySerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized","l":"withKeySerde(Serde)","u":"withKeySerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Produced","l":"withKeySerde(Serde)","u":"withKeySerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Repartitioned","l":"withKeySerde(Serde)","u":"withKeySerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"withKeySerde(Serde)","u":"withKeySerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Printed","l":"withKeyValueMapper(KeyValueMapper)","u":"withKeyValueMapper(org.apache.kafka.streams.kstream.KeyValueMapper)"},{"p":"org.apache.kafka.streams.kstream","c":"Printed","l":"withLabel(String)","u":"withLabel(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized","l":"withLoggingDisabled()"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"withLoggingDisabled()"},{"p":"org.apache.kafka.streams.kstream","c":"Suppressed.BufferConfig","l":"withLoggingDisabled()"},{"p":"org.apache.kafka.streams.state","c":"StoreBuilder","l":"withLoggingDisabled()"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized","l":"withLoggingEnabled(Map)","u":"withLoggingEnabled(java.util.Map)"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"withLoggingEnabled(Map)","u":"withLoggingEnabled(java.util.Map)"},{"p":"org.apache.kafka.streams.kstream","c":"Suppressed.BufferConfig","l":"withLoggingEnabled(Map)","u":"withLoggingEnabled(java.util.Map)"},{"p":"org.apache.kafka.streams.state","c":"StoreBuild
er","l":"withLoggingEnabled(Map)","u":"withLoggingEnabled(java.util.Map)"},{"p":"org.apache.kafka.streams.query","c":"RangeQuery","l":"withLowerBound(K)"},{"p":"org.apache.kafka.streams.query","c":"TimestampedRangeQuery","l":"withLowerBound(K)"},{"p":"org.apache.kafka.streams.kstream","c":"Suppressed.BufferConfig","l":"withMaxBytes(long)"},{"p":"org.apache.kafka.streams.kstream","c":"Suppressed.BufferConfig","l":"withMaxRecords(long)"},{"p":"org.apache.kafka.connect.storage","c":"ConverterType","l":"withName(String)","u":"withName(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Branched","l":"withName(String)","u":"withName(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"withName(String)","u":"withName(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Grouped","l":"withName(String)","u":"withName(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"withName(String)","u":"withName(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Named","l":"withName(String)","u":"withName(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Printed","l":"withName(String)","u":"withName(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Produced","l":"withName(String)","u":"withName(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Repartitioned","l":"withName(String)","u":"withName(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"withName(String)","u":"withName(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Suppressed","l":"withName(String)","u":"withName(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"TableJoined","l":"withName(String)","u":"withName(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Suppressed.BufferConfig","l":"withNoBound()"},{"p":"org.apache.kafka.streams.query","c":"RangeQuery","l":"withNoBounds()"},{"p":"org.apache.kafka.streams.query","c":"TimestampedRangeQuery","l":"withNoBounds()"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignmentUtils.RackAwareOptimizationParams","l":"withNonOverlapCostOverride(int)"},{"p":"org.apache.kafka.streams.kstream","c":"Repartitioned","l":"withNumberOfPartitions(int)"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"withOffsetResetPolicy(AutoOffsetReset)","u":"withOffsetResetPolicy(org.apache.kafka.streams.AutoOffsetReset)"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"withOffsetResetPolicy(Topology.AutoOffsetReset)","u":"withOffsetResetPolicy(org.apache.kafka.streams.Topology.AutoOffsetReset)"},{"p":"org.apache.kafka.streams.kstream","c":"TableJoined","l":"withOtherPartitioner(StreamPartitioner)","u":"withOtherPartitioner(org.apache.kafka.streams.processor.StreamPartitioner)"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"withOtherStoreSupplier(WindowBytesStoreSupplier)","u":"withOtherStoreSupplier(org.apache.kafka.streams.state.WindowBytesStoreSupplier)"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"withOtherValueSerde(Serde)","u":"withOtherValueSerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"withOtherValueSerde(Serde)","u":"withOtherValueSerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams","c":"StoreQueryParameters","l":"withPartition(Integer)","u":"withPartition(java.lang.Integer)"},{"p":"org.apache.kafka.streams.kstream","c":"TableJoined","l":"wi
thPartitioner(StreamPartitioner)","u":"withPartitioner(org.apache.kafka.streams.processor.StreamPartitioner)"},{"p":"org.apache.kafka.common","c":"Cluster","l":"withPartitions(Map)","u":"withPartitions(java.util.Map)"},{"p":"org.apache.kafka.streams.query","c":"StateQueryRequest","l":"withPartitions(Set)","u":"withPartitions(java.util.Set)"},{"p":"org.apache.kafka.common.metrics","c":"Monitorable","l":"withPluginMetrics(PluginMetrics)","u":"withPluginMetrics(org.apache.kafka.common.metrics.PluginMetrics)"},{"p":"org.apache.kafka.streams.query","c":"StateQueryRequest","l":"withPositionBound(PositionBound)","u":"withPositionBound(org.apache.kafka.streams.query.PositionBound)"},{"p":"org.apache.kafka.clients.admin","c":"ListGroupsOptions","l":"withProtocolTypes(Set)","u":"withProtocolTypes(java.util.Set)"},{"p":"org.apache.kafka.streams.query","c":"StateQueryRequest.InStore","l":"withQuery(Query)","u":"withQuery(org.apache.kafka.streams.query.Query)"},{"p":"org.apache.kafka.streams.query","c":"RangeQuery","l":"withRange(K, K)","u":"withRange(K,K)"},{"p":"org.apache.kafka.streams.query","c":"TimestampedRangeQuery","l":"withRange(K, K)","u":"withRange(K,K)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized","l":"withRetention(Duration)","u":"withRetention(java.time.Duration)"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"withStoreName(String)","u":"withStoreName(java.lang.String)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized","l":"withStoreType(DslStoreSuppliers)","u":"withStoreType(org.apache.kafka.streams.state.DslStoreSuppliers)"},{"p":"org.apache.kafka.streams.kstream","c":"Produced","l":"withStreamPartitioner(StreamPartitioner)","u":"withStreamPartitioner(org.apache.kafka.streams.processor.StreamPartitioner)"},{"p":"org.apache.kafka.streams.kstream","c":"Repartitioned","l":"withStreamPartitioner(StreamPartitioner)","u":"withStreamPartitioner(org.apache.kafka.streams.processor.StreamPartitioner)"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"withThisStoreSupplier(WindowBytesStoreSupplier)","u":"withThisStoreSupplier(org.apache.kafka.streams.state.WindowBytesStoreSupplier)"},{"p":"org.apache.kafka.clients.consumer","c":"CloseOptions","l":"withTimeout(Duration)","u":"withTimeout(java.time.Duration)"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyRecord","l":"withTimestamp(long)"},{"p":"org.apache.kafka.streams.processor.api","c":"Record","l":"withTimestamp(long)"},{"p":"org.apache.kafka.streams.processor","c":"To","l":"withTimestamp(long)"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"withTimestampExtractor(TimestampExtractor)","u":"withTimestampExtractor(org.apache.kafka.streams.processor.TimestampExtractor)"},{"p":"org.apache.kafka.streams.processor.assignment","c":"TaskAssignmentUtils.RackAwareOptimizationParams","l":"withTrafficCostOverride(int)"},{"p":"org.apache.kafka.clients.admin","c":"ListConsumerGroupsOptions","l":"withTypes(Set)","u":"withTypes(java.util.Set)"},{"p":"org.apache.kafka.clients.admin","c":"ListGroupsOptions","l":"withTypes(Set)","u":"withTypes(java.util.Set)"},{"p":"org.apache.kafka.streams.query","c":"RangeQuery","l":"withUpperBound(K)"},{"p":"org.apache.kafka.streams.query","c":"TimestampedRangeQuery","l":"withUpperBound(K)"},{"p":"org.apache.kafka.streams.processor.api","c":"FixedKeyRecord","l":"withValue(NewV)"},{"p":"org.apache.kafka.streams.processor.api","c":"Record","l":"withValue(NewV)"},{"p":"org.apache.kafka.streams.kstream","c":"Consumed","l":"withValueSerde(Serde)","u"
:"withValueSerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Grouped","l":"withValueSerde(Serde)","u":"withValueSerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Materialized","l":"withValueSerde(Serde)","u":"withValueSerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Produced","l":"withValueSerde(Serde)","u":"withValueSerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Repartitioned","l":"withValueSerde(Serde)","u":"withValueSerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"StreamJoined","l":"withValueSerde(Serde)","u":"withValueSerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.kstream","c":"Joined","l":"withValueSerde(Serde)","u":"withValueSerde(org.apache.kafka.common.serialization.Serde)"},{"p":"org.apache.kafka.streams.query","c":"WindowRangeQuery","l":"withWindowStartRange(Instant, Instant)","u":"withWindowStartRange(java.time.Instant,java.time.Instant)"},{"p":"org.apache.kafka.connect.health","c":"AbstractState","l":"workerId()"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessorWrapper","l":"wrapFixedKeyProcessorSupplier(String, FixedKeyProcessorSupplier)","u":"wrapFixedKeyProcessorSupplier(java.lang.String,org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier)"},{"p":"org.apache.kafka.common.serialization","c":"Serdes.WrapperSerde","l":"WrapperSerde(Serializer, Deserializer)","u":"%3Cinit%3E(org.apache.kafka.common.serialization.Serializer,org.apache.kafka.common.serialization.Deserializer)"},{"p":"org.apache.kafka.streams.processor.api","c":"ProcessorWrapper","l":"wrapProcessorSupplier(String, ProcessorSupplier)","u":"wrapProcessorSupplier(java.lang.String,org.apache.kafka.streams.processor.api.ProcessorSupplier)"},{"p":"org.apache.kafka.common.acl","c":"AclOperation","l":"WRITE"},{"p":"org.apache.kafka.common","c":"MessageFormatter","l":"writeTo(ConsumerRecord, PrintStream)","u":"writeTo(org.apache.kafka.clients.consumer.ConsumerRecord,java.io.PrintStream)"},{"p":"org.apache.kafka.common","c":"Uuid","l":"ZERO_UUID"}];updateSearchResults(); \ No newline at end of file diff --git a/static/41/javadoc/module-search-index.js b/static/41/javadoc/module-search-index.js new file mode 100644 index 000000000..0d59754fc --- /dev/null +++ b/static/41/javadoc/module-search-index.js @@ -0,0 +1 @@ +moduleSearchIndex = [];updateSearchResults(); \ No newline at end of file diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AbortTransactionOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/AbortTransactionOptions.html new file mode 100644 index 000000000..61055879d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AbortTransactionOptions.html @@ -0,0 +1,167 @@ + + + + +AbortTransactionOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AbortTransactionOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<AbortTransactionOptions> +
org.apache.kafka.clients.admin.AbortTransactionOptions
+
+
+
+
+
public class AbortTransactionOptions +extends AbstractOptions<AbortTransactionOptions>
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AbortTransactionOptions

      +
      public AbortTransactionOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AbortTransactionResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/AbortTransactionResult.html new file mode 100644 index 000000000..ec6625987 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AbortTransactionResult.html @@ -0,0 +1,143 @@ + + + + +AbortTransactionResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AbortTransactionResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbortTransactionResult
+
+
+
+
public class AbortTransactionResult +extends Object
+ +
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AbortTransactionSpec.html b/static/41/javadoc/org/apache/kafka/clients/admin/AbortTransactionSpec.html new file mode 100644 index 000000000..c5d3a35d9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AbortTransactionSpec.html @@ -0,0 +1,230 @@ + + + + +AbortTransactionSpec (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AbortTransactionSpec

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbortTransactionSpec
+
+
+
+
public class AbortTransactionSpec +extends Object
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AbortTransactionSpec

      +
      public AbortTransactionSpec(TopicPartition topicPartition, + long producerId, + short producerEpoch, + int coordinatorEpoch)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      topicPartition

      +
      public TopicPartition topicPartition()
      +
      +
    • +
    • +
      +

      producerId

      +
      public long producerId()
      +
      +
    • +
    • +
      +

      producerEpoch

      +
      public short producerEpoch()
      +
      +
    • +
    • +
      +

      coordinatorEpoch

      +
      public int coordinatorEpoch()
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AbstractOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/AbstractOptions.html new file mode 100644 index 000000000..7142ffdd6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AbstractOptions.html @@ -0,0 +1,181 @@ + + + + +AbstractOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AbstractOptions<T extends AbstractOptions>

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<T>
+
+
+
+
Direct Known Subclasses:
+
AbortTransactionOptions, AddRaftVoterOptions, AlterClientQuotasOptions, AlterConfigsOptions, AlterConsumerGroupOffsetsOptions, AlterPartitionReassignmentsOptions, AlterReplicaLogDirsOptions, AlterShareGroupOffsetsOptions, AlterStreamsGroupOffsetsOptions, AlterUserScramCredentialsOptions, CreateAclsOptions, CreateDelegationTokenOptions, CreatePartitionsOptions, CreateTopicsOptions, DeleteAclsOptions, DeleteConsumerGroupOffsetsOptions, DeleteConsumerGroupsOptions, DeleteRecordsOptions, DeleteShareGroupOffsetsOptions, DeleteShareGroupsOptions, DeleteStreamsGroupOffsetsOptions, DeleteStreamsGroupsOptions, DeleteTopicsOptions, DescribeAclsOptions, DescribeClassicGroupsOptions, DescribeClientQuotasOptions, DescribeClusterOptions, DescribeConfigsOptions, DescribeConsumerGroupsOptions, DescribeDelegationTokenOptions, DescribeFeaturesOptions, DescribeLogDirsOptions, DescribeMetadataQuorumOptions, DescribeProducersOptions, DescribeReplicaLogDirsOptions, DescribeShareGroupsOptions, DescribeStreamsGroupsOptions, DescribeTopicsOptions, DescribeTransactionsOptions, DescribeUserScramCredentialsOptions, ElectLeadersOptions, ExpireDelegationTokenOptions, FenceProducersOptions, ListClientMetricsResourcesOptions, ListConfigResourcesOptions, ListConsumerGroupOffsetsOptions, ListConsumerGroupsOptions, ListGroupsOptions, ListOffsetsOptions, ListPartitionReassignmentsOptions, ListShareGroupOffsetsOptions, ListStreamsGroupOffsetsOptions, ListTopicsOptions, ListTransactionsOptions, RemoveMembersFromConsumerGroupOptions, RemoveRaftVoterOptions, RenewDelegationTokenOptions, TerminateTransactionOptions, UnregisterBrokerOptions, UpdateFeaturesOptions
+
+
+
public abstract class AbstractOptions<T extends AbstractOptions> +extends Object
+
+
+
    + +
  • +
    +

    Constructor Summary

    +
    Constructors
    +
    +
    Constructor
    +
    Description
    + +
     
    +
    +
    +
  • + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    + + +
    +
    The timeout in milliseconds for this operation or null if the default api timeout for the + AdminClient should be used.
    +
    + +
    timeoutMs(Integer timeoutMs)
    +
    +
    Set the timeout in milliseconds for this operation or null if the default api timeout for the + AdminClient should be used.
    +
    +
    +
    +
    +
    +

    Methods inherited from class java.lang.Object

    +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AbstractOptions

      +
      public AbstractOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      timeoutMs

      +
      public T timeoutMs(Integer timeoutMs)
      +
      Set the timeout in milliseconds for this operation or null if the default api timeout for the + AdminClient should be used.
      +
      +
    • +
    • +
      +

      timeoutMs

      +
      public Integer timeoutMs()
      +
      The timeout in milliseconds for this operation or null if the default api timeout for the + AdminClient should be used.
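      As a small, hypothetical sketch of the fluent setter (the Admin instance named admin, the topic name and the 30-second value are assumptions, not part of this API description):

        // Apply a per-call API timeout via the options object.
        DescribeTopicsOptions options = new DescribeTopicsOptions().timeoutMs(30_000);
        DescribeTopicsResult topics = admin.describeTopics(List.of("my-topic"), options);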
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AddRaftVoterOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/AddRaftVoterOptions.html new file mode 100644 index 000000000..25e4372ed --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AddRaftVoterOptions.html @@ -0,0 +1,174 @@ + + + + +AddRaftVoterOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AddRaftVoterOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<AddRaftVoterOptions> +
org.apache.kafka.clients.admin.AddRaftVoterOptions
+
+
+
+
+
@Stable +public class AddRaftVoterOptions +extends AbstractOptions<AddRaftVoterOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AddRaftVoterOptions

      +
      public AddRaftVoterOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AddRaftVoterResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/AddRaftVoterResult.html new file mode 100644 index 000000000..0dff35086 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AddRaftVoterResult.html @@ -0,0 +1,138 @@ + + + + +AddRaftVoterResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AddRaftVoterResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AddRaftVoterResult
+
+
+
+
@Stable +public class AddRaftVoterResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Returns a future that completes when the voter has been added.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/Admin.html b/static/41/javadoc/org/apache/kafka/clients/admin/Admin.html new file mode 100644 index 000000000..4aac138ef --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/Admin.html @@ -0,0 +1,3521 @@ + + + + +Admin (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Interface Admin

+
+
+
+
All Superinterfaces:
+
AutoCloseable
+
+
+
All Known Implementing Classes:
+
AdminClient, ForwardingAdmin, KafkaAdminClient
+
+
+
public interface Admin +extends AutoCloseable
+
The administrative client for Kafka, which supports managing and inspecting topics, brokers, configurations and ACLs. +

+ Instances returned from the create methods of this interface are guaranteed to be thread safe. + However, the callbacks registered on the KafkaFutures returned from request methods are executed + by a single thread, so it is important that any code which executes on that thread when they complete + (using KafkaFuture.thenApply(KafkaFuture.BaseFunction), for example) doesn't block + for too long. If necessary, processing of results should be passed to another thread. +

+ The operations exposed by Admin follow a consistent pattern: +

    +
  • Admin instances should be created using create(Properties) or create(Map)
  • +
  • Each operation typically has two overloaded methods: one that uses a default set of options, and an + overload whose last parameter is an explicit options object. +
  • The operation method's first parameter is a Collection of items to perform + the operation on. Batching multiple requests into a single call is more efficient and should be + preferred over multiple calls to the same method. +
  • The operation methods execute asynchronously. +
  • Each xxx operation method returns an XxxResult class with methods which expose + KafkaFuture for accessing the result(s) of the operation. +
  • Typically an all() method is provided for getting the overall success/failure of the batch and a + values() method provides access to each item in a request batch. + Other methods may also be provided. +
  • For synchronous behaviour use KafkaFuture.get() +
+

+ Here is a simple example of using an Admin client instance to create a new topic: +

+ 
+ Properties props = new Properties();
+ props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+
+ try (Admin admin = Admin.create(props)) {
+   String topicName = "my-topic";
+   int partitions = 12;
+   short replicationFactor = 3;
+   // Create a compacted topic
+   CreateTopicsResult result = admin.createTopics(Collections.singleton(
+     new NewTopic(topicName, partitions, replicationFactor)
+       .configs(Collections.singletonMap(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT))));
+
+   // Call values() to get the result for a specific topic
+   KafkaFuture<Void> future = result.values().get(topicName);
+
+   // Call get() to block until the topic creation is complete or has failed
+   // if creation failed the ExecutionException wraps the underlying cause.
+   future.get();
+ }
+ 
+ 
+ +

Bootstrap and balancing

+

+ The bootstrap.servers config in the Map or Properties passed + to create(Properties) is only used for discovering the brokers in the cluster, + which the client will then connect to as needed. + As such, it is sufficient to include only two or three broker addresses to cope with the possibility of brokers + being unavailable. +

+ Different operations necessitate requests being sent to different nodes in the cluster. For example + createTopics(Collection) communicates with the controller, but describeTopics(Collection) + can talk to any broker. When the recipient does not matter the instance will try to use the broker with the + fewest outstanding requests. +

+ The client will transparently retry certain errors which are usually transient. + For example, if a createTopics() request is sent to a node which is not the controller, + the metadata will be refreshed and the request re-sent to the controller. + +

Broker Compatibility

+

+ The minimum broker version required is 0.10.0.0. Methods with stricter requirements will specify the minimum broker + version required. +

+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      create

      +
      static Admin create(Properties props)
      +
      Create a new Admin with the given configuration.
      +
      +
      Parameters:
      +
      props - The configuration.
      +
      Returns:
      +
      The new KafkaAdminClient.
      +
      +
      +
    • +
    • +
      +

      create

      +
      static Admin create(Map<String,Object> conf)
      +
      Create a new Admin with the given configuration.
      +
      +
      Parameters:
      +
      conf - The configuration.
      +
      Returns:
      +
      The new KafkaAdminClient.
      +
      +
      +
    • +
    • +
      +

      close

      +
      default void close()
      +
      Close the Admin and release all associated resources. +

      + See close(Duration)

      +
      +
      Specified by:
      +
      close in interface AutoCloseable
      +
      +
      +
    • +
    • +
      +

      close

      +
      void close(Duration timeout)
      +
      Close the Admin client and release all associated resources. +

      + The close operation has a grace period during which current operations will be allowed to + complete, specified by the given duration. + New operations will not be accepted during the grace period. Once the grace period is over, + all operations that have not yet been completed will be aborted with a TimeoutException.

      +
      +
      Parameters:
      +
      timeout - The time to use for the wait time.
      +
      +
      +
    • +
    • +
      +

      createTopics

      +
      default CreateTopicsResult createTopics(Collection<NewTopic> newTopics)
      +
      Create a batch of new topics with the default options. +

      + This is a convenience method for createTopics(Collection, CreateTopicsOptions) with default options. + See the overload for more details. +

      + This operation is supported by brokers with version 0.10.1.0 or higher.

      +
      +
      Parameters:
      +
      newTopics - The new topics to create.
      +
      Returns:
      +
      The CreateTopicsResult.
      +
      +
      +
    • +
    • +
      +

      createTopics

      +
      CreateTopicsResult createTopics(Collection<NewTopic> newTopics, + CreateTopicsOptions options)
      +
      Create a batch of new topics. +

      + This operation is not transactional so it may succeed for some topics while fail for others. +

      + It may take several seconds after CreateTopicsResult returns + success for all the brokers to become aware that the topics have been created. + During this time, listTopics() and describeTopics(Collection) + may not return information about the new topics. +

      + This operation is supported by brokers with version 0.10.1.0 or higher. The validateOnly option is supported + from version 0.10.2.0.

      +
      +
      Parameters:
      +
      newTopics - The new topics to create.
      +
      options - The options to use when creating the new topics.
      +
      Returns:
      +
      The CreateTopicsResult.
      +
      +
      +
    • +
    • +
      +

      deleteTopics

      +
      default DeleteTopicsResult deleteTopics(Collection<String> topics)
      +
      This is a convenience method for deleteTopics(TopicCollection, DeleteTopicsOptions) + with default options. See the overload for more details. +

      + This operation is supported by brokers with version 0.10.1.0 or higher.

      +
      +
      Parameters:
      +
      topics - The topic names to delete.
      +
      Returns:
      +
      The DeleteTopicsResult.
      +
      +
      +
    • +
    • +
      +

      deleteTopics

      +
      default DeleteTopicsResult deleteTopics(Collection<String> topics, + DeleteTopicsOptions options)
      +
      This is a convenience method for deleteTopics(TopicCollection, DeleteTopicsOptions) + with default options. See the overload for more details. +

      + This operation is supported by brokers with version 0.10.1.0 or higher.

      +
      +
      Parameters:
      +
      topics - The topic names to delete.
      +
      options - The options to use when deleting the topics.
      +
      Returns:
      +
      The DeleteTopicsResult.
      +
      +
      +
    • +
    • +
      +

      deleteTopics

      +
      default DeleteTopicsResult deleteTopics(TopicCollection topics)
      +
      This is a convenience method for deleteTopics(TopicCollection, DeleteTopicsOptions) + with default options. See the overload for more details. +

      + When using topic IDs, this operation is supported by brokers with inter-broker protocol 2.8 or higher. + When using topic names, this operation is supported by brokers with version 0.10.1.0 or higher.

      +
      +
      Parameters:
      +
      topics - The topics to delete.
      +
      Returns:
      +
      The DeleteTopicsResult.
      +
      +
      +
    • +
    • +
      +

      deleteTopics

      +
      DeleteTopicsResult deleteTopics(TopicCollection topics, + DeleteTopicsOptions options)
      +
      Delete a batch of topics. +

      + This operation is not transactional so it may succeed for some topics while fail for others. +

      + It may take several seconds after the DeleteTopicsResult returns + success for all the brokers to become aware that the topics are gone. + During this time, listTopics() and describeTopics(Collection) + may continue to return information about the deleted topics. +

      + If delete.topic.enable is set to false on the brokers, an exception will be returned to the client indicating + that topic deletion is disabled. +

      + When using topic IDs, this operation is supported by brokers with inter-broker protocol 2.8 or higher. + When using topic names, this operation is supported by brokers with version 0.10.1.0 or higher.

      +
      +
      Parameters:
      +
      topics - The topics to delete.
      +
      options - The options to use when deleting the topics.
      +
      Returns:
      +
      The DeleteTopicsResult.
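      For illustration, a minimal sketch of this overload, assuming an existing Admin instance named admin and hypothetical topic names (error handling omitted):

        // Delete two topics by name and wait for the deletion to be accepted.
        DeleteTopicsResult result = admin.deleteTopics(
            TopicCollection.ofTopicNames(List.of("old-topic-a", "old-topic-b")),
            new DeleteTopicsOptions());
        result.all().get();   // throws ExecutionException if any deletion failed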
      +
      +
      +
    • +
    • +
      +

      listTopics

      +
      default ListTopicsResult listTopics()
      +
      List the topics available in the cluster with the default options. +

      + This is a convenience method for listTopics(ListTopicsOptions) with default options. + See the overload for more details.

      +
      +
      Returns:
      +
      The ListTopicsResult.
      +
      +
      +
    • +
    • +
      +

      listTopics

      +
      ListTopicsResult listTopics(ListTopicsOptions options)
      +
      List the topics available in the cluster.
      +
      +
      Parameters:
      +
      options - The options to use when listing the topics.
      +
      Returns:
      +
      The ListTopicsResult.
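      A minimal sketch, assuming an existing Admin instance named admin; listInternal(true) also returns internal topics such as __consumer_offsets:

        // List all topic names, including internal topics.
        ListTopicsOptions options = new ListTopicsOptions().listInternal(true);
        Set<String> names = admin.listTopics(options).names().get();
        names.forEach(System.out::println);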
      +
      +
      +
    • +
    • +
      +

      describeTopics

      +
      default DescribeTopicsResult describeTopics(Collection<String> topicNames)
      +
      Describe some topics in the cluster, with the default options. +

      + This is a convenience method for describeTopics(Collection, DescribeTopicsOptions) with + default options. See the overload for more details.

      +
      +
      Parameters:
      +
      topicNames - The names of the topics to describe.
      +
      Returns:
      +
      The DescribeTopicsResult.
      +
      +
      +
    • +
    • +
      +

      describeTopics

      +
      default DescribeTopicsResult describeTopics(Collection<String> topicNames, + DescribeTopicsOptions options)
      +
      Describe some topics in the cluster.
      +
      +
      Parameters:
      +
      topicNames - The names of the topics to describe.
      +
      options - The options to use when describing the topic.
      +
      Returns:
      +
      The DescribeTopicsResult.
      +
      +
      +
    • +
    • +
      +

      describeTopics

      +
      default DescribeTopicsResult describeTopics(TopicCollection topics)
      +
      This is a convenience method for describeTopics(TopicCollection, DescribeTopicsOptions) + with default options. See the overload for more details. +

      + When using topic IDs, this operation is supported by brokers with version 3.1.0 or higher.

      +
      +
      Parameters:
      +
      topics - The topics to describe.
      +
      Returns:
      +
      The DescribeTopicsResult.
      +
      +
      +
    • +
    • +
      +

      describeTopics

      +
      DescribeTopicsResult describeTopics(TopicCollection topics, + DescribeTopicsOptions options)
      +
      Describe some topics in the cluster. + + When using topic IDs, this operation is supported by brokers with version 3.1.0 or higher.
      +
      +
      Parameters:
      +
      topics - The topics to describe.
      +
      options - The options to use when describing the topics.
      +
      Returns:
      +
      The DescribeTopicsResult.
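      A minimal sketch of this overload, assuming an existing Admin instance named admin and a hypothetical topic "my-topic":

        // Describe one topic by name and print its partition count.
        DescribeTopicsResult result = admin.describeTopics(
            TopicCollection.ofTopicNames(List.of("my-topic")), new DescribeTopicsOptions());
        TopicDescription description = result.allTopicNames().get().get("my-topic");
        System.out.println("partitions = " + description.partitions().size());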
      +
      +
      +
    • +
    • +
      +

      describeCluster

      +
      default DescribeClusterResult describeCluster()
      +
      Get information about the nodes in the cluster, using the default options. +

      + This is a convenience method for describeCluster(DescribeClusterOptions) with default options. + See the overload for more details.

      +
      +
      Returns:
      +
      The DescribeClusterResult.
      +
      +
      +
    • +
    • +
      +

      describeCluster

      + +
      Get information about the nodes in the cluster. +

      + To obtain broker cluster information, you must configure AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG. + To obtain controller cluster information, you must configure AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG.

      +
      +
      Parameters:
      +
      options - The options to use when getting information about the cluster.
      +
      Returns:
      +
      The DescribeClusterResult.
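      For illustration, a sketch that prints the cluster id and its nodes (assumes an existing Admin instance named admin):

        // Fetch cluster metadata and print each node.
        DescribeClusterResult cluster = admin.describeCluster(new DescribeClusterOptions());
        System.out.println("cluster id: " + cluster.clusterId().get());
        cluster.nodes().get().forEach(node ->
            System.out.println(node.idString() + " @ " + node.host() + ":" + node.port()));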
      +
      +
      +
    • +
    • +
      +

      describeAcls

      +
      default DescribeAclsResult describeAcls(AclBindingFilter filter)
      +
      This is a convenience method for describeAcls(AclBindingFilter, DescribeAclsOptions) with + default options. See the overload for more details. +

      + This operation is supported by brokers with version 0.11.0.0 or higher.

      +
      +
      Parameters:
      +
      filter - The filter to use.
      +
      Returns:
      +
      The DescribeAclsResult.
      +
      +
      +
    • +
    • +
      +

      describeAcls

      +
      DescribeAclsResult describeAcls(AclBindingFilter filter, + DescribeAclsOptions options)
      +
      Lists access control lists (ACLs) according to the supplied filter. +

      + Note: it may take some time for changes made by createAcls or deleteAcls to be reflected + in the output of describeAcls. +

      + This operation is supported by brokers with version 0.11.0.0 or higher.

      +
      +
      Parameters:
      +
      filter - The filter to use.
      +
      options - The options to use when listing the ACLs.
      +
      Returns:
      +
      The DescribeAclsResult.
      +
      +
      +
    • +
    • +
      +

      createAcls

      +
      default CreateAclsResult createAcls(Collection<AclBinding> acls)
      +
      This is a convenience method for createAcls(Collection, CreateAclsOptions) with + default options. See the overload for more details. +

      + This operation is supported by brokers with version 0.11.0.0 or higher.

      +
      +
      Parameters:
      +
      acls - The ACLs to create
      +
      Returns:
      +
      The CreateAclsResult.
      +
      +
      +
    • +
    • +
      +

      createAcls

      + +
      Creates access control lists (ACLs) which are bound to specific resources. +

      + This operation is not transactional so it may succeed for some ACLs while fail for others. +

      + If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but + no changes will be made. +

      + This operation is supported by brokers with version 0.11.0.0 or higher.

      +
      +
      Parameters:
      +
      acls - The ACLs to create
      +
      options - The options to use when creating the ACLs.
      +
      Returns:
      +
      The CreateAclsResult.
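      A minimal sketch granting read access on one topic; the principal, host wildcard and topic name are arbitrary examples, and admin is assumed to exist:

        // Allow principal "User:alice" to read topic "my-topic" from any host.
        AclBinding binding = new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "my-topic", PatternType.LITERAL),
            new AccessControlEntry("User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW));
        admin.createAcls(List.of(binding), new CreateAclsOptions()).all().get();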
      +
      +
      +
    • +
    • +
      +

      deleteAcls

      +
      default DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters)
      +
      This is a convenience method for deleteAcls(Collection, DeleteAclsOptions) with default options. + See the overload for more details. +

      + This operation is supported by brokers with version 0.11.0.0 or higher.

      +
      +
      Parameters:
      +
      filters - The filters to use.
      +
      Returns:
      +
      The DeleteAclsResult.
      +
      +
      +
    • +
    • +
      +

      deleteAcls

      + +
      Deletes access control lists (ACLs) according to the supplied filters. +

      + This operation is not transactional so it may succeed for some ACLs while fail for others. +

      + This operation is supported by brokers with version 0.11.0.0 or higher.

      +
      +
      Parameters:
      +
      filters - The filters to use.
      +
      options - The options to use when deleting the ACLs.
      +
      Returns:
      +
      The DeleteAclsResult.
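      A minimal sketch that removes every ACL bound to one hypothetical topic (assumes an existing Admin instance named admin):

        // Match all ACL entries on the literal topic resource and delete them.
        AclBindingFilter filter = new AclBindingFilter(
            new ResourcePatternFilter(ResourceType.TOPIC, "my-topic", PatternType.LITERAL),
            AccessControlEntryFilter.ANY);
        Collection<AclBinding> deleted = admin.deleteAcls(List.of(filter), new DeleteAclsOptions()).all().get();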
      +
      +
      +
    • +
    • +
      +

      describeConfigs

      +
      default DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources)
      +
      Get the configuration for the specified resources with the default options. +

      + This is a convenience method for describeConfigs(Collection, DescribeConfigsOptions) with default options. + See the overload for more details. +

      + This operation is supported by brokers with version 0.11.0.0 or higher.

      +
      +
      Parameters:
      +
      resources - See relevant type ConfigResource.Type
      +
      Returns:
      +
      The DescribeConfigsResult
      +
      +
      +
    • +
    • +
      +

      describeConfigs

      +
      DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, + DescribeConfigsOptions options)
      +
      Get the configuration for the specified resources. +

      + The returned configuration includes default values and the isDefault() method can be used to distinguish them + from user supplied values. +

      + The value of config entries where isSensitive() is true is always null so that sensitive information + is not disclosed. +

      + Config entries where isReadOnly() is true cannot be updated. +

      + The behavior for a nonexistent resource is as follows: +

      +

      + Note that you cannot describe broker configs or broker logger using AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, + and you cannot describe controller configs or controller logger using AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG. +

      + This operation is supported by brokers with version 0.11.0.0 or higher.

      +
      +
      Parameters:
      +
      resources - See relevant type ConfigResource.Type
      +
      options - The options to use when describing configs
      +
      Returns:
      +
      The DescribeConfigsResult
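      A minimal sketch reading one topic config; the topic and config key are illustrative, and admin is assumed to exist:

        // Read the retention.ms setting of a topic.
        ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic");
        Config config = admin.describeConfigs(List.of(topic), new DescribeConfigsOptions())
            .all().get().get(topic);
        System.out.println("retention.ms = " + config.get("retention.ms").value());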
      +
      +
      +
    • +
    • +
      +

      incrementalAlterConfigs

      +
      default AlterConfigsResult incrementalAlterConfigs(Map<ConfigResource,Collection<AlterConfigOp>> configs)
      +
      Incrementally updates the configuration for the specified resources with default options. +

      + This is a convenience method for incrementalAlterConfigs(Map, AlterConfigsOptions) with default options. + See the overload for more details. +

      + This operation is supported by brokers with version 2.3.0 or higher.

      +
      +
      Parameters:
      +
      configs - The resources with their configs
      +
      Returns:
      +
      The AlterConfigsResult
      +
      +
      +
    • +
    • +
      +

      incrementalAlterConfigs

      +
      AlterConfigsResult incrementalAlterConfigs(Map<ConfigResource,Collection<AlterConfigOp>> configs, + AlterConfigsOptions options)
      +
      Incrementally update the configuration for the specified resources. +

      + Updates are not transactional so they may succeed for some resources while fail for others. The configs for + a particular resource are updated atomically. +

      + The following exceptions can be anticipated when calling get() on the futures obtained from + the returned AlterConfigsResult: +

      +

      + This operation is supported by brokers with version 2.3.0 or higher.

      +
      +
      Parameters:
      +
      configs - The resources with their configs
      +
      options - The options to use when altering configs
      +
      Returns:
      +
      The AlterConfigsResult
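      A minimal sketch that sets a single config value without touching the rest (topic name and value are arbitrary; admin is assumed to exist):

        // SET one entry; other configs of the resource are left unchanged.
        ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic");
        Map<ConfigResource, Collection<AlterConfigOp>> updates = Map.of(topic, List.of(
            new AlterConfigOp(new ConfigEntry("retention.ms", "604800000"), AlterConfigOp.OpType.SET)));
        admin.incrementalAlterConfigs(updates, new AlterConfigsOptions()).all().get();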
      +
      +
      +
    • +
    • +
      +

      alterReplicaLogDirs

      +
      default AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica,String> replicaAssignment)
      +
      Change the log directory for the specified replicas. If the replica does not exist on the broker, the result + shows REPLICA_NOT_AVAILABLE for the given replica, and the replica will be placed in the given log directory once it is + later created on that broker. If the replica already exists on the broker, the replica will be moved to the given + log directory if it is not already there. For detailed results, inspect the returned AlterReplicaLogDirsResult instance. +

      + This operation is not transactional so it may succeed for some replicas while fail for others. +

      + This is a convenience method for alterReplicaLogDirs(Map, AlterReplicaLogDirsOptions) with default options. + See the overload for more details. +

      + This operation is supported by brokers with version 1.1.0 or higher.

      +
      +
      Parameters:
      +
      replicaAssignment - The replicas with their log directory absolute path
      +
      Returns:
      +
      The AlterReplicaLogDirsResult
      +
      +
      +
    • +
    • +
      +

      alterReplicaLogDirs

      +
      AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica,String> replicaAssignment, + AlterReplicaLogDirsOptions options)
      +
      Change the log directory for the specified replicas. If the replica does not exist on the broker, the result + shows REPLICA_NOT_AVAILABLE for the given replica, and the replica will be placed in the given log directory once it is + later created on that broker. If the replica already exists on the broker, the replica will be moved to the given + log directory if it is not already there. For detailed results, inspect the returned AlterReplicaLogDirsResult instance. +

      + This operation is not transactional so it may succeed for some replicas while fail for others. +

      + This operation is supported by brokers with version 1.1.0 or higher.

      +
      +
      Parameters:
      +
      replicaAssignment - The replicas with their log directory absolute path
      +
      options - The options to use when changing replica dir
      +
      Returns:
      +
      The AlterReplicaLogDirsResult
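      A minimal sketch moving one replica to another log directory; the topic, broker id and path are hypothetical, and admin is assumed to exist:

        // Move the replica of partition 0 of "my-topic" on broker 1 to another log dir.
        TopicPartitionReplica replica = new TopicPartitionReplica("my-topic", 0, 1);
        admin.alterReplicaLogDirs(Map.of(replica, "/var/kafka-logs-2"),
            new AlterReplicaLogDirsOptions()).all().get();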
      +
      +
      +
    • +
    • +
      +

      describeLogDirs

      +
      default DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers)
      +
      Query information about all log directories on the given set of brokers. +

      + This is a convenience method for describeLogDirs(Collection, DescribeLogDirsOptions) with default options. + See the overload for more details. +

      + This operation is supported by brokers with version 1.0.0 or higher.

      +
      +
      Parameters:
      +
      brokers - A list of brokers
      +
      Returns:
      +
      The DescribeLogDirsResult
      +
      +
      +
    • +
    • +
      +

      describeLogDirs

      +
      DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, + DescribeLogDirsOptions options)
      +
      Query information about all log directories on the given set of brokers. +

      + This operation is supported by brokers with version 1.0.0 or higher.

      +
      +
      Parameters:
      +
      brokers - A list of brokers
      +
      options - The options to use when querying log dir info
      +
      Returns:
      +
      The DescribeLogDirsResult
      +
      +
      +
    • +
    • +
      +

      describeReplicaLogDirs

      +
      default DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas)
      +
      Query the replica log directory information for the specified replicas. +

      + This is a convenience method for describeReplicaLogDirs(Collection, DescribeReplicaLogDirsOptions) + with default options. See the overload for more details. +

      + This operation is supported by brokers with version 1.0.0 or higher.

      +
      +
      Parameters:
      +
      replicas - The replicas to query
      +
      Returns:
      +
      The DescribeReplicaLogDirsResult
      +
      +
      +
    • +
    • +
      +

      describeReplicaLogDirs

      + +
      Query the replica log directory information for the specified replicas. +

      + This operation is supported by brokers with version 1.0.0 or higher.

      +
      +
      Parameters:
      +
      replicas - The replicas to query
      +
      options - The options to use when querying replica log dir info
      +
      Returns:
      +
      The DescribeReplicaLogDirsResult
      +
      +
      +
    • +
    • +
      +

      createPartitions

      +
      default CreatePartitionsResult createPartitions(Map<String,NewPartitions> newPartitions)
      +
      Increase the number of partitions of the topics given as the keys of newPartitions + according to the corresponding values. If partitions are increased for a topic that has a key, + the partition logic or ordering of the messages will be affected. +

      + This is a convenience method for createPartitions(Map, CreatePartitionsOptions) with default options. + See the overload for more details.

      +
      +
      Parameters:
      +
      newPartitions - The topics which should have new partitions created, and corresponding parameters + for the created partitions.
      +
      Returns:
      +
      The CreatePartitionsResult.
      +
      +
      +
    • +
    • +
      +

      createPartitions

      +
      CreatePartitionsResult createPartitions(Map<String,NewPartitions> newPartitions, + CreatePartitionsOptions options)
      +
      Increase the number of partitions of the topics given as the keys of newPartitions + according to the corresponding values. If partitions are increased for a topic that has a key, + the partition logic or ordering of the messages will be affected. +

      + This operation is not transactional so it may succeed for some topics while fail for others. +

      + It may take several seconds after this method returns + success for all the brokers to become aware that the partitions have been created. + During this time, describeTopics(Collection) + may not return information about the new partitions. +

      + This operation is supported by brokers with version 1.0.0 or higher. +

      + The following exceptions can be anticipated when calling get() on the futures obtained from the + values() method of the returned CreatePartitionsResult +

      +
      +
      Parameters:
      +
      newPartitions - The topics which should have new partitions created, and corresponding parameters + for the created partitions.
      +
      options - The options to use when creating the new partitions.
      +
      Returns:
      +
      The CreatePartitionsResult.
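      A minimal sketch growing a hypothetical topic to 6 partitions and letting the cluster pick broker assignments (admin is assumed to exist):

        // Increase the total partition count of "my-topic" to 6.
        admin.createPartitions(
            Map.of("my-topic", NewPartitions.increaseTo(6)),
            new CreatePartitionsOptions()).all().get();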
      +
      +
      +
    • +
    • +
      +

      deleteRecords

      +
      default DeleteRecordsResult deleteRecords(Map<TopicPartition,RecordsToDelete> recordsToDelete)
      +
      Delete records whose offset is smaller than the given offset of the corresponding partition. +

      + This is a convenience method for deleteRecords(Map, DeleteRecordsOptions) with default options. + See the overload for more details. +

      + This operation is supported by brokers with version 0.11.0.0 or higher.

      +
      +
      Parameters:
      +
      recordsToDelete - The topic partitions and related offsets from which records deletion starts.
      +
      Returns:
      +
      The DeleteRecordsResult.
      +
      +
      +
    • +
    • +
      +

      deleteRecords

      +
      DeleteRecordsResult deleteRecords(Map<TopicPartition,RecordsToDelete> recordsToDelete, + DeleteRecordsOptions options)
      +
      Delete records whose offset is smaller than the given offset of the corresponding partition. +

      + This operation is supported by brokers with version 0.11.0.0 or higher.

      +
      +
      Parameters:
      +
      recordsToDelete - The topic partitions and related offsets from which records deletion starts.
      +
      options - The options to use when deleting records.
      +
      Returns:
      +
      The DeleteRecordsResult.
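      A minimal sketch that truncates one partition below a hypothetical offset (admin is assumed to exist):

        // Delete everything before offset 1000 in partition 0 of "my-topic".
        TopicPartition partition = new TopicPartition("my-topic", 0);
        admin.deleteRecords(
            Map.of(partition, RecordsToDelete.beforeOffset(1000L)),
            new DeleteRecordsOptions()).all().get();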
      +
      +
      +
    • +
    • +
      +

      createDelegationToken

      +
      default CreateDelegationTokenResult createDelegationToken()
      +
      Create a Delegation Token. +

      + This is a convenience method for createDelegationToken(CreateDelegationTokenOptions) with default options. + See the overload for more details.

      +
      +
      Returns:
      +
      The CreateDelegationTokenResult.
      +
      +
      +
    • +
    • +
      +

      createDelegationToken

      + +
      Create a Delegation Token. +

      + This operation is supported by brokers with version 1.1.0 or higher. +

      + The following exceptions can be anticipated when calling get() on the futures obtained from the + delegationToken() method of the returned CreateDelegationTokenResult +

      +
      +
      Parameters:
      +
      options - The options to use when creating delegation token.
      +
      Returns:
      +
      The CreateDelegationTokenResult.
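      A minimal sketch creating a token for the authenticated principal with default options (admin is assumed to exist):

        // Create a delegation token and print its token id.
        DelegationToken token = admin.createDelegationToken(new CreateDelegationTokenOptions())
            .delegationToken().get();
        System.out.println("token id: " + token.tokenInfo().tokenId());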
      +
      +
      +
    • +
    • +
      +

      renewDelegationToken

      +
      default RenewDelegationTokenResult renewDelegationToken(byte[] hmac)
      +
      Renew a Delegation Token. +

      + This is a convenience method for renewDelegationToken(byte[], RenewDelegationTokenOptions) with default options. + See the overload for more details.

      +
      +
      Parameters:
      +
      hmac - HMAC of the Delegation token
      +
      Returns:
      +
      The RenewDelegationTokenResult.
      +
      +
      +
    • +
    • +
      +

      renewDelegationToken

      +
      RenewDelegationTokenResult renewDelegationToken(byte[] hmac, + RenewDelegationTokenOptions options)
      +
      Renew a Delegation Token. +

      + This operation is supported by brokers with version 1.1.0 or higher. +

      + The following exceptions can be anticipated when calling get() on the futures obtained from the + expiryTimestamp() method of the returned RenewDelegationTokenResult +

      +
      +
      Parameters:
      +
      hmac - HMAC of the Delegation token
      +
      options - The options to use when renewing delegation token.
      +
      Returns:
      +
      The RenewDelegationTokenResult.
      +
      +
      +
    • +
    • +
      +

      expireDelegationToken

      +
      default ExpireDelegationTokenResult expireDelegationToken(byte[] hmac)
      +
      Expire a Delegation Token. +

      + This is a convenience method for expireDelegationToken(byte[], ExpireDelegationTokenOptions) with default options. + This will expire the token immediately. See the overload for more details.

      +
      +
      Parameters:
      +
      hmac - HMAC of the Delegation token
      +
      Returns:
      +
      The ExpireDelegationTokenResult.
      +
      +
      +
    • +
    • +
      +

      expireDelegationToken

      +
      ExpireDelegationTokenResult expireDelegationToken(byte[] hmac, + ExpireDelegationTokenOptions options)
      +
      Expire a Delegation Token. +

      + This operation is supported by brokers with version 1.1.0 or higher. +

      + The following exceptions can be anticipated when calling get() on the futures obtained from the + expiryTimestamp() method of the returned ExpireDelegationTokenResult +

      +
      +
      Parameters:
      +
      hmac - HMAC of the Delegation token
      +
      options - The options to use when expiring delegation token.
      +
      Returns:
      +
      The ExpireDelegationTokenResult.
      +
      +
      +
    • +
    • +
      +

      describeDelegationToken

      +
      default DescribeDelegationTokenResult describeDelegationToken()
      +
      Describe the Delegation Tokens. +

      + This is a convenience method for describeDelegationToken(DescribeDelegationTokenOptions) with default options. + This will return all tokens owned by the user and tokens for which the user has Describe permission. See the overload for more details.

      +
      +
      Returns:
      +
      The DescribeDelegationTokenResult.
      +
      +
      +
    • +
    • +
      +

      describeDelegationToken

      + +
      Describe the Delegation Tokens. +

      + This operation is supported by brokers with version 1.1.0 or higher. +

      + The following exceptions can be anticipated when calling get() on the futures obtained from the + delegationTokens() method of the returned DescribeDelegationTokenResult +

      +
      +
      Parameters:
      +
      options - The options to use when describing delegation tokens.
      +
      Returns:
      +
      The DescribeDelegationTokenResult.
      +
      +
      +
    • +
    • +
      +

      describeConsumerGroups

      +
      DescribeConsumerGroupsResult describeConsumerGroups(Collection<String> groupIds, + DescribeConsumerGroupsOptions options)
      +
      Describe some consumer groups in the cluster.
      +
      +
      Parameters:
      +
      groupIds - The IDs of the groups to describe.
      +
      options - The options to use when describing the groups.
      +
      Returns:
      +
      The DescribeConsumerGroupsResult.
      +
      +
      +
    • +
    • +
      +

      describeConsumerGroups

      +
      default DescribeConsumerGroupsResult describeConsumerGroups(Collection<String> groupIds)
      +
      Describe some consumer groups in the cluster, with the default options. +

      + This is a convenience method for describeConsumerGroups(Collection, DescribeConsumerGroupsOptions) + with default options. See the overload for more details.

      +
      +
      Parameters:
      +
      groupIds - The IDs of the groups to describe.
      +
      Returns:
      +
      The DescribeConsumerGroupsResult.
      +
      +
      +
    • +
    • +
      +

      listConsumerGroups

      +
      @Deprecated(since="4.1", + forRemoval=true) +ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options)
      +
      Deprecated, for removal: This API element is subject to removal in a future version. +
      Since 4.1. Use listGroups(ListGroupsOptions) instead.
      +
      +
      List the consumer groups available in the cluster.
      +
      +
      Parameters:
      +
      options - The options to use when listing the consumer groups.
      +
      Returns:
      +
      The ListConsumerGroupsResult.
      +
      +
      +
    • +
    • +
      +

      listConsumerGroups

      +
      @Deprecated(since="4.1", + forRemoval=true) +default ListConsumerGroupsResult listConsumerGroups()
      +
      Deprecated, for removal: This API element is subject to removal in a future version. +
      Since 4.1. Use listGroups(ListGroupsOptions) instead.
      +
      +
      List the consumer groups available in the cluster with the default options. +

      + This is a convenience method for listConsumerGroups(ListConsumerGroupsOptions) with default options. + See the overload for more details.

      +
      +
      Returns:
      +
      The ListConsumerGroupsResult.
      +
      +
      +
    • +
    • +
      +

      listConsumerGroupOffsets

      +
      default ListConsumerGroupOffsetsResult listConsumerGroupOffsets(String groupId, + ListConsumerGroupOffsetsOptions options)
      +
      List the consumer group offsets available in the cluster.
      +
      +
      Parameters:
      +
      options - The options to use when listing the consumer group offsets.
      +
      Returns:
      +
      The ListConsumerGroupOffsetsResult
      +
      +
      +
    • +
    • +
      +

      listConsumerGroupOffsets

      +
      default ListConsumerGroupOffsetsResult listConsumerGroupOffsets(String groupId)
      +
      List the consumer group offsets available in the cluster with the default options. +

      + This is a convenience method for listConsumerGroupOffsets(Map, ListConsumerGroupOffsetsOptions) + to list offsets of all partitions of one group with default options.

      +
      +
      Returns:
      +
      The ListConsumerGroupOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      listConsumerGroupOffsets

      ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String,ListConsumerGroupOffsetsSpec> groupSpecs, ListConsumerGroupOffsetsOptions options)

      List the consumer group offsets available in the cluster for the specified consumer groups.

      Parameters:
      groupSpecs - Map of consumer group ids to a spec that specifies the topic partitions of the group to list offsets for.
      options - The options to use when listing the consumer group offsets.
      Returns:
      The ListConsumerGroupOffsetsResult.

      listConsumerGroupOffsets

      default ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String,ListConsumerGroupOffsetsSpec> groupSpecs)

      List the consumer group offsets available in the cluster for the specified groups with the default options.

      This is a convenience method for listConsumerGroupOffsets(Map, ListConsumerGroupOffsetsOptions) with default options.

      Parameters:
      groupSpecs - Map of consumer group ids to a spec that specifies the topic partitions of the group to list offsets for.
      Returns:
      The ListConsumerGroupOffsetsResult.
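      As a small illustration of the single-group convenience overload, the sketch below assumes an Admin instance created as in the earlier delegation-token sketch and an existing consumer group named "my-group"; both names are illustrative.

      import java.util.Map;
      import org.apache.kafka.clients.admin.Admin;
      import org.apache.kafka.clients.consumer.OffsetAndMetadata;
      import org.apache.kafka.common.TopicPartition;

      class ListGroupOffsetsExample {
          static void printCommittedOffsets(Admin admin) throws Exception {
              // Committed offsets for every partition of the group, keyed by partition.
              Map<TopicPartition, OffsetAndMetadata> offsets =
                  admin.listConsumerGroupOffsets("my-group").partitionsToOffsetAndMetadata().get();
              offsets.forEach((tp, om) -> System.out.println(tp + " -> " + om.offset()));
          }
      }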

      listStreamsGroupOffsets

      + +
      List the streams group offsets available in the cluster for the specified streams groups. + + Note: this method effectively does the same as the corresponding consumer group method listConsumerGroupOffsets(java.lang.String, org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions) does.
      +
      +
      Parameters:
      +
      groupSpecs - Map of streams group ids to a spec that specifies the topic partitions of the group to list offsets for.
      +
      options - The options to use when listing the streams group offsets.
      +
      Returns:
      +
      The ListStreamsGroupOffsetsResult
      +
      +
      +
    • +
    • +
      +

      listStreamsGroupOffsets

      +
      default ListStreamsGroupOffsetsResult listStreamsGroupOffsets(Map<String,ListStreamsGroupOffsetsSpec> groupSpecs)
      +
      List the streams group offsets available in the cluster for the specified groups with the default options. +

      + This is a convenience method for + listStreamsGroupOffsets(Map, ListStreamsGroupOffsetsOptions) with default options.

      +
      +
      Parameters:
      +
      groupSpecs - Map of streams group ids to a spec that specifies the topic partitions of the group to list offsets for.
      +
      Returns:
      +
      The ListStreamsGroupOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      deleteConsumerGroups

      +
      DeleteConsumerGroupsResult deleteConsumerGroups(Collection<String> groupIds, + DeleteConsumerGroupsOptions options)
      +
      Delete consumer groups from the cluster.
      +
      +
      Parameters:
      +
      options - The options to use when deleting a consumer group.
      +
      Returns:
      +
      The DeleteConsumerGroupsResult.
      +
      +
      +
    • +
    • +
      +

      deleteConsumerGroups

      +
      default DeleteConsumerGroupsResult deleteConsumerGroups(Collection<String> groupIds)
      +
      Delete consumer groups from the cluster with the default options.
      +
      +
      Returns:
      +
      The DeleteConsumerGroupResult.
      +
      +
      +
    • +
    • +
      +

      deleteStreamsGroups

      +
      DeleteStreamsGroupsResult deleteStreamsGroups(Collection<String> groupIds, + DeleteStreamsGroupsOptions options)
      +
      Delete streams groups from the cluster. + + Note: this method effectively does the same as the corresponding consumer group method deleteConsumerGroups(java.util.Collection<java.lang.String>, org.apache.kafka.clients.admin.DeleteConsumerGroupsOptions) does.
      +
      +
      Parameters:
      +
      options - The options to use when deleting a streams group.
      +
      Returns:
      +
      The DeleteStreamsGroupsResult.
      +
      +
      +
    • +
    • +
      +

      deleteStreamsGroups

      +
      default DeleteStreamsGroupsResult deleteStreamsGroups(Collection<String> groupIds)
      +
      Delete streams groups from the cluster with the default options.
      +
      +
      Returns:
      +
      The DeleteStreamsGroupResult.
      +
      +
      +
    • +
    • +
      +

      deleteConsumerGroupOffsets

      +
      DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, + Set<TopicPartition> partitions, + DeleteConsumerGroupOffsetsOptions options)
      +
      Delete committed offsets for a set of partitions in a consumer group. This will + succeed at the partition level only if the group is not actively subscribed + to the corresponding topic.
      +
      +
      Parameters:
      +
      options - The options to use when deleting offsets in a consumer group.
      +
      Returns:
      +
      The DeleteConsumerGroupOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      deleteConsumerGroupOffsets

      +
      default DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, + Set<TopicPartition> partitions)
      +
      Delete committed offsets for a set of partitions in a consumer group with the default + options. This will succeed at the partition level only if the group is not actively + subscribed to the corresponding topic.
      +
      +
      Returns:
      +
      The DeleteConsumerGroupOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      deleteStreamsGroupOffsets

      +
      DeleteStreamsGroupOffsetsResult deleteStreamsGroupOffsets(String groupId, + Set<TopicPartition> partitions, + DeleteStreamsGroupOffsetsOptions options)
      +
      Delete committed offsets for a set of partitions in a streams group. This will + succeed at the partition level only if the group is not actively subscribed + to the corresponding topic. + + Note: this method effectively does the same as the corresponding consumer group method deleteConsumerGroupOffsets(java.lang.String, java.util.Set<org.apache.kafka.common.TopicPartition>, org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsOptions) does.
      +
      +
      Parameters:
      +
      options - The options to use when deleting offsets in a streams group.
      +
      Returns:
      +
      The DeleteStreamsGroupOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      deleteStreamsGroupOffsets

      +
      default DeleteStreamsGroupOffsetsResult deleteStreamsGroupOffsets(String groupId, + Set<TopicPartition> partitions)
      +
      Delete committed offsets for a set of partitions in a streams group with the default + options. This will succeed at the partition level only if the group is not actively + subscribed to the corresponding topic.
      +
      +
      Returns:
      +
      The DeleteStreamsGroupOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      listGroups

      +
      default ListGroupsResult listGroups()
      +
      List the groups available in the cluster with the default options. + +

      This is a convenience method for listGroups(ListGroupsOptions) with default options. + See the overload for more details.

      +
      +
      Returns:
      +
      The ListGroupsResult.
      +
      +
      +
    • +
    • +
      +

      listGroups

      +
      ListGroupsResult listGroups(ListGroupsOptions options)
      +
      List the groups available in the cluster.
      +
      +
      Parameters:
      +
      options - The options to use when listing the groups.
      +
      Returns:
      +
      The ListGroupsResult.
      +
      +
      +
    • +
    • +
      +

      electLeaders

      +
      default ElectLeadersResult electLeaders(ElectionType electionType, + Set<TopicPartition> partitions)
      +
      Elect a replica as leader for topic partitions. +

      + This is a convenience method for electLeaders(ElectionType, Set, ElectLeadersOptions) + with default options.

      +
      +
      Parameters:
      +
      electionType - The type of election to conduct.
      +
      partitions - The topics and partitions for which to conduct elections.
      +
      Returns:
      +
      The ElectLeadersResult.
      +
      +
      +
    • +
    • +
      +

      electLeaders

      ElectLeadersResult electLeaders(ElectionType electionType, Set<TopicPartition> partitions, ElectLeadersOptions options)

      Elect a replica as leader for the given partitions, or for all partitions if the argument to partitions is null.

      This operation is not transactional, so it may succeed for some partitions while failing for others.

      It may take several seconds after this method returns success for all the brokers in the cluster to become aware that the partitions have new leaders. During this time, describeTopics(Collection) may not return information about the partitions' new leaders.

      This operation is supported by brokers with version 2.2.0 or later if preferred election is used; otherwise the brokers must be 2.4.0 or higher.

      The following exceptions can be anticipated when calling get() on the future obtained from the returned ElectLeadersResult.

      Parameters:
      electionType - The type of election to conduct.
      partitions - The topics and partitions for which to conduct elections.
      options - The options to use when electing the leaders.
      Returns:
      The ElectLeadersResult.
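      A minimal sketch of a preferred-leader election follows; it assumes an existing Admin instance and a topic named "orders" with partition 0 (both illustrative), and brokers recent enough for this API.

      import java.util.Map;
      import java.util.Optional;
      import java.util.Set;
      import org.apache.kafka.clients.admin.Admin;
      import org.apache.kafka.common.ElectionType;
      import org.apache.kafka.common.TopicPartition;

      class ElectLeadersExample {
          static void electPreferredLeader(Admin admin) throws Exception {
              TopicPartition tp = new TopicPartition("orders", 0);
              // Trigger a preferred-replica election; the per-partition map holds an
              // Optional error for each partition (empty means the election succeeded).
              Map<TopicPartition, Optional<Throwable>> results =
                  admin.electLeaders(ElectionType.PREFERRED, Set.of(tp)).partitions().get();
              results.forEach((partition, error) ->
                  System.out.println(partition + " -> " + (error.isEmpty() ? "ok" : error.get())));
          }
      }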

      alterPartitionReassignments

      +
      default AlterPartitionReassignmentsResult alterPartitionReassignments(Map<TopicPartition,Optional<NewPartitionReassignment>> reassignments)
      +
      Change the reassignments for one or more partitions. + Providing an empty Optional (e.g via Optional.empty()) will revert the reassignment for the associated partition. + + This is a convenience method for alterPartitionReassignments(Map, AlterPartitionReassignmentsOptions) + with default options. See the overload for more details.
      +
      +
    • +
    • +
      +

      alterPartitionReassignments

      AlterPartitionReassignmentsResult alterPartitionReassignments(Map<TopicPartition,Optional<NewPartitionReassignment>> reassignments, AlterPartitionReassignmentsOptions options)

      Change the reassignments for one or more partitions. Providing an empty Optional (e.g. via Optional.empty()) will revert the reassignment for the associated partition.

      The following exceptions can be anticipated when calling get() on the futures obtained from the returned AlterPartitionReassignmentsResult.

      Parameters:
      reassignments - The reassignments to add, modify, or remove. See NewPartitionReassignment.
      options - The options to use.
      Returns:
      The result.
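      The sketch below shows one way the reassignment call is typically combined with listPartitionReassignments to watch progress. It assumes an existing Admin instance, a topic "orders" with partition 0, and brokers 1, 2 and 3 as the target replicas; all of these names and IDs are illustrative.

      import java.util.List;
      import java.util.Map;
      import java.util.Optional;
      import org.apache.kafka.clients.admin.Admin;
      import org.apache.kafka.clients.admin.NewPartitionReassignment;
      import org.apache.kafka.common.TopicPartition;

      class ReassignPartitionExample {
          static void moveReplicas(Admin admin) throws Exception {
              TopicPartition tp = new TopicPartition("orders", 0);
              // Request that orders-0 end up on brokers 1, 2 and 3. An Optional.empty()
              // value would instead cancel an in-flight reassignment for that partition.
              admin.alterPartitionReassignments(
                  Map.of(tp, Optional.of(new NewPartitionReassignment(List.of(1, 2, 3))))
              ).all().get();
              // The ongoing reassignments can then be observed while they complete.
              System.out.println(admin.listPartitionReassignments().reassignments().get());
          }
      }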

      listPartitionReassignments

      +
      default ListPartitionReassignmentsResult listPartitionReassignments()
      +
      List all of the current partition reassignments + + This is a convenience method for listPartitionReassignments(ListPartitionReassignmentsOptions) + with default options. See the overload for more details.
      +
      +
    • +
    • +
      +

      listPartitionReassignments

      +
      default ListPartitionReassignmentsResult listPartitionReassignments(Set<TopicPartition> partitions)
      +
      List the current reassignments for the given partitions + + This is a convenience method for listPartitionReassignments(Set, ListPartitionReassignmentsOptions) + with default options. See the overload for more details.
      +
      +
    • +
    • +
      +

      listPartitionReassignments

      +
      default ListPartitionReassignmentsResult listPartitionReassignments(Set<TopicPartition> partitions, + ListPartitionReassignmentsOptions options)
      +
      List the current reassignments for the given partitions + +

      The following exceptions can be anticipated when calling get() on the futures obtained from + the returned ListPartitionReassignmentsResult:

      +
      +
      +
      Parameters:
      +
      partitions - The topic partitions to list reassignments for.
      +
      options - The options to use.
      +
      Returns:
      +
      The result.
      +
      +
      +
    • +
    • +
      +

      listPartitionReassignments

      +
      default ListPartitionReassignmentsResult listPartitionReassignments(ListPartitionReassignmentsOptions options)
      +
      List all of the current partition reassignments + +

      The following exceptions can be anticipated when calling get() on the futures obtained from + the returned ListPartitionReassignmentsResult:

      +
      +
      +
      Parameters:
      +
      options - The options to use.
      +
      Returns:
      +
      The result.
      +
      +
      +
    • +
    • +
      +

      listPartitionReassignments

      + +
      +
      Parameters:
      +
      partitions - the partitions we want to get reassignment for, or an empty optional if we want to get the reassignments for all partitions in the cluster
      +
      options - The options to use.
      +
      Returns:
      +
      The result.
      +
      +
      +
    • +
    • +
      +

      removeMembersFromConsumerGroup

      +
      RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(String groupId, + RemoveMembersFromConsumerGroupOptions options)
      +
      Remove members from the consumer group by given member identities. +

      + For possible error codes, refer to LeaveGroupResponse.

      +
      +
      Parameters:
      +
      groupId - The ID of the group to remove member from.
      +
      options - The options to carry removing members' information.
      +
      Returns:
      +
      The MembershipChangeResult.
      +
      +
      +
    • +
    • +
      +

      alterConsumerGroupOffsets

      +
      default AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId, + Map<TopicPartition,OffsetAndMetadata> offsets)
      +

      Alters offsets for the specified group. In order to succeed, the group must be empty. + +

      This is a convenience method for alterConsumerGroupOffsets(String, Map, AlterConsumerGroupOffsetsOptions) with default options. + See the overload for more details.

      +
      +
      Parameters:
      +
      groupId - The group for which to alter offsets.
      +
      offsets - A map of offsets by partition with associated metadata.
      +
      Returns:
      +
      The AlterOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      alterConsumerGroupOffsets

      AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId, Map<TopicPartition,OffsetAndMetadata> offsets, AlterConsumerGroupOffsetsOptions options)

      Alters offsets for the specified group. In order to succeed, the group must be empty.

      This operation is not transactional, so it may succeed for some partitions while failing for others.

      Parameters:
      groupId - The group for which to alter offsets.
      offsets - A map of offsets by partition with associated metadata. Partitions not specified in the map are ignored.
      options - The options to use when altering the offsets.
      Returns:
      The AlterConsumerGroupOffsetsResult.
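      A minimal offset-reset sketch follows, assuming an existing Admin instance, a topic "orders" with partition 0, and an empty consumer group "my-group" (illustrative names).

      import java.util.Map;
      import org.apache.kafka.clients.admin.Admin;
      import org.apache.kafka.clients.consumer.OffsetAndMetadata;
      import org.apache.kafka.common.TopicPartition;

      class ResetGroupOffsetExample {
          static void resetToOffset(Admin admin) throws Exception {
              TopicPartition tp = new TopicPartition("orders", 0);
              // Rewind the committed offset of my-group on orders-0 to 0; the group must
              // have no active members for this to succeed.
              admin.alterConsumerGroupOffsets("my-group", Map.of(tp, new OffsetAndMetadata(0L)))
                   .all()
                   .get();
          }
      }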

      alterStreamsGroupOffsets

      +
      default AlterStreamsGroupOffsetsResult alterStreamsGroupOffsets(String groupId, + Map<TopicPartition,OffsetAndMetadata> offsets)
      +

      Alters offsets for the specified group. In order to succeed, the group must be empty. + +

      This is a convenience method for alterStreamsGroupOffsets(String, Map, AlterStreamsGroupOffsetsOptions) with default options. + See the overload for more details.

      +
      +
      Parameters:
      +
      groupId - The group for which to alter offsets.
      +
      offsets - A map of offsets by partition with associated metadata.
      +
      Returns:
      +
      The AlterOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      alterStreamsGroupOffsets

      + +

      Alters offsets for the specified group. In order to succeed, the group must be empty. + +

      This operation is not transactional so it may succeed for some partitions while fail for others. + + Note: this method effectively does the same as the corresponding consumer group method alterConsumerGroupOffsets(java.lang.String, java.util.Map<org.apache.kafka.common.TopicPartition, org.apache.kafka.clients.consumer.OffsetAndMetadata>) does.

      +
      +
      Parameters:
      +
      groupId - The group for which to alter offsets.
      +
      offsets - A map of offsets by partition with associated metadata. Partitions not specified in the map are ignored.
      +
      options - The options to use when altering the offsets.
      +
      Returns:
      +
      The AlterOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      listOffsets

      default ListOffsetsResult listOffsets(Map<TopicPartition,OffsetSpec> topicPartitionOffsets)

      List offsets for the specified partitions and OffsetSpec. This operation makes it possible to find the beginning offset, the end offset, or the offset matching a timestamp for each partition.

      This is a convenience method for listOffsets(Map, ListOffsetsOptions) with default options.

      Parameters:
      topicPartitionOffsets - The mapping from partition to the OffsetSpec to look up.
      Returns:
      The ListOffsetsResult.

      listOffsets

      ListOffsetsResult listOffsets(Map<TopicPartition,OffsetSpec> topicPartitionOffsets, ListOffsetsOptions options)

      List offsets for the specified partitions. This operation makes it possible to find the beginning offset, the end offset, or the offset matching a timestamp for each partition.

      Parameters:
      topicPartitionOffsets - The mapping from partition to the OffsetSpec to look up.
      options - The options to use when retrieving the offsets.
      Returns:
      The ListOffsetsResult.
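      The sketch below queries the end offset of one partition; it assumes an existing Admin instance and a topic "orders" with partition 0 (illustrative).

      import java.util.Map;
      import org.apache.kafka.clients.admin.Admin;
      import org.apache.kafka.clients.admin.ListOffsetsResult;
      import org.apache.kafka.clients.admin.OffsetSpec;
      import org.apache.kafka.common.TopicPartition;

      class ListOffsetsExample {
          static void printEndOffset(Admin admin) throws Exception {
              TopicPartition tp = new TopicPartition("orders", 0);
              // Ask for the current end offset; OffsetSpec.earliest() or
              // OffsetSpec.forTimestamp(ts) could be used here instead.
              Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> result =
                  admin.listOffsets(Map.of(tp, OffsetSpec.latest())).all().get();
              System.out.println("End offset of " + tp + " = " + result.get(tp).offset());
          }
      }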

      describeClientQuotas

      +
      default DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter)
      +
      Describes all entities matching the provided filter that have at least one client quota configuration + value defined. +

      + This is a convenience method for describeClientQuotas(ClientQuotaFilter, DescribeClientQuotasOptions) + with default options. See the overload for more details. +

      + This operation is supported by brokers with version 2.6.0 or higher.

      +
      +
      Parameters:
      +
      filter - the filter to apply to match entities
      +
      Returns:
      +
      the DescribeClientQuotasResult containing the result
      +
      +
      +
    • +
    • +
      +

      describeClientQuotas

      + +
      Describes all entities matching the provided filter that have at least one client quota configuration + value defined. +

      + The following exceptions can be anticipated when calling get() on the future from the + returned DescribeClientQuotasResult: +

      +

      + This operation is supported by brokers with version 2.6.0 or higher.

      +
      +
      Parameters:
      +
      filter - the filter to apply to match entities
      +
      options - the options to use
      +
      Returns:
      +
      the DescribeClientQuotasResult containing the result
      +
      +
      +
    • +
    • +
      +

      alterClientQuotas

      +
      default AlterClientQuotasResult alterClientQuotas(Collection<ClientQuotaAlteration> entries)
      +
      Alters client quota configurations with the specified alterations. +

      + This is a convenience method for alterClientQuotas(Collection, AlterClientQuotasOptions) + with default options. See the overload for more details. +

      + This operation is supported by brokers with version 2.6.0 or higher.

      +
      +
      Parameters:
      +
      entries - the alterations to perform
      +
      Returns:
      +
      the AlterClientQuotasResult containing the result
      +
      +
      +
    • +
    • +
      +

      alterClientQuotas

      AlterClientQuotasResult alterClientQuotas(Collection<ClientQuotaAlteration> entries, AlterClientQuotasOptions options)

      Alters client quota configurations with the specified alterations.

      Alterations for a single entity are atomic, but atomicity across entities is not guaranteed. The resulting per-entity error code should be evaluated to resolve the success or failure of all updates.

      The following exceptions can be anticipated when calling get() on the futures obtained from the returned AlterClientQuotasResult:

      • ClusterAuthorizationException - If the authenticated user didn't have alter access to the cluster.
      • InvalidRequestException - If the request details are invalid, e.g. a configuration key was specified more than once for an entity.
      • TimeoutException - If the request timed out before the alterations could finish. It cannot be guaranteed whether the update succeeded or not.

      This operation is supported by brokers with version 2.6.0 or higher.

      Parameters:
      entries - the alterations to perform
      options - the options to use
      Returns:
      the AlterClientQuotasResult containing the result
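      A minimal quota-alteration sketch follows. It assumes an existing Admin instance, a client that identifies itself with client.id "reporting-app", and the standard broker quota key "producer_byte_rate"; the entity name and rate are illustrative.

      import java.util.List;
      import java.util.Map;
      import org.apache.kafka.clients.admin.Admin;
      import org.apache.kafka.common.quota.ClientQuotaAlteration;
      import org.apache.kafka.common.quota.ClientQuotaEntity;

      class ClientQuotaExample {
          static void throttleClient(Admin admin) throws Exception {
              // Entity: all requests whose client.id is "reporting-app".
              ClientQuotaEntity entity =
                  new ClientQuotaEntity(Map.of(ClientQuotaEntity.CLIENT_ID, "reporting-app"));
              // Cap produce throughput at ~1 MB/s; a null value would remove the quota.
              ClientQuotaAlteration alteration = new ClientQuotaAlteration(entity,
                  List.of(new ClientQuotaAlteration.Op("producer_byte_rate", 1_048_576.0)));
              admin.alterClientQuotas(List.of(alteration)).all().get();
          }
      }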

      describeUserScramCredentials

      +
      default DescribeUserScramCredentialsResult describeUserScramCredentials()
      +
      Describe all SASL/SCRAM credentials. + +

      This is a convenience method for describeUserScramCredentials(List, DescribeUserScramCredentialsOptions)

      +
      +
      Returns:
      +
      The DescribeUserScramCredentialsResult.
      +
      +
      +
    • +
    • +
      +

      describeUserScramCredentials

      +
      default DescribeUserScramCredentialsResult describeUserScramCredentials(List<String> users)
      +
      Describe SASL/SCRAM credentials for the given users. + +

      This is a convenience method for describeUserScramCredentials(List, DescribeUserScramCredentialsOptions)

      +
      +
      Parameters:
      +
      users - the users for which credentials are to be described; all users' credentials are described if null + or empty.
      +
      Returns:
      +
      The DescribeUserScramCredentialsResult.
      +
      +
      +
    • +
    • +
      +

      describeUserScramCredentials

      +
      DescribeUserScramCredentialsResult describeUserScramCredentials(List<String> users, + DescribeUserScramCredentialsOptions options)
      +
      Describe SASL/SCRAM credentials. +

      + The following exceptions can be anticipated when calling get() on the futures from the + returned DescribeUserScramCredentialsResult: +

      +

      + This operation is supported by brokers with version 2.7.0 or higher.

      +
      +
      Parameters:
      +
      users - the users for which credentials are to be described; all users' credentials are described if null + or empty.
      +
      options - The options to use when describing the credentials
      +
      Returns:
      +
      The DescribeUserScramCredentialsResult.
      +
      +
      +
    • +
    • +
      +

      alterUserScramCredentials

      +
      default AlterUserScramCredentialsResult alterUserScramCredentials(List<UserScramCredentialAlteration> alterations)
      +
      Alter SASL/SCRAM credentials for the given users. + +

      This is a convenience method for alterUserScramCredentials(List, AlterUserScramCredentialsOptions)

      +
      +
      Parameters:
      +
      alterations - the alterations to be applied
      +
      Returns:
      +
      The AlterUserScramCredentialsResult.
      +
      +
      +
    • +
    • +
      +

      alterUserScramCredentials

      AlterUserScramCredentialsResult alterUserScramCredentials(List<UserScramCredentialAlteration> alterations, AlterUserScramCredentialsOptions options)

      Alter SASL/SCRAM credentials.

      The following exceptions can be anticipated when calling get() on any of the futures obtained from the returned AlterUserScramCredentialsResult.

      This operation is supported by brokers with version 2.7.0 or higher.

      Parameters:
      alterations - the alterations to be applied
      options - The options to use when altering the credentials
      Returns:
      The AlterUserScramCredentialsResult.
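      The sketch below upserts a SCRAM credential; it assumes an existing Admin instance, and the user name "alice" and password are placeholders only.

      import java.util.List;
      import org.apache.kafka.clients.admin.Admin;
      import org.apache.kafka.clients.admin.ScramCredentialInfo;
      import org.apache.kafka.clients.admin.ScramMechanism;
      import org.apache.kafka.clients.admin.UserScramCredentialUpsertion;

      class ScramCredentialExample {
          static void upsertCredential(Admin admin) throws Exception {
              // Create (or replace) a SCRAM-SHA-256 credential for user "alice" with
              // 8192 iterations; the salted password is derived on the client side.
              UserScramCredentialUpsertion upsertion = new UserScramCredentialUpsertion(
                  "alice",
                  new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 8192),
                  "alice-secret");
              admin.alterUserScramCredentials(List.of(upsertion)).all().get();
          }
      }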

      describeFeatures

      +
      default DescribeFeaturesResult describeFeatures()
      +
      Describes finalized as well as supported features. +

      + This is a convenience method for describeFeatures(DescribeFeaturesOptions) with default options. + See the overload for more details.

      +
      +
      Returns:
      +
      the DescribeFeaturesResult containing the result
      +
      +
      +
    • +
    • +
      +

      describeFeatures

      + +
      Describes finalized as well as supported features. The request is issued to any random + broker. +

      + The following exceptions can be anticipated when calling get() on the future from the + returned DescribeFeaturesResult: +

        +
      • TimeoutException + If the request timed out before the describe operation could finish.
      • +
      +

      +
      +
      Parameters:
      +
      options - the options to use
      +
      Returns:
      +
      the DescribeFeaturesResult containing the result
      +
      +
      +
    • +
    • +
      +

      updateFeatures

      UpdateFeaturesResult updateFeatures(Map<String,FeatureUpdate> featureUpdates, UpdateFeaturesOptions options)

      Applies specified updates to finalized features. This operation is not transactional, so some updates may succeed while the rest may fail.

      The API takes in a map of finalized feature names to FeatureUpdate that needs to be applied. Each entry in the map specifies the finalized feature to be added, updated or deleted, along with the new max feature version level value. This request is issued only to the controller since the API is only served by the controller. The return value contains an error code for each supplied FeatureUpdate, and the code indicates if the update succeeded or failed in the controller.

      The following exceptions can be anticipated when calling get() on the futures obtained from the returned UpdateFeaturesResult:

      • ClusterAuthorizationException - If the authenticated user didn't have alter access to the cluster.
      • InvalidRequestException - If the request details are invalid, e.g. a non-existing finalized feature is attempted to be deleted or downgraded.
      • TimeoutException - If the request timed out before the updates could finish. It cannot be guaranteed whether the updates succeeded or not.
      • FeatureUpdateFailedException - This means there was an unexpected error encountered when the update was applied on the controller. There is no guarantee on whether the update succeeded or failed. The best way to find out is to issue a describeFeatures(DescribeFeaturesOptions) request.

      This operation is supported by brokers with version 2.7.0 or higher.

      Parameters:
      featureUpdates - the map of finalized feature name to FeatureUpdate
      options - the options to use
      Returns:
      the UpdateFeaturesResult containing the result
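      A sketch of the describe-then-update flow follows. It assumes an existing Admin instance; the feature name "metadata.version" and the target level 21 are purely illustrative values that should be replaced with a level actually supported by the cluster.

      import java.util.Map;
      import org.apache.kafka.clients.admin.Admin;
      import org.apache.kafka.clients.admin.FeatureMetadata;
      import org.apache.kafka.clients.admin.FeatureUpdate;
      import org.apache.kafka.clients.admin.UpdateFeaturesOptions;

      class FeatureExample {
          static void describeThenUpgrade(Admin admin) throws Exception {
              // Inspect what the cluster currently supports and has finalized.
              FeatureMetadata metadata = admin.describeFeatures().featureMetadata().get();
              System.out.println("Finalized: " + metadata.finalizedFeatures());
              System.out.println("Supported: " + metadata.supportedFeatures());
              // Attempt an upgrade of one finalized feature to a higher max version level.
              FeatureUpdate update = new FeatureUpdate((short) 21, FeatureUpdate.UpgradeType.UPGRADE);
              admin.updateFeatures(Map.of("metadata.version", update), new UpdateFeaturesOptions()).all().get();
          }
      }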

      describeMetadataQuorum

      +
      default DescribeMetadataQuorumResult describeMetadataQuorum()
      +
      Describes the state of the metadata quorum. +

      + This is a convenience method for describeMetadataQuorum(DescribeMetadataQuorumOptions) with default options. + See the overload for more details.

      +
      +
      Returns:
      +
      the DescribeMetadataQuorumResult containing the result
      +
      +
      +
    • +
    • +
      +

      describeMetadataQuorum

      DescribeMetadataQuorumResult describeMetadataQuorum(DescribeMetadataQuorumOptions options)

      Describes the state of the metadata quorum.

      The following exceptions can be anticipated when calling get() on the futures obtained from the returned DescribeMetadataQuorumResult.

      Parameters:
      options - The DescribeMetadataQuorumOptions to use when describing the quorum.
      Returns:
      the DescribeMetadataQuorumResult containing the result
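      A minimal sketch of reading the quorum state, assuming an existing Admin instance connected to a KRaft-based cluster:

      import org.apache.kafka.clients.admin.Admin;
      import org.apache.kafka.clients.admin.QuorumInfo;

      class QuorumExample {
          static void printQuorum(Admin admin) throws Exception {
              // QuorumInfo exposes the current leader and the voter/observer replica states.
              QuorumInfo quorum = admin.describeMetadataQuorum().quorumInfo().get();
              System.out.println("Leader: " + quorum.leaderId());
              quorum.voters().forEach(v -> System.out.println("Voter " + v.replicaId()
                  + " logEndOffset=" + v.logEndOffset()));
          }
      }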

      unregisterBroker

      +
      @Unstable +default UnregisterBrokerResult unregisterBroker(int brokerId)
      +
      Unregister a broker. +

      + This operation does not have any effect on partition assignments. + + This is a convenience method for unregisterBroker(int, UnregisterBrokerOptions)

      +
      +
      Parameters:
      +
      brokerId - the broker id to unregister.
      +
      Returns:
      +
      the UnregisterBrokerResult containing the result
      +
      +
      +
    • +
    • +
      +

      unregisterBroker

      +
      @Unstable +UnregisterBrokerResult unregisterBroker(int brokerId, + UnregisterBrokerOptions options)
      +
      Unregister a broker. +

      + This operation does not have any effect on partition assignments. + + The following exceptions can be anticipated when calling get() on the future from the + returned UnregisterBrokerResult: +

      +

      +
      +
      Parameters:
      +
      brokerId - the broker id to unregister.
      +
      options - the options to use.
      +
      Returns:
      +
      the UnregisterBrokerResult containing the result
      +
      +
      +
    • +
    • +
      +

      describeProducers

      +
      default DescribeProducersResult describeProducers(Collection<TopicPartition> partitions)
      +
      Describe producer state on a set of topic partitions. See + describeProducers(Collection, DescribeProducersOptions) for more details.
      +
      +
      Parameters:
      +
      partitions - The set of partitions to query
      +
      Returns:
      +
      The result
      +
      +
      +
    • +
    • +
      +

      describeProducers

      +
      DescribeProducersResult describeProducers(Collection<TopicPartition> partitions, + DescribeProducersOptions options)
      +
      Describe active producer state on a set of topic partitions. Unless a specific broker + is requested through DescribeProducersOptions.brokerId(int), this will + query the partition leader to find the producer state.
      +
      +
      Parameters:
      +
      partitions - The set of partitions to query
      +
      options - Options to control the method behavior
      +
      Returns:
      +
      The result
      +
      +
      +
    • +
    • +
      +

      describeTransactions

      +
      default DescribeTransactionsResult describeTransactions(Collection<String> transactionalIds)
      +
      Describe the state of a set of transactional IDs. See + describeTransactions(Collection, DescribeTransactionsOptions) for more details.
      +
      +
      Parameters:
      +
      transactionalIds - The set of transactional IDs to query
      +
      Returns:
      +
      The result
      +
      +
      +
    • +
    • +
      +

      describeTransactions

      +
      DescribeTransactionsResult describeTransactions(Collection<String> transactionalIds, + DescribeTransactionsOptions options)
      +
      Describe the state of a set of transactional IDs from the respective transaction coordinators, + which are dynamically discovered.
      +
      +
      Parameters:
      +
      transactionalIds - The set of transactional IDs to query
      +
      options - Options to control the method behavior
      +
      Returns:
      +
      The result
      +
      +
      +
    • +
    • +
      +

      abortTransaction

      +
      default AbortTransactionResult abortTransaction(AbortTransactionSpec spec)
      +
      Forcefully abort a transaction which is open on a topic partition. See + abortTransaction(AbortTransactionSpec, AbortTransactionOptions) for more details.
      +
      +
      Parameters:
      +
      spec - The transaction specification including topic partition and producer details
      +
      Returns:
      +
      The result
      +
      +
      +
    • +
    • +
      +

      abortTransaction

      + +
      Forcefully abort a transaction which is open on a topic partition. This will + send a `WriteTxnMarkers` request to the partition leader in order to abort the + transaction. This requires administrative privileges.
      +
      +
      Parameters:
      +
      spec - The transaction specification including topic partition and producer details
      +
      options - Options to control the method behavior (including filters)
      +
      Returns:
      +
      The result
      +
      +
      +
    • +
    • +
      +

      listTransactions

      +
      default ListTransactionsResult listTransactions()
      +
      List active transactions in the cluster. See + listTransactions(ListTransactionsOptions) for more details.
      +
      +
      Returns:
      +
      The result
      +
      +
      +
    • +
    • +
      +

      listTransactions

      + +
      List active transactions in the cluster. This will query all potential transaction + coordinators in the cluster and collect the state of all transactions. Users + should typically attempt to reduce the size of the result set using + ListTransactionsOptions.filterProducerIds(Collection) or + ListTransactionsOptions.filterStates(Collection) or + ListTransactionsOptions.filterOnDuration(long).
      +
      +
      Parameters:
      +
      options - Options to control the method behavior (including filters)
      +
      Returns:
      +
      The result
      +
      +
      +
    • +
    • +
      +

      fenceProducers

      +
      default FenceProducersResult fenceProducers(Collection<String> transactionalIds)
      +
      Fence out all active producers that use any of the provided transactional IDs, with the default options. +

      + This is a convenience method for fenceProducers(Collection, FenceProducersOptions) + with default options. See the overload for more details.

      +
      +
      Parameters:
      +
      transactionalIds - The IDs of the producers to fence.
      +
      Returns:
      +
      The FenceProducersResult.
      +
      +
      +
    • +
    • +
      +

      fenceProducers

      FenceProducersResult fenceProducers(Collection<String> transactionalIds, FenceProducersOptions options)

      Fence out all active producers that use any of the provided transactional IDs.

      Parameters:
      transactionalIds - The IDs of the producers to fence.
      options - The options to use when fencing the producers.
      Returns:
      The FenceProducersResult.
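      The transaction inspection and fencing methods above are often used together, as in the sketch below. It assumes an existing Admin instance and a producer that uses the transactional ID "payments-tx" (illustrative); fencing bumps that producer's epoch so the old instance can no longer commit.

      import java.util.List;
      import org.apache.kafka.clients.admin.Admin;
      import org.apache.kafka.clients.admin.TransactionListing;

      class TransactionAdminExample {
          static void inspectAndFence(Admin admin) throws Exception {
              // Survey the transaction coordinators for currently known transactions.
              for (TransactionListing listing : admin.listTransactions().all().get()) {
                  System.out.println(listing.transactionalId() + " -> " + listing.state());
              }
              // Forcefully fence a specific transactional producer.
              admin.fenceProducers(List.of("payments-tx")).all().get();
          }
      }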

      listConfigResources

      +
      ListConfigResourcesResult listConfigResources(Set<ConfigResource.Type> configResourceTypes, + ListConfigResourcesOptions options)
      +
      List the configuration resources available in the cluster that match the given config resource types. If no config resource types are specified, all configuration resources will be listed.
      +
      +
      Parameters:
      +
      configResourceTypes - The set of configuration resource types to list.
      +
      options - The options to use when listing the configuration resources.
      +
      Returns:
      +
      The ListConfigurationResourcesResult.
      +
      +
      +
    • +
    • +
      +

      listConfigResources

      +
      default ListConfigResourcesResult listConfigResources()
      +
      List all configuration resources available in the cluster with the default options. +

      + This is a convenience method for listConfigResources(Set, ListConfigResourcesOptions) + with default options. See the overload for more details.

      +
      +
      Returns:
      +
      The ListConfigurationResourcesResult.
      +
      +
      +
    • +
    • +
      +

      listClientMetricsResources

      + +
      Deprecated, for removal: This API element is subject to removal in a future version. + +
      +
      List the client metrics configuration resources available in the cluster.
      +
      +
      Parameters:
      +
      options - The options to use when listing the client metrics resources.
      +
      Returns:
      +
      The ListClientMetricsResourcesResult.
      +
      +
      +
    • +
    • +
      +

      listClientMetricsResources

      +
      @Deprecated(since="4.1", + forRemoval=true) +default ListClientMetricsResourcesResult listClientMetricsResources()
      +
      Deprecated, for removal: This API element is subject to removal in a future version. +
      Since 4.1. Use listConfigResources() instead.
      +
      +
      List the client metrics configuration resources available in the cluster with the default options. +

      + This is a convenience method for listClientMetricsResources(ListClientMetricsResourcesOptions) + with default options. See the overload for more details.

      +
      +
      Returns:
      +
      The ListClientMetricsResourcesResult.
      +
      +
      +
    • +
    • +
      +

      clientInstanceId

      +
      Uuid clientInstanceId(Duration timeout)
      +
      Determines the client's unique client instance ID used for telemetry. This ID is unique to + this specific client instance and will not change after it is initially generated. + The ID is useful for correlating client operations with telemetry sent to the broker and + to its eventual monitoring destinations. +

      + If telemetry is enabled, this will first require a connection to the cluster to generate + the unique client instance ID. This method waits up to timeout for the admin + client to complete the request. +

      + Client telemetry is controlled by the AdminClientConfig.ENABLE_METRICS_PUSH_CONFIG + configuration option.

      +
      +
      Parameters:
      +
      timeout - The maximum time to wait for admin client to determine its client instance ID. + The value must be non-negative. Specifying a timeout of zero means do not + wait for the initial request to complete if it hasn't already.
      +
      Returns:
      +
      The client's assigned instance id used for metrics collection.
      +
      Throws:
      +
      InterruptException - If the thread is interrupted while blocked.
      +
      KafkaException - If an unexpected error occurs while trying to determine the client + instance ID, though this error does not necessarily imply the + admin client is otherwise unusable.
      +
      IllegalArgumentException - If the timeout is negative.
      +
      IllegalStateException - If telemetry is not enabled, i.e. config `enable.metrics.push` is set to `false`.
      +
      +
      +
    • +
    • +
      +

      addRaftVoter

      +
      default AddRaftVoterResult addRaftVoter(int voterId, + Uuid voterDirectoryId, + Set<RaftVoterEndpoint> endpoints)
      +
      Add a new voter node to the KRaft metadata quorum.
      +
      +
      Parameters:
      +
      voterId - The node ID of the voter.
      +
      voterDirectoryId - The directory ID of the voter.
      +
      endpoints - The endpoints that the new voter has.
      +
      +
      +
    • +
    • +
      +

      addRaftVoter

      +
      AddRaftVoterResult addRaftVoter(int voterId, + Uuid voterDirectoryId, + Set<RaftVoterEndpoint> endpoints, + AddRaftVoterOptions options)
      +
      Add a new voter node to the KRaft metadata quorum.
      +
      +
      Parameters:
      +
      voterId - The node ID of the voter.
      +
      voterDirectoryId - The directory ID of the voter.
      +
      endpoints - The endpoints that the new voter has.
      +
      options - The options to use when adding the new voter node.
      +
      +
      +
    • +
    • +
      +

      removeRaftVoter

      +
      default RemoveRaftVoterResult removeRaftVoter(int voterId, + Uuid voterDirectoryId)
      +
      Remove a voter node from the KRaft metadata quorum.
      +
      +
      Parameters:
      +
      voterId - The node ID of the voter.
      +
      voterDirectoryId - The directory ID of the voter.
      +
      +
      +
    • +
    • +
      +

      removeRaftVoter

      +
      RemoveRaftVoterResult removeRaftVoter(int voterId, + Uuid voterDirectoryId, + RemoveRaftVoterOptions options)
      +
      Remove a voter node from the KRaft metadata quorum.
      +
      +
      Parameters:
      +
      voterId - The node ID of the voter.
      +
      voterDirectoryId - The directory ID of the voter.
      +
      options - The options to use when removing the voter node.
      +
      +
      +
    • +
    • +
      +
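      The addRaftVoter and removeRaftVoter methods above manage dynamic KRaft quorum membership. The sketch below is illustrative only: it assumes an existing Admin instance, a new controller with node ID 4, a controller listener named "CONTROLLER" on host controller-4:9093, a made-up directory ID, and the usual all() accessor on the returned results.

      import java.util.Set;
      import org.apache.kafka.clients.admin.Admin;
      import org.apache.kafka.clients.admin.RaftVoterEndpoint;
      import org.apache.kafka.common.Uuid;

      class RaftVoterExample {
          static void addThenRemoveVoter(Admin admin) throws Exception {
              Uuid directoryId = Uuid.fromString("Mk3OEYBSB0W_M_M_MKd1RA"); // illustrative directory ID
              // Promote node 4 to a voting member of the metadata quorum.
              admin.addRaftVoter(4, directoryId,
                  Set.of(new RaftVoterEndpoint("CONTROLLER", "controller-4", 9093))).all().get();
              // Later, demote it again with the default options.
              admin.removeRaftVoter(4, directoryId).all().get();
          }
      }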

      describeShareGroups

      +
      DescribeShareGroupsResult describeShareGroups(Collection<String> groupIds, + DescribeShareGroupsOptions options)
      +
      Describe some share groups in the cluster.
      +
      +
      Parameters:
      +
      groupIds - The IDs of the groups to describe.
      +
      options - The options to use when describing the groups.
      +
      Returns:
      +
      The DescribeShareGroupsResult.
      +
      +
      +
    • +
    • +
      +

      describeShareGroups

      +
      default DescribeShareGroupsResult describeShareGroups(Collection<String> groupIds)
      +
      Describe some share groups in the cluster, with the default options. +

      + This is a convenience method for describeShareGroups(Collection, DescribeShareGroupsOptions) + with default options. See the overload for more details.

      +
      +
      Parameters:
      +
      groupIds - The IDs of the groups to describe.
      +
      Returns:
      +
      The DescribeShareGroupsResult.
      +
      +
      +
    • +
    • +
      +

      alterShareGroupOffsets

      +
      AlterShareGroupOffsetsResult alterShareGroupOffsets(String groupId, + Map<TopicPartition,Long> offsets, + AlterShareGroupOffsetsOptions options)
      +
      Alters offsets for the specified group. In order to succeed, the group must be empty. + +

      This operation is not transactional, so it may succeed for some partitions while fail for others.

      +
      +
      Parameters:
      +
      groupId - The group for which to alter offsets.
      +
      offsets - A map of offsets by partition. Partitions not specified in the map are ignored.
      +
      options - The options to use when altering the offsets.
      +
      Returns:
      +
      The AlterShareGroupOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      alterShareGroupOffsets

      +
      default AlterShareGroupOffsetsResult alterShareGroupOffsets(String groupId, + Map<TopicPartition,Long> offsets)
      +
      Alters offsets for the specified group. In order to succeed, the group must be empty. + +

      This is a convenience method for alterShareGroupOffsets(String, Map, AlterShareGroupOffsetsOptions) with default options. + See the overload for more details.

      +
      +
      Parameters:
      +
      groupId - The group for which to alter offsets.
      +
      offsets - A map of offsets by partition.
      +
      Returns:
      +
      The AlterShareGroupOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      listShareGroupOffsets

      + +
      List the share group offsets available in the cluster for the specified share groups.
      +
      +
      Parameters:
      +
      groupSpecs - Map of share group ids to a spec that specifies the topic partitions of the group to list offsets for.
      +
      options - The options to use when listing the share group offsets.
      +
      Returns:
      +
      The ListShareGroupOffsetsResult
      +
      +
      +
    • +
    • +
      +

      listShareGroupOffsets

      +
      default ListShareGroupOffsetsResult listShareGroupOffsets(Map<String,ListShareGroupOffsetsSpec> groupSpecs)
      +
      List the share group offsets available in the cluster for the specified share groups with the default options. + +

      This is a convenience method for listShareGroupOffsets(Map, ListShareGroupOffsetsOptions) + to list offsets of all partitions for the specified share groups with default options.

      +
      +
      Parameters:
      +
      groupSpecs - Map of share group ids to a spec that specifies the topic partitions of the group to list offsets for.
      +
      Returns:
      +
      The ListShareGroupOffsetsResult
      +
      +
      +
    • +
    • +
      +

      deleteShareGroupOffsets

      +
      DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, + Set<String> topics, + DeleteShareGroupOffsetsOptions options)
      +
      Delete offsets for a set of topics in a share group.
      +
      +
      Parameters:
      +
      groupId - The group for which to delete offsets.
      +
      topics - The topics for which to delete offsets.
      +
      options - The options to use when deleting offsets in a share group.
      +
      Returns:
      +
      The DeleteShareGroupOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      deleteShareGroupOffsets

      +
      default DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, + Set<String> topics)
      +
      Delete offsets for a set of topics in a share group with the default options. + +

      + This is a convenience method for deleteShareGroupOffsets(String, Set, DeleteShareGroupOffsetsOptions) with default options. + See the overload for more details.

      +
      +
      Parameters:
      +
      groupId - The group for which to delete offsets.
      +
      topics - The topics for which to delete offsets.
      +
      Returns:
      +
      The DeleteShareGroupOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      deleteShareGroups

      +
      DeleteShareGroupsResult deleteShareGroups(Collection<String> groupIds, + DeleteShareGroupsOptions options)
      +
      Delete share groups from the cluster.
      +
      +
      Parameters:
      +
      groupIds - Collection of share group ids which are to be deleted.
      +
      options - The options to use when deleting a share group.
      +
      Returns:
      +
      The DeleteShareGroupsResult.
      +
      +
      +
    • +
    • +
      +

      deleteShareGroups

      +
      default DeleteShareGroupsResult deleteShareGroups(Collection<String> groupIds)
      +
      Delete share groups from the cluster with the default options.
      +
      +
      Parameters:
      +
      groupIds - Collection of share group ids which are to be deleted.
      +
      Returns:
      +
      The DeleteShareGroupsResult.
      +
      +
      +
    • +
    • +
      +

      describeStreamsGroups

      +
      DescribeStreamsGroupsResult describeStreamsGroups(Collection<String> groupIds, + DescribeStreamsGroupsOptions options)
      +
      Describe streams groups in the cluster.
      +
      +
      Parameters:
      +
      groupIds - The IDs of the groups to describe.
      +
      options - The options to use when describing the groups.
      +
      Returns:
      +
      The DescribeStreamsGroupsResult.
      +
      +
      +
    • +
    • +
      +

      describeStreamsGroups

      +
      default DescribeStreamsGroupsResult describeStreamsGroups(Collection<String> groupIds)
      +
      Describe streams groups in the cluster, with the default options. +

      + This is a convenience method for describeStreamsGroups(Collection, DescribeStreamsGroupsOptions) + with default options. See the overload for more details.

      +
      +
      Parameters:
      +
      groupIds - The IDs of the groups to describe.
      +
      Returns:
      +
      The DescribeStreamsGroupsResult.
      +
      +
      +
    • +
    • +
      +

      describeClassicGroups

      +
      DescribeClassicGroupsResult describeClassicGroups(Collection<String> groupIds, + DescribeClassicGroupsOptions options)
      +
      Describe some classic groups in the cluster.
      +
      +
      Parameters:
      +
      groupIds - The IDs of the groups to describe.
      +
      options - The options to use when describing the groups.
      +
      Returns:
      +
      The DescribeClassicGroupsResult.
      +
      +
      +
    • +
    • +
      +

      describeClassicGroups

      +
      default DescribeClassicGroupsResult describeClassicGroups(Collection<String> groupIds)
      +
      Describe some classic groups in the cluster, with the default options. +

      + This is a convenience method for describeClassicGroups(Collection, DescribeClassicGroupsOptions) + with default options. See the overload for more details.

      +
      +
      Parameters:
      +
      groupIds - The IDs of the groups to describe.
      +
      Returns:
      +
      The DescribeClassicGroupsResult.
      +
      +
      +
    • +
    • +
      +

      registerMetricForSubscription

      +
      void registerMetricForSubscription(KafkaMetric metric)
      +
      Add the provided application metric for subscription. + This metric will be added to this client's metrics + that are available for subscription and sent as + telemetry data to the broker. + The provided metric must map to an OTLP metric data point + type in the OpenTelemetry v1 metrics protobuf message types. + Specifically, the metric should be one of the following: +
        +
      • + `Sum`: Monotonic total count meter (Counter). Suitable for metrics like total number of X, e.g., total bytes sent. +
      • +
      • + `Gauge`: Non-monotonic current value meter (UpDownCounter). Suitable for metrics like current value of Y, e.g., current queue count. +
      • +
      Metrics not matching these types are silently ignored. Executing this method for a previously registered metric is a benign operation and results in updating that metric's entry.
      +
      +
      Parameters:
      +
      metric - The application metric to register
      +
      +
      +
    • +
    • +
      +

      unregisterMetricFromSubscription

      +
      void unregisterMetricFromSubscription(KafkaMetric metric)
      +
      Remove the provided application metric for subscription. + This metric is removed from this client's metrics + and will not be available for subscription any longer. + Executing this method with a metric that has not been registered is a + benign operation and does not result in any action taken (no-op).
      +
      +
      Parameters:
      +
      metric - The application metric to remove
      +
      +
      +
    • +
    • +
      +

      metrics

      +
      Map<MetricName,? extends Metric> metrics()
      +
      Get the metrics kept by the adminClient
      +
      +
    • +
    • +
      +

      forceTerminateTransaction

      +
      default TerminateTransactionResult forceTerminateTransaction(String transactionalId)
      +
      Force terminate a transaction for the given transactional ID with the default options. +

      + This is a convenience method for forceTerminateTransaction(String, TerminateTransactionOptions) + with default options.

      +
      +
      Parameters:
      +
      transactionalId - The ID of the transaction to terminate.
      +
      Returns:
      +
      The TerminateTransactionResult.
      +
      +
      +
    • +
    • +
      +

      forceTerminateTransaction

      +
      TerminateTransactionResult forceTerminateTransaction(String transactionalId, + TerminateTransactionOptions options)
      +
      Force terminate a transaction for the given transactional ID. + This operation aborts any ongoing transaction associated with the transactional ID. + It's similar to fenceProducers but only targets a single transactional ID to handle + long-running transactions when 2PC is enabled.
      +
      +
      Parameters:
      +
      transactionalId - The ID of the transaction to terminate.
      +
      options - The options to use when terminating the transaction.
      +
      Returns:
      +
      The TerminateTransactionResult.
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AdminClient.html b/static/41/javadoc/org/apache/kafka/clients/admin/AdminClient.html new file mode 100644 index 000000000..61982c5ce --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AdminClient.html @@ -0,0 +1,202 @@ + + + + +AdminClient (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AdminClient

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AdminClient
+
+
+
+
All Implemented Interfaces:
+
AutoCloseable, Admin
+
+
+
Direct Known Subclasses:
+
KafkaAdminClient
+
+
+
public abstract class AdminClient +extends Object +implements Admin
+
The base class for in-built admin clients. + + Client code should use the newer Admin interface in preference to this class. + + This class may be removed in a later release, but has not been marked as deprecated to avoid unnecessary noise.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AdminClient

      +
      public AdminClient()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      create

      +
      public static AdminClient create(Properties props)
      +
      Create a new Admin with the given configuration.
      +
      +
      Parameters:
      +
      props - The configuration.
      +
      Returns:
      +
      The new KafkaAdminClient.
      +
      +
      +
    • +
    • +
      +

      create

      +
      public static AdminClient create(Map<String,Object> conf)
      +
      Create a new Admin with the given configuration.
      +
      +
      Parameters:
      +
      conf - The configuration.
      +
      Returns:
      +
      The new KafkaAdminClient.
      +
      +
      +
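A minimal sketch of bootstrapping a client through either factory; the bootstrap address, client id and timeout values below are placeholders.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

import java.util.Properties;

public class CreateAdminClientSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(AdminClientConfig.CLIENT_ID_CONFIG, "docs-admin-example");      // hypothetical client id
        props.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "30000");

        // AdminClient.create(...) and Admin.create(...) are interchangeable here;
        // programming against the Admin interface is the recommended style.
        try (Admin admin = AdminClient.create(props)) {
            System.out.println("Cluster id: " + admin.describeCluster().clusterId().get());
        }
    }
}
```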
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AdminClientConfig.html b/static/41/javadoc/org/apache/kafka/clients/admin/AdminClientConfig.html new file mode 100644 index 000000000..a18a9a726 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AdminClientConfig.html @@ -0,0 +1,797 @@ + + + + +AdminClientConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AdminClientConfig

+
+
java.lang.Object +
org.apache.kafka.common.config.AbstractConfig +
org.apache.kafka.clients.admin.AdminClientConfig
+
+
+
+
+
public class AdminClientConfig +extends AbstractConfig
+
The AdminClient configuration class, which also contains constants for configuration entry names.
+
+
+ +
+
+
    + +
  • +
    +

    Field Details

    +
      +
    • +
      +

      BOOTSTRAP_SERVERS_CONFIG

      +
      public static final String BOOTSTRAP_SERVERS_CONFIG
      +
      bootstrap.servers
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      BOOTSTRAP_CONTROLLERS_CONFIG

      +
      public static final String BOOTSTRAP_CONTROLLERS_CONFIG
      +
      bootstrap.controllers
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      BOOTSTRAP_CONTROLLERS_DOC

      +
      public static final String BOOTSTRAP_CONTROLLERS_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      CLIENT_DNS_LOOKUP_CONFIG

      +
      public static final String CLIENT_DNS_LOOKUP_CONFIG
      +
      client.dns.lookup
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RECONNECT_BACKOFF_MS_CONFIG

      +
      public static final String RECONNECT_BACKOFF_MS_CONFIG
      +
      reconnect.backoff.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RECONNECT_BACKOFF_MAX_MS_CONFIG

      +
      public static final String RECONNECT_BACKOFF_MAX_MS_CONFIG
      +
      reconnect.backoff.max.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RETRY_BACKOFF_MS_CONFIG

      +
      public static final String RETRY_BACKOFF_MS_CONFIG
      +
      retry.backoff.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RETRY_BACKOFF_MAX_MS_CONFIG

      +
      public static final String RETRY_BACKOFF_MAX_MS_CONFIG
      +
      retry.backoff.max.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      ENABLE_METRICS_PUSH_CONFIG

      +
      public static final String ENABLE_METRICS_PUSH_CONFIG
      +
      enable.metrics.push
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      ENABLE_METRICS_PUSH_DOC

      +
      public static final String ENABLE_METRICS_PUSH_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG

      +
      public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG
      +
      socket.connection.setup.timeout.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG

      +
      public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG
      +
      socket.connection.setup.timeout.max.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      CONNECTIONS_MAX_IDLE_MS_CONFIG

      +
      public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG
      +
      connections.max.idle.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      REQUEST_TIMEOUT_MS_CONFIG

      +
      public static final String REQUEST_TIMEOUT_MS_CONFIG
      +
      request.timeout.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      CLIENT_ID_CONFIG

      +
      public static final String CLIENT_ID_CONFIG
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METADATA_MAX_AGE_CONFIG

      +
      public static final String METADATA_MAX_AGE_CONFIG
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SEND_BUFFER_CONFIG

      +
      public static final String SEND_BUFFER_CONFIG
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RECEIVE_BUFFER_CONFIG

      +
      public static final String RECEIVE_BUFFER_CONFIG
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METRIC_REPORTER_CLASSES_CONFIG

      +
      public static final String METRIC_REPORTER_CLASSES_CONFIG
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METRICS_NUM_SAMPLES_CONFIG

      +
      public static final String METRICS_NUM_SAMPLES_CONFIG
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METRICS_SAMPLE_WINDOW_MS_CONFIG

      +
      public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METRICS_RECORDING_LEVEL_CONFIG

      +
      public static final String METRICS_RECORDING_LEVEL_CONFIG
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SECURITY_PROTOCOL_CONFIG

      +
      public static final String SECURITY_PROTOCOL_CONFIG
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_SECURITY_PROTOCOL

      +
      public static final String DEFAULT_SECURITY_PROTOCOL
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RETRIES_CONFIG

      +
      public static final String RETRIES_CONFIG
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_API_TIMEOUT_MS_CONFIG

      +
      public static final String DEFAULT_API_TIMEOUT_MS_CONFIG
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METADATA_RECOVERY_STRATEGY_CONFIG

      +
      public static final String METADATA_RECOVERY_STRATEGY_CONFIG
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METADATA_RECOVERY_STRATEGY_DOC

      +
      public static final String METADATA_RECOVERY_STRATEGY_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_METADATA_RECOVERY_STRATEGY

      +
      public static final String DEFAULT_METADATA_RECOVERY_STRATEGY
      +
      +
    • +
    • +
      +

      METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG

      +
      public static final String METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC

      +
      public static final String METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS

      +
      public static final long DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SECURITY_PROVIDERS_CONFIG

      +
      public static final String SECURITY_PROVIDERS_CONFIG
      +
      security.providers
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AdminClientConfig

      +
      public AdminClientConfig(Map<?,?> props)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      configNames

      +
      public static Set<String> configNames()
      +
      +
    • +
    • +
      +

      configDef

      +
      public static ConfigDef configDef()
      +
      +
    • +
    • +
      +

      main

      +
      public static void main(String[] args)
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterClientQuotasOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterClientQuotasOptions.html new file mode 100644 index 000000000..97ffc0af5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterClientQuotasOptions.html @@ -0,0 +1,179 @@ + + + + +AlterClientQuotasOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterClientQuotasOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<AlterClientQuotasOptions> +
org.apache.kafka.clients.admin.AlterClientQuotasOptions
+
+
+
+
+
public class AlterClientQuotasOptions +extends AbstractOptions<AlterClientQuotasOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AlterClientQuotasOptions

      +
      public AlterClientQuotasOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      validateOnly

      +
      public boolean validateOnly()
      +
      Returns whether the request should be validated without altering the configs.
      +
      +
    • +
    • +
      +

      validateOnly

      +
      public AlterClientQuotasOptions validateOnly(boolean validateOnly)
      +
      Sets whether the request should be validated without altering the configs.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterClientQuotasResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterClientQuotasResult.html new file mode 100644 index 000000000..6ddfa325b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterClientQuotasResult.html @@ -0,0 +1,181 @@ + + + + +AlterClientQuotasResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterClientQuotasResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AlterClientQuotasResult
+
+
+
+
public class AlterClientQuotasResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AlterClientQuotasResult

      +
      public AlterClientQuotasResult(Map<ClientQuotaEntity,KafkaFuture<Void>> futures)
      +
      Maps an entity to its alteration result.
      +
      +
      Parameters:
      +
      futures - maps entity to its alteration result
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      + +
      Returns a map from quota entity to a future which can be used to check the status of the operation.
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Returns a future which succeeds only if all quota alterations succeed.
      +
      +
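To show how the options and result types fit together, here is a sketch of a validate-only quota change; the broker address, user name and quota key/value are illustrative.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterClientQuotasOptions;
import org.apache.kafka.clients.admin.AlterClientQuotasResult;
import org.apache.kafka.common.quota.ClientQuotaAlteration;
import org.apache.kafka.common.quota.ClientQuotaEntity;

import java.util.List;
import java.util.Map;
import java.util.Properties;

public class AlterClientQuotasSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address

        try (Admin admin = Admin.create(props)) {
            // Quota entity for a hypothetical user "alice".
            ClientQuotaEntity entity = new ClientQuotaEntity(Map.of(ClientQuotaEntity.USER, "alice"));
            ClientQuotaAlteration alteration = new ClientQuotaAlteration(
                    entity,
                    List.of(new ClientQuotaAlteration.Op("producer_byte_rate", 1_048_576.0)));

            // validateOnly(true) asks the broker to check the change without applying it.
            AlterClientQuotasResult result = admin.alterClientQuotas(
                    List.of(alteration),
                    new AlterClientQuotasOptions().validateOnly(true));

            result.values().get(entity).get(); // per-entity future
            result.all().get();                // or wait for every alteration
        }
    }
}
```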
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterConfigOp.OpType.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterConfigOp.OpType.html new file mode 100644 index 000000000..2741553c3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterConfigOp.OpType.html @@ -0,0 +1,274 @@ + + + + +AlterConfigOp.OpType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Enum Class AlterConfigOp.OpType

+
+
java.lang.Object +
java.lang.Enum<AlterConfigOp.OpType> +
org.apache.kafka.clients.admin.AlterConfigOp.OpType
+
+
+
+
+
All Implemented Interfaces:
+
Serializable, Comparable<AlterConfigOp.OpType>, Constable
+
+
+
Enclosing class:
+
AlterConfigOp
+
+
+
public static enum AlterConfigOp.OpType +extends Enum<AlterConfigOp.OpType>
+
+
+
    + +
  • +
    +

    Nested Class Summary

    +
    +

    Nested classes/interfaces inherited from class java.lang.Enum

    +Enum.EnumDesc<E extends Enum<E>>
    +
    +
  • + +
  • +
    +

    Enum Constant Summary

    +
    Enum Constants
    +
    +
    Enum Constant
    +
    Description
    + +
    +
(For list-type configuration entries only.) Add the specified values to the current value of the configuration entry.
    +
    + +
    +
    Revert the configuration entry to the default value (possibly null).
    +
    + +
    +
    Set the value of the configuration entry.
    +
    + +
    +
(For list-type configuration entries only.) Removes the specified values from the current value of the configuration entry.
    +
    +
    +
    +
  • + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    + +
    forId(byte id)
    +
     
    +
    byte
    +
    id()
    +
     
    + + +
    +
    Returns the enum constant of this class with the specified name.
    +
    + + +
    +
Returns an array containing the constants of this enum class, in the order they are declared.
    +
    +
    +
    +
    +
    +

    Methods inherited from class java.lang.Enum

    +compareTo, describeConstable, equals, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
    +
    +

    Methods inherited from class java.lang.Object

    +getClass, notify, notifyAll, wait, wait, wait
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Enum Constant Details

    +
      +
    • +
      +

      SET

      +
      public static final AlterConfigOp.OpType SET
      +
      Set the value of the configuration entry.
      +
      +
    • +
    • +
      +

      DELETE

      +
      public static final AlterConfigOp.OpType DELETE
      +
      Revert the configuration entry to the default value (possibly null).
      +
      +
    • +
    • +
      +

      APPEND

      +
      public static final AlterConfigOp.OpType APPEND
      +
(For list-type configuration entries only.) Add the specified values to the current value of the configuration entry. If the configuration value has not been set, adds to the default value.
      +
      +
    • +
    • +
      +

      SUBTRACT

      +
      public static final AlterConfigOp.OpType SUBTRACT
      +
(For list-type configuration entries only.) Removes the specified values from the current value of the configuration entry. It is legal to remove values that are not currently in the configuration entry. Removing all entries from the current configuration value leaves an empty list and does NOT revert to the default value of the entry.
      +
      +
    • +
    +
    +
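The enum constants above map directly onto incrementalAlterConfigs operations. A minimal sketch, with a hypothetical topic and config values:

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;

public class OpTypeSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address

        try (Admin admin = Admin.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "orders"); // hypothetical topic

            // SET overwrites a single value; APPEND adds to a list-type entry such as cleanup.policy.
            Collection<AlterConfigOp> ops = List.of(
                    new AlterConfigOp(new ConfigEntry("retention.ms", "604800000"), AlterConfigOp.OpType.SET),
                    new AlterConfigOp(new ConfigEntry("cleanup.policy", "compact"), AlterConfigOp.OpType.APPEND));

            admin.incrementalAlterConfigs(Map.of(topic, ops)).all().get();
        }
    }
}
```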
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public static AlterConfigOp.OpType[] values()
      +
Returns an array containing the constants of this enum class, in the order they are declared.
      +
      +
      Returns:
      +
      an array containing the constants of this enum class, in the order they are declared
      +
      +
      +
    • +
    • +
      +

      valueOf

      +
      public static AlterConfigOp.OpType valueOf(String name)
      +
Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not permitted.)
      +
      +
      Parameters:
      +
      name - the name of the enum constant to be returned.
      +
      Returns:
      +
      the enum constant with the specified name
      +
      Throws:
      +
      IllegalArgumentException - if this enum class has no constant with the specified name
      +
      NullPointerException - if the argument is null
      +
      +
      +
    • +
    • +
      +

      id

      +
      public byte id()
      +
      +
    • +
    • +
      +

      forId

      +
      public static AlterConfigOp.OpType forId(byte id)
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterConfigOp.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterConfigOp.html new file mode 100644 index 000000000..4a0f8163e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterConfigOp.html @@ -0,0 +1,224 @@ + + + + +AlterConfigOp (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterConfigOp

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AlterConfigOp
+
+
+
+
public class AlterConfigOp +extends Object
+
A class representing an alter configuration entry containing name, value and operation type.
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterConfigsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterConfigsOptions.html new file mode 100644 index 000000000..a5f5a6a6f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterConfigsOptions.html @@ -0,0 +1,197 @@ + + + + +AlterConfigsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterConfigsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<AlterConfigsOptions> +
org.apache.kafka.clients.admin.AlterConfigsOptions
+
+
+
+
+
public class AlterConfigsOptions +extends AbstractOptions<AlterConfigsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AlterConfigsOptions

      +
      public AlterConfigsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      timeoutMs

      +
      public AlterConfigsOptions timeoutMs(Integer timeoutMs)
      +
Set the timeout in milliseconds for this operation or null if the default api timeout for the AdminClient should be used.
      +
      +
      Overrides:
      +
      timeoutMs in class AbstractOptions<AlterConfigsOptions>
      +
      +
      +
    • +
    • +
      +

      shouldValidateOnly

      +
      public boolean shouldValidateOnly()
      +
      Return true if the request should be validated without altering the configs.
      +
      +
    • +
    • +
      +

      validateOnly

      +
      public AlterConfigsOptions validateOnly(boolean validateOnly)
      +
      Set to true if the request should be validated without altering the configs.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterConfigsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterConfigsResult.html new file mode 100644 index 000000000..d29cfcbed --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterConfigsResult.html @@ -0,0 +1,147 @@ + + + + +AlterConfigsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterConfigsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AlterConfigsResult
+
+
+
+
public class AlterConfigsResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public Map<ConfigResource,KafkaFuture<Void>> values()
      +
      Return a map from resources to futures which can be used to check the status of the operation on each resource.
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds only if all the alter configs operations succeed.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterConsumerGroupOffsetsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterConsumerGroupOffsetsOptions.html new file mode 100644 index 000000000..75b9aa797 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterConsumerGroupOffsetsOptions.html @@ -0,0 +1,133 @@ + + + + +AlterConsumerGroupOffsetsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterConsumerGroupOffsetsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<AlterConsumerGroupOffsetsOptions> +
org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsOptions
+
+
+
+
+
public class AlterConsumerGroupOffsetsOptions +extends AbstractOptions<AlterConsumerGroupOffsetsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AlterConsumerGroupOffsetsOptions

      +
      public AlterConsumerGroupOffsetsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterConsumerGroupOffsetsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterConsumerGroupOffsetsResult.html new file mode 100644 index 000000000..9ad15b251 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterConsumerGroupOffsetsResult.html @@ -0,0 +1,147 @@ + + + + +AlterConsumerGroupOffsetsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterConsumerGroupOffsetsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsResult
+
+
+
+
public class AlterConsumerGroupOffsetsResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      partitionResult

      +
      public KafkaFuture<Void> partitionResult(TopicPartition partition)
      +
      Return a future which can be used to check the result for a given partition.
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds if all the alter offsets succeed.
      +
      +
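As a concrete illustration of the two futures exposed here, a sketch that rewinds one partition of a hypothetical group (group id, topic and offset are placeholders; note that offsets can normally only be altered for a group with no active members).

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsResult;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;
import java.util.Properties;

public class AlterGroupOffsetsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address

        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("orders", 0); // hypothetical topic-partition

            AlterConsumerGroupOffsetsResult result = admin.alterConsumerGroupOffsets(
                    "orders-processor",                   // hypothetical group id
                    Map.of(tp, new OffsetAndMetadata(42L)));

            result.partitionResult(tp).get(); // check a single partition...
            result.all().get();               // ...or wait for all of them
        }
    }
}
```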
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterPartitionReassignmentsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterPartitionReassignmentsOptions.html new file mode 100644 index 000000000..a198a3261 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterPartitionReassignmentsOptions.html @@ -0,0 +1,185 @@ + + + + +AlterPartitionReassignmentsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterPartitionReassignmentsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<AlterPartitionReassignmentsOptions> +
org.apache.kafka.clients.admin.AlterPartitionReassignmentsOptions
+
+
+
+
+
public class AlterPartitionReassignmentsOptions +extends AbstractOptions<AlterPartitionReassignmentsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AlterPartitionReassignmentsOptions

      +
      public AlterPartitionReassignmentsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      allowReplicationFactorChange

      +
      public AlterPartitionReassignmentsOptions allowReplicationFactorChange(boolean allow)
      +
Set the option indicating if the alter partition reassignments call should be allowed to alter the replication factor of a partition. In cases where it is not allowed, any replication factor change will result in an exception thrown by the API.
      +
      +
    • +
    • +
      +

      allowReplicationFactorChange

      +
      public boolean allowReplicationFactorChange()
      +
A boolean indicating if the alter partition reassignments should be allowed to alter the replication factor of a partition. In cases where it is not allowed, any replication factor change will result in an exception thrown by the API.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterPartitionReassignmentsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterPartitionReassignmentsResult.html new file mode 100644 index 000000000..fbf10b053 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterPartitionReassignmentsResult.html @@ -0,0 +1,153 @@ + + + + +AlterPartitionReassignmentsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterPartitionReassignmentsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AlterPartitionReassignmentsResult
+
+
+
+
public class AlterPartitionReassignmentsResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public Map<TopicPartition,KafkaFuture<Void>> values()
      +
Return a map from partitions to futures which can be used to check the status of the reassignment.

Possible error codes:

INVALID_REPLICA_ASSIGNMENT (39) - if the specified replica assignment was not valid, for example if it included negative numbers, repeated numbers, or specified a broker ID that the controller was not aware of.
NO_REASSIGNMENT_IN_PROGRESS (85) - if the request wants to cancel reassignments but none exist.
UNKNOWN (-1)
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds only if all the reassignments were successfully initiated.
      +
      +
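Putting the options and result together, a sketch that moves one partition onto a new replica set; the broker ids, topic and partition below are placeholders.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterPartitionReassignmentsOptions;
import org.apache.kafka.clients.admin.NewPartitionReassignment;
import org.apache.kafka.common.TopicPartition;

import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;

public class ReassignPartitionSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address

        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("orders", 0); // hypothetical partition

            // Place the replicas of orders-0 on brokers 1, 2 and 3; an empty Optional would
            // instead cancel an in-flight reassignment for that partition.
            Map<TopicPartition, Optional<NewPartitionReassignment>> plan =
                    Map.of(tp, Optional.of(new NewPartitionReassignment(List.of(1, 2, 3))));

            // Reject the request if it would change the partition's replication factor.
            AlterPartitionReassignmentsOptions options =
                    new AlterPartitionReassignmentsOptions().allowReplicationFactorChange(false);

            admin.alterPartitionReassignments(plan, options).all().get();
        }
    }
}
```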
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterReplicaLogDirsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterReplicaLogDirsOptions.html new file mode 100644 index 000000000..5a12de436 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterReplicaLogDirsOptions.html @@ -0,0 +1,133 @@ + + + + +AlterReplicaLogDirsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterReplicaLogDirsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<AlterReplicaLogDirsOptions> +
org.apache.kafka.clients.admin.AlterReplicaLogDirsOptions
+
+
+
+
+
public class AlterReplicaLogDirsOptions +extends AbstractOptions<AlterReplicaLogDirsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AlterReplicaLogDirsOptions

      +
      public AlterReplicaLogDirsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterReplicaLogDirsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterReplicaLogDirsResult.html new file mode 100644 index 000000000..bccf081df --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterReplicaLogDirsResult.html @@ -0,0 +1,171 @@ + + + + +AlterReplicaLogDirsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterReplicaLogDirsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AlterReplicaLogDirsResult
+
+
+
+
public class AlterReplicaLogDirsResult +extends Object
+
The result of Admin.alterReplicaLogDirs(Map, AlterReplicaLogDirsOptions). To retrieve the detailed result per specified TopicPartitionReplica, use values(). To retrieve the overall result only, use all().
+
+
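A sketch of the call that produces this result; the broker address, topic, broker id and log directory path are placeholders.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterReplicaLogDirsResult;
import org.apache.kafka.common.TopicPartitionReplica;

import java.util.Map;
import java.util.Properties;

public class MoveReplicaLogDirSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address

        try (Admin admin = Admin.create(props)) {
            // Move the replica of orders-0 hosted on broker 1 to another log directory on that broker.
            TopicPartitionReplica replica = new TopicPartitionReplica("orders", 0, 1);
            AlterReplicaLogDirsResult result =
                    admin.alterReplicaLogDirs(Map.of(replica, "/var/kafka-logs-ssd")); // hypothetical path

            result.all().get(); // or result.values() for the per-replica futures
        }
    }
}
```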
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterShareGroupOffsetsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterShareGroupOffsetsOptions.html new file mode 100644 index 000000000..56d149b70 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterShareGroupOffsetsOptions.html @@ -0,0 +1,136 @@ + + + + +AlterShareGroupOffsetsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterShareGroupOffsetsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<AlterShareGroupOffsetsOptions> +
org.apache.kafka.clients.admin.AlterShareGroupOffsetsOptions
+
+
+
+
+
@Evolving +public class AlterShareGroupOffsetsOptions +extends AbstractOptions<AlterShareGroupOffsetsOptions>
+
Options for the Admin.alterShareGroupOffsets(String, Map, AlterShareGroupOffsetsOptions) call.

The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AlterShareGroupOffsetsOptions

      +
      public AlterShareGroupOffsetsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterShareGroupOffsetsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterShareGroupOffsetsResult.html new file mode 100644 index 000000000..385d95962 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterShareGroupOffsetsResult.html @@ -0,0 +1,150 @@ + + + + +AlterShareGroupOffsetsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterShareGroupOffsetsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AlterShareGroupOffsetsResult
+
+
+
+
@Evolving +public class AlterShareGroupOffsetsResult +extends Object
+
The result of the Admin.alterShareGroupOffsets(String, Map, AlterShareGroupOffsetsOptions) call.

The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      partitionResult

      +
      public KafkaFuture<Void> partitionResult(TopicPartition partition)
      +
      Return a future which can be used to check the result for a given partition.
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds if all the alter offsets succeed.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterStreamsGroupOffsetsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterStreamsGroupOffsetsOptions.html new file mode 100644 index 000000000..148810990 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterStreamsGroupOffsetsOptions.html @@ -0,0 +1,136 @@ + + + + +AlterStreamsGroupOffsetsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterStreamsGroupOffsetsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<AlterStreamsGroupOffsetsOptions> +
org.apache.kafka.clients.admin.AlterStreamsGroupOffsetsOptions
+
+
+
+
+
@Evolving +public class AlterStreamsGroupOffsetsOptions +extends AbstractOptions<AlterStreamsGroupOffsetsOptions>
+
Options for the Admin.alterStreamsGroupOffsets(String, Map, AlterStreamsGroupOffsetsOptions) call.

The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AlterStreamsGroupOffsetsOptions

      +
      public AlterStreamsGroupOffsetsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterStreamsGroupOffsetsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterStreamsGroupOffsetsResult.html new file mode 100644 index 000000000..5cc5b4971 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterStreamsGroupOffsetsResult.html @@ -0,0 +1,150 @@ + + + + +AlterStreamsGroupOffsetsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterStreamsGroupOffsetsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AlterStreamsGroupOffsetsResult
+
+
+
+
@Evolving +public class AlterStreamsGroupOffsetsResult +extends Object
+
The result of the Admin.alterStreamsGroupOffsets(String, Map) call.

The API of this class is evolving, see AdminClient for details.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      partitionResult

      +
      public KafkaFuture<Void> partitionResult(TopicPartition partition)
      +
      Return a future which can be used to check the result for a given partition.
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds if all the alter offsets succeed.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterUserScramCredentialsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterUserScramCredentialsOptions.html new file mode 100644 index 000000000..4e2127a6d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterUserScramCredentialsOptions.html @@ -0,0 +1,133 @@ + + + + +AlterUserScramCredentialsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterUserScramCredentialsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<AlterUserScramCredentialsOptions> +
org.apache.kafka.clients.admin.AlterUserScramCredentialsOptions
+
+
+
+
+
public class AlterUserScramCredentialsOptions +extends AbstractOptions<AlterUserScramCredentialsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AlterUserScramCredentialsOptions

      +
      public AlterUserScramCredentialsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/AlterUserScramCredentialsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/AlterUserScramCredentialsResult.html new file mode 100644 index 000000000..d4ee59ece --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/AlterUserScramCredentialsResult.html @@ -0,0 +1,181 @@ + + + + +AlterUserScramCredentialsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class AlterUserScramCredentialsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AlterUserScramCredentialsResult
+
+
+
+
public class AlterUserScramCredentialsResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      AlterUserScramCredentialsResult

      +
      public AlterUserScramCredentialsResult(Map<String,KafkaFuture<Void>> futures)
      +
      +
      Parameters:
      +
futures - the required map from user names to futures representing the results of the alteration(s) for each user
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public Map<String,KafkaFuture<Void>> values()
      +
Return a map from user names to futures, which can be used to check the status of the alteration(s) for each user.
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds only if all the user SCRAM credential alterations succeed.
      +
      +
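A sketch showing where these futures come from; the broker address, user name, password and iteration count are placeholders.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterUserScramCredentialsResult;
import org.apache.kafka.clients.admin.ScramCredentialInfo;
import org.apache.kafka.clients.admin.ScramMechanism;
import org.apache.kafka.clients.admin.UserScramCredentialAlteration;
import org.apache.kafka.clients.admin.UserScramCredentialUpsertion;

import java.util.List;
import java.util.Properties;

public class AlterScramCredentialsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address

        try (Admin admin = Admin.create(props)) {
            // Create or update a SCRAM-SHA-256 credential for a hypothetical user "alice".
            ScramCredentialInfo info = new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 8192);
            List<UserScramCredentialAlteration> alterations =
                    List.of(new UserScramCredentialUpsertion("alice", info, "alice-secret"));

            AlterUserScramCredentialsResult result = admin.alterUserScramCredentials(alterations);
            result.values().get("alice").get(); // per-user future
            result.all().get();                 // or wait for every alteration
        }
    }
}
```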
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ClassicGroupDescription.html b/static/41/javadoc/org/apache/kafka/clients/admin/ClassicGroupDescription.html new file mode 100644 index 000000000..546d691dc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ClassicGroupDescription.html @@ -0,0 +1,317 @@ + + + + +ClassicGroupDescription (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ClassicGroupDescription

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ClassicGroupDescription
+
+
+
+
public class ClassicGroupDescription +extends Object
+
A detailed description of a single classic group in the cluster.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    + +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      groupId

      +
      public String groupId()
      +
      The id of the classic group.
      +
      +
    • +
    • +
      +

      protocol

      +
      public String protocol()
      +
      The group protocol type.
      +
      +
    • +
    • +
      +

      protocolData

      +
      public String protocolData()
      +
The group protocol data. The meaning depends on the group protocol type. For a classic consumer group, this is the partition assignor name. For a classic connect group, this indicates which Connect protocols are enabled.
      +
      +
    • +
    • +
      +

      isSimpleConsumerGroup

      +
      public boolean isSimpleConsumerGroup()
      +
      If the group is a simple consumer group or not.
      +
      +
    • +
    • +
      +

      members

      +
      public Collection<MemberDescription> members()
      +
      A list of the members of the classic group.
      +
      +
    • +
    • +
      +

      state

      +
      public ClassicGroupState state()
      +
      The classic group state, or UNKNOWN if the state is too new for us to parse.
      +
      +
    • +
    • +
      +

      coordinator

      +
      public Node coordinator()
      +
      The classic group coordinator, or null if the coordinator is not known.
      +
      +
    • +
    • +
      +

      authorizedOperations

      +
      public Set<AclOperation> authorizedOperations()
      +
      authorizedOperations for this group, or null if that information is not known.
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ClientMetricsResourceListing.html b/static/41/javadoc/org/apache/kafka/clients/admin/ClientMetricsResourceListing.html new file mode 100644 index 000000000..2dc7c24c6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ClientMetricsResourceListing.html @@ -0,0 +1,214 @@ + + + + +ClientMetricsResourceListing (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ClientMetricsResourceListing

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ClientMetricsResourceListing
+
+
+
+
@Deprecated(since="4.1") +public class ClientMetricsResourceListing +extends Object
+
Deprecated.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ClientMetricsResourceListing

      +
      public ClientMetricsResourceListing(String name)
      +
      Deprecated.
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      name

      +
      public String name()
      +
      Deprecated.
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      Deprecated.
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      Deprecated.
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      Deprecated.
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/Config.html b/static/41/javadoc/org/apache/kafka/clients/admin/Config.html new file mode 100644 index 000000000..512414119 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/Config.html @@ -0,0 +1,217 @@ + + + + +Config (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class Config

+
+
java.lang.Object +
org.apache.kafka.clients.admin.Config
+
+
+
+
public class Config +extends Object
+
A configuration object containing the configuration entries for a resource. +

+
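A sketch of how a Config is typically obtained and read via describeConfigs; the broker address and topic name are placeholders.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

import java.util.List;
import java.util.Map;
import java.util.Properties;

public class DescribeConfigsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address

        try (Admin admin = Admin.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "orders"); // hypothetical topic

            Map<ConfigResource, Config> configs = admin.describeConfigs(List.of(topic)).all().get();
            Config topicConfig = configs.get(topic);

            // Look up a single entry, or walk all of them.
            ConfigEntry retention = topicConfig.get("retention.ms");
            System.out.println("retention.ms = " + retention.value()
                    + " (source: " + retention.source() + ", default: " + retention.isDefault() + ")");
            topicConfig.entries().forEach(e -> System.out.println(e.name() + " = " + e.value()));
        }
    }
}
```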
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      Config

      +
      public Config(Collection<ConfigEntry> entries)
      +
      Create a configuration instance with the provided entries.
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      entries

      +
      public Collection<ConfigEntry> entries()
      +
      Configuration entries for a resource.
      +
      +
    • +
    • +
      +

      get

      +
      public ConfigEntry get(String name)
      +
      Get the configuration entry with the provided name or null if there isn't one.
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ConfigEntry.ConfigSource.html b/static/41/javadoc/org/apache/kafka/clients/admin/ConfigEntry.ConfigSource.html new file mode 100644 index 000000000..99828dc87 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ConfigEntry.ConfigSource.html @@ -0,0 +1,278 @@ + + + + +ConfigEntry.ConfigSource (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Enum Class ConfigEntry.ConfigSource

+
+
java.lang.Object +
java.lang.Enum<ConfigEntry.ConfigSource> +
org.apache.kafka.clients.admin.ConfigEntry.ConfigSource
+
+
+
+
+
All Implemented Interfaces:
+
Serializable, Comparable<ConfigEntry.ConfigSource>, Constable
+
+
+
Enclosing class:
+
ConfigEntry
+
+
+
public static enum ConfigEntry.ConfigSource +extends Enum<ConfigEntry.ConfigSource>
+
Source of configuration entries.
+
+
+ +
+
+
    + +
  • +
    +

    Enum Constant Details

    + +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public static ConfigEntry.ConfigSource[] values()
      +
Returns an array containing the constants of this enum class, in the order they are declared.
      +
      +
      Returns:
      +
      an array containing the constants of this enum class, in the order they are declared
      +
      +
      +
    • +
    • +
      +

      valueOf

      +
      public static ConfigEntry.ConfigSource valueOf(String name)
      +
Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not permitted.)
      +
      +
      Parameters:
      +
      name - the name of the enum constant to be returned.
      +
      Returns:
      +
      the enum constant with the specified name
      +
      Throws:
      +
      IllegalArgumentException - if this enum class has no constant with the specified name
      +
      NullPointerException - if the argument is null
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ConfigEntry.ConfigSynonym.html b/static/41/javadoc/org/apache/kafka/clients/admin/ConfigEntry.ConfigSynonym.html new file mode 100644 index 000000000..8d4edeaee --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ConfigEntry.ConfigSynonym.html @@ -0,0 +1,202 @@ + + + + +ConfigEntry.ConfigSynonym (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ConfigEntry.ConfigSynonym

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ConfigEntry.ConfigSynonym
+
+
+
+
Enclosing class:
+
ConfigEntry
+
+
+
public static class ConfigEntry.ConfigSynonym +extends Object
+
Class representing a configuration synonym of a ConfigEntry.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      name

      +
      public String name()
      +
      Returns the name of this configuration.
      +
      +
    • +
    • +
      +

      value

      +
      public String value()
      +
      Returns the value of this configuration, which may be null if the configuration is sensitive.
      +
      +
    • +
    • +
      +

      source

      +
      public ConfigEntry.ConfigSource source()
      +
      Returns the source of this configuration.
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ConfigEntry.ConfigType.html b/static/41/javadoc/org/apache/kafka/clients/admin/ConfigEntry.ConfigType.html new file mode 100644 index 000000000..64012aa4e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ConfigEntry.ConfigType.html @@ -0,0 +1,286 @@ + + + + +ConfigEntry.ConfigType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Enum Class ConfigEntry.ConfigType

+
+
java.lang.Object +
java.lang.Enum<ConfigEntry.ConfigType> +
org.apache.kafka.clients.admin.ConfigEntry.ConfigType
+
+
+
+
+
All Implemented Interfaces:
+
Serializable, Comparable<ConfigEntry.ConfigType>, Constable
+
+
+
Enclosing class:
+
ConfigEntry
+
+
+
public static enum ConfigEntry.ConfigType +extends Enum<ConfigEntry.ConfigType>
+
Data type of configuration entry.
+
+
+ +
+
+
    + +
  • +
    +

    Enum Constant Details

    + +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public static ConfigEntry.ConfigType[] values()
      +
Returns an array containing the constants of this enum class, in the order they are declared.
      +
      +
      Returns:
      +
      an array containing the constants of this enum class, in the order they are declared
      +
      +
      +
    • +
    • +
      +

      valueOf

      +
      public static ConfigEntry.ConfigType valueOf(String name)
      +
Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not permitted.)
      +
      +
      Parameters:
      +
      name - the name of the enum constant to be returned.
      +
      Returns:
      +
      the enum constant with the specified name
      +
      Throws:
      +
      IllegalArgumentException - if this enum class has no constant with the specified name
      +
      NullPointerException - if the argument is null
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ConfigEntry.html b/static/41/javadoc/org/apache/kafka/clients/admin/ConfigEntry.html new file mode 100644 index 000000000..7a3d2e684 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ConfigEntry.html @@ -0,0 +1,376 @@ + + + + +ConfigEntry (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ConfigEntry

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ConfigEntry
+
+
+
+
public class ConfigEntry +extends Object
+
A class representing a configuration entry containing name, value and additional metadata.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ConfigEntry

      +
      public ConfigEntry(String name, + String value)
      +
      Create a configuration entry with the provided values.
      +
      +
      Parameters:
      +
      name - the non-null config name
      +
      value - the config value or null
      +
      +
      +
    • +
    • +
      +

      ConfigEntry

      +
      public ConfigEntry(String name, + String value, + ConfigEntry.ConfigSource source, + boolean isSensitive, + boolean isReadOnly, + List<ConfigEntry.ConfigSynonym> synonyms, + ConfigEntry.ConfigType type, + String documentation)
      +
      Create a configuration with the provided values.
      +
      +
      Parameters:
      +
      name - the non-null config name
      +
      value - the config value or null
      +
      source - the source of this config entry
      +
      isSensitive - whether the config value is sensitive, the broker never returns the value if it is sensitive
      +
      isReadOnly - whether the config is read-only and cannot be updated
      +
      synonyms - Synonym configs in order of precedence
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      name

      +
      public String name()
      +
      Return the config name.
      +
      +
    • +
    • +
      +

      value

      +
      public String value()
      +
      Return the value or null. Null is returned if the config is unset or if isSensitive is true.
      +
      +
    • +
    • +
      +

      source

      +
      public ConfigEntry.ConfigSource source()
      +
      Return the source of this configuration entry.
      +
      +
    • +
    • +
      +

      isDefault

      +
      public boolean isDefault()
      +
      Return whether the config value is the default or if it's been explicitly set.
      +
      +
    • +
    • +
      +

      isSensitive

      +
      public boolean isSensitive()
      +
Return whether the config value is sensitive. The value is always set to null by the broker if the config value is sensitive.
      +
      +
    • +
    • +
      +

      isReadOnly

      +
      public boolean isReadOnly()
      +
      Return whether the config is read-only and cannot be updated.
      +
      +
    • +
    • +
      +

      synonyms

      +
      public List<ConfigEntry.ConfigSynonym> synonyms()
      +
Returns all config values that may be used as the value of this config along with their source, in the order of precedence. The list starts with the value returned in this ConfigEntry. The list is empty if synonyms were not requested using DescribeConfigsOptions.includeSynonyms(boolean).
      +
      +
    • +
    • +
      +

      type

      +
      public ConfigEntry.ConfigType type()
      +
      Return the config data type.
      +
      +
    • +
    • +
      +

      documentation

      +
      public String documentation()
      +
      Return the config documentation.
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
Overrides toString to redact sensitive values. Warning: the user is responsible for setting the correct "isSensitive" field for each config entry.
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ConsumerGroupDescription.html b/static/41/javadoc/org/apache/kafka/clients/admin/ConsumerGroupDescription.html new file mode 100644 index 000000000..40bc2550b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ConsumerGroupDescription.html @@ -0,0 +1,434 @@ + + + + +ConsumerGroupDescription (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ConsumerGroupDescription

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ConsumerGroupDescription
+
+
+
+
public class ConsumerGroupDescription +extends Object
+
A detailed description of a single consumer group in the cluster.
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ConsumerGroupListing.html b/static/41/javadoc/org/apache/kafka/clients/admin/ConsumerGroupListing.html new file mode 100644 index 000000000..0e3438724 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ConsumerGroupListing.html @@ -0,0 +1,398 @@ + + + + +ConsumerGroupListing (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ConsumerGroupListing

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ConsumerGroupListing
+
+
+
+
@Deprecated(since="4.1") +public class ConsumerGroupListing +extends Object
+
Deprecated. + +
+
A listing of a consumer group in the cluster.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ConsumerGroupListing

      +
      public ConsumerGroupListing(String groupId, + boolean isSimpleConsumerGroup)
      +
      Deprecated.
      +
      Create an instance with the specified parameters.
      +
      +
      Parameters:
      +
      groupId - Group Id.
      +
      isSimpleConsumerGroup - If consumer group is simple or not.
      +
      +
      +
    • +
    • +
      +

      ConsumerGroupListing

      +
      @Deprecated +public ConsumerGroupListing(String groupId, + boolean isSimpleConsumerGroup, + Optional<ConsumerGroupState> state)
      +
      Deprecated. + +
      +
      Create an instance with the specified parameters.
      +
      +
      Parameters:
      +
      groupId - Group Id.
      +
      isSimpleConsumerGroup - If consumer group is simple or not.
      +
      state - The state of the consumer group.
      +
      +
      +
    • +
    • +
      +

      ConsumerGroupListing

      +
      @Deprecated +public ConsumerGroupListing(String groupId, + boolean isSimpleConsumerGroup, + Optional<ConsumerGroupState> state, + Optional<GroupType> type)
      +
      Deprecated. + +
      +
      Create an instance with the specified parameters.
      +
      +
      Parameters:
      +
      groupId - Group Id.
      +
      isSimpleConsumerGroup - If consumer group is simple or not.
      +
      state - The state of the consumer group.
      +
      type - The type of the consumer group.
      +
      +
      +
    • +
    • +
      +

      ConsumerGroupListing

      +
      public ConsumerGroupListing(String groupId, + Optional<GroupState> groupState, + boolean isSimpleConsumerGroup)
      +
      Deprecated.
      +
      Create an instance with the specified parameters.
      +
      +
      Parameters:
      +
      groupId - Group Id.
      +
      groupState - The state of the consumer group.
      +
      isSimpleConsumerGroup - If consumer group is simple or not.
      +
      +
      +
    • +
    • +
      +

      ConsumerGroupListing

      +
      public ConsumerGroupListing(String groupId, + Optional<GroupState> groupState, + Optional<GroupType> type, + boolean isSimpleConsumerGroup)
      +
      Deprecated.
      +
      Create an instance with the specified parameters.
      +
      +
      Parameters:
      +
      groupId - Group Id.
      +
      groupState - The state of the consumer group.
      +
      type - The type of the consumer group.
      +
      isSimpleConsumerGroup - If consumer group is simple or not.
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      groupId

      +
      public String groupId()
      +
      Deprecated.
      +
      Consumer Group Id
      +
      +
    • +
    • +
      +

      isSimpleConsumerGroup

      +
      public boolean isSimpleConsumerGroup()
      +
      Deprecated.
      +
Whether the consumer group is a simple consumer group or not.
      +
      +
    • +
    • +
      +

      groupState

      +
      public Optional<GroupState> groupState()
      +
      Deprecated.
      +
      Group state
      +
      +
    • +
    • +
      +

      state

      @Deprecated public Optional<ConsumerGroupState> state()
      Deprecated. +
      Since 4.0. Use groupState() instead.
      +
      +
      Consumer Group state
      +
      +
    • +
    • +
      +

      type

      +
      public Optional<GroupType> type()
      +
      Deprecated.
      +
      The type of the consumer group.
      +
      +
      Returns:
      +
      An Optional containing the type, if available.
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      Deprecated.
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      Deprecated.
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      Deprecated.
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/CreateAclsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/CreateAclsOptions.html new file mode 100644 index 000000000..54ac6de81 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/CreateAclsOptions.html @@ -0,0 +1,173 @@ + + + + +CreateAclsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class CreateAclsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<CreateAclsOptions> +
org.apache.kafka.clients.admin.CreateAclsOptions
+
+
+
+
+
public class CreateAclsOptions +extends AbstractOptions<CreateAclsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      CreateAclsOptions

      +
      public CreateAclsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/CreateAclsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/CreateAclsResult.html new file mode 100644 index 000000000..4bca389de --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/CreateAclsResult.html @@ -0,0 +1,149 @@ + + + + +CreateAclsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class CreateAclsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.CreateAclsResult
+
+
+
+
public class CreateAclsResult +extends Object
+
The result of the Admin.createAcls(Collection) call.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public Map<AclBinding,KafkaFuture<Void>> values()
      +
      Return a map from ACL bindings to futures which can be used to check the status of the creation of each ACL + binding.
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds only if all the ACL creations succeed.
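For context, a minimal sketch of how this result is typically consumed after a createAcls call; the broker address, principal, and topic name are placeholders:

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreateAclsResult;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;

public class CreateAclsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // Allow User:alice to read topic "my-topic" from any host (names are placeholders)
            AclBinding binding = new AclBinding(
                    new ResourcePattern(ResourceType.TOPIC, "my-topic", PatternType.LITERAL),
                    new AccessControlEntry("User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW));
            CreateAclsResult result = admin.createAcls(List.of(binding));
            result.all().get(); // fails if any individual ACL creation failed
        }
    }
}
```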
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/CreateDelegationTokenOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/CreateDelegationTokenOptions.html new file mode 100644 index 000000000..c24bbb7d0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/CreateDelegationTokenOptions.html @@ -0,0 +1,243 @@ + + + + +CreateDelegationTokenOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class CreateDelegationTokenOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<CreateDelegationTokenOptions> +
org.apache.kafka.clients.admin.CreateDelegationTokenOptions
+
+
+
+
+
public class CreateDelegationTokenOptions +extends AbstractOptions<CreateDelegationTokenOptions>
+ +
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/CreateDelegationTokenResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/CreateDelegationTokenResult.html new file mode 100644 index 000000000..17037682c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/CreateDelegationTokenResult.html @@ -0,0 +1,135 @@ + + + + +CreateDelegationTokenResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class CreateDelegationTokenResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.CreateDelegationTokenResult
+
+
+
+
public class CreateDelegationTokenResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      delegationToken

      +
      public KafkaFuture<DelegationToken> delegationToken()
      +
      Returns a future which yields a delegation token
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/CreatePartitionsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/CreatePartitionsOptions.html new file mode 100644 index 000000000..35111ee2d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/CreatePartitionsOptions.html @@ -0,0 +1,203 @@ + + + + +CreatePartitionsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class CreatePartitionsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<CreatePartitionsOptions> +
org.apache.kafka.clients.admin.CreatePartitionsOptions
+
+
+
+
+
public class CreatePartitionsOptions +extends AbstractOptions<CreatePartitionsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      CreatePartitionsOptions

      +
      public CreatePartitionsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      validateOnly

      +
      public boolean validateOnly()
      +
      Return true if the request should be validated without creating new partitions.
      +
      +
    • +
    • +
      +

      validateOnly

      +
      public CreatePartitionsOptions validateOnly(boolean validateOnly)
      +
      Set to true if the request should be validated without creating new partitions.
      +
      +
    • +
    • +
      +

      retryOnQuotaViolation

      +
      public CreatePartitionsOptions retryOnQuotaViolation(boolean retryOnQuotaViolation)
      +
      Set to true if quota violation should be automatically retried.
      +
      +
    • +
    • +
      +

      shouldRetryOnQuotaViolation

      +
      public boolean shouldRetryOnQuotaViolation()
      +
      Returns true if quota violation should be automatically retried.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/CreatePartitionsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/CreatePartitionsResult.html new file mode 100644 index 000000000..17b922b6b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/CreatePartitionsResult.html @@ -0,0 +1,149 @@ + + + + +CreatePartitionsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class CreatePartitionsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.CreatePartitionsResult
+
+
+
+
public class CreatePartitionsResult +extends Object
+
The result of the Admin.createPartitions(Map) call.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public Map<String,KafkaFuture<Void>> values()
      +
      Return a map from topic names to futures, which can be used to check the status of individual + partition creations.
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds if all the partition creations succeed.
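For context, a minimal sketch of a createPartitions call that exercises both the options and the per-topic futures on this result; the broker address and topic name are placeholders:

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreatePartitionsOptions;
import org.apache.kafka.clients.admin.CreatePartitionsResult;
import org.apache.kafka.clients.admin.NewPartitions;

public class CreatePartitionsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // Grow "my-topic" (placeholder) to 6 partitions, but only validate the request
            CreatePartitionsOptions options = new CreatePartitionsOptions().validateOnly(true);
            CreatePartitionsResult result =
                    admin.createPartitions(Map.of("my-topic", NewPartitions.increaseTo(6)), options);
            result.values().get("my-topic").get(); // throws if validation failed for this topic
            result.all().get();                    // succeeds only if every topic passed
        }
    }
}
```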
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/CreateTopicsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/CreateTopicsOptions.html new file mode 100644 index 000000000..93af8e17e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/CreateTopicsOptions.html @@ -0,0 +1,221 @@ + + + + +CreateTopicsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class CreateTopicsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<CreateTopicsOptions> +
org.apache.kafka.clients.admin.CreateTopicsOptions
+
+
+
+
+
public class CreateTopicsOptions +extends AbstractOptions<CreateTopicsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      CreateTopicsOptions

      +
      public CreateTopicsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      timeoutMs

      +
      public CreateTopicsOptions timeoutMs(Integer timeoutMs)
      +
      Set the timeout in milliseconds for this operation or null if the default api timeout for the + AdminClient should be used.
      +
      +
      Overrides:
      +
      timeoutMs in class AbstractOptions<CreateTopicsOptions>
      +
      +
      +
    • +
    • +
      +

      validateOnly

      +
      public CreateTopicsOptions validateOnly(boolean validateOnly)
      +
      Set to true if the request should be validated without creating the topic.
      +
      +
    • +
    • +
      +

      shouldValidateOnly

      +
      public boolean shouldValidateOnly()
      +
      Return true if the request should be validated without creating the topic.
      +
      +
    • +
    • +
      +

      retryOnQuotaViolation

      +
      public CreateTopicsOptions retryOnQuotaViolation(boolean retryOnQuotaViolation)
      +
      Set to true if quota violation should be automatically retried.
      +
      +
    • +
    • +
      +

      shouldRetryOnQuotaViolation

      +
      public boolean shouldRetryOnQuotaViolation()
      +
      Returns true if quota violation should be automatically retried.
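For context, a minimal sketch of a validate-only topic creation using these options; the broker address and topic settings are placeholders:

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreateTopicsOptions;
import org.apache.kafka.clients.admin.NewTopic;

public class ValidateTopicCreation {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            CreateTopicsOptions options = new CreateTopicsOptions()
                    .validateOnly(true)            // dry run: no topic is actually created
                    .retryOnQuotaViolation(false); // surface quota violations instead of retrying
            admin.createTopics(List.of(new NewTopic("my-topic", 3, (short) 1)), options)
                 .all()
                 .get();
        }
    }
}
```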
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/CreateTopicsResult.TopicMetadataAndConfig.html b/static/41/javadoc/org/apache/kafka/clients/admin/CreateTopicsResult.TopicMetadataAndConfig.html new file mode 100644 index 000000000..9d6a9a746 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/CreateTopicsResult.TopicMetadataAndConfig.html @@ -0,0 +1,203 @@ + + + + +CreateTopicsResult.TopicMetadataAndConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class CreateTopicsResult.TopicMetadataAndConfig

+
+
java.lang.Object +
org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig
+
+
+
+
Enclosing class:
+
CreateTopicsResult
+
+
+
public static class CreateTopicsResult.TopicMetadataAndConfig +extends Object
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      TopicMetadataAndConfig

      +
      public TopicMetadataAndConfig(Uuid topicId, + int numPartitions, + int replicationFactor, + Config config)
      +
      +
    • +
    • +
      +

      TopicMetadataAndConfig

      +
      public TopicMetadataAndConfig(ApiException exception)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      topicId

      +
      public Uuid topicId()
      +
      +
    • +
    • +
      +

      numPartitions

      +
      public int numPartitions()
      +
      +
    • +
    • +
      +

      replicationFactor

      +
      public int replicationFactor()
      +
      +
    • +
    • +
      +

      config

      +
      public Config config()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/CreateTopicsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/CreateTopicsResult.html new file mode 100644 index 000000000..8da6b8fdc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/CreateTopicsResult.html @@ -0,0 +1,237 @@ + + + + +CreateTopicsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class CreateTopicsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.CreateTopicsResult
+
+
+
+
public class CreateTopicsResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public Map<String,KafkaFuture<Void>> values()
      +
      Return a map from topic names to futures, which can be used to check the status of individual + topic creations.
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds if all the topic creations succeed.
      +
      +
    • +
    • +
      +

      config

      +
      public KafkaFuture<Config> config(String topic)
      +
      Returns a future that provides topic configs for the topic when the request completes. +

      If the broker version doesn't support replication factor in the response, an UnsupportedVersionException is thrown. If the broker returned an error for topic configs, the appropriate exception is thrown; for example, a TopicAuthorizationException is thrown if the user does not have permission to describe topic configs. Note that the values for the type and documentation fields will be null.

      +
      +
    • +
    • +
      +

      topicId

      +
      public KafkaFuture<Uuid> topicId(String topic)
      +
      Returns a future that provides topic ID for the topic when the request completes. +

      If the broker version doesn't support replication factor in the response, an UnsupportedVersionException is thrown. If the broker returned an error for topic configs, the appropriate exception is thrown; for example, a TopicAuthorizationException is thrown if the user does not have permission to describe topic configs.

      +
      +
    • +
    • +
      +

      numPartitions

      +
      public KafkaFuture<Integer> numPartitions(String topic)
      +
      Returns a future that provides number of partitions in the topic when the request completes. +

      If the broker version doesn't support replication factor in the response, an UnsupportedVersionException is thrown. If the broker returned an error for topic configs, the appropriate exception is thrown; for example, a TopicAuthorizationException is thrown if the user does not have permission to describe topic configs.

      +
      +
    • +
    • +
      +

      replicationFactor

      +
      public KafkaFuture<Integer> replicationFactor(String topic)
      +
      Returns a future that provides replication factor for the topic when the request completes. +

      If the broker version doesn't support replication factor in the response, an UnsupportedVersionException is thrown. If the broker returned an error for topic configs, the appropriate exception is thrown; for example, a TopicAuthorizationException is thrown if the user does not have permission to describe topic configs.
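For context, a minimal sketch showing how the per-topic futures on this result are typically read after creating a topic; the broker address and topic name are placeholders:

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.NewTopic;

public class CreateTopicsResultExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            CreateTopicsResult result =
                    admin.createTopics(List.of(new NewTopic("my-topic", 3, (short) 1)));
            result.all().get(); // wait for the creation itself
            // Per-topic metadata futures; these may fail on older brokers
            // or without permission to describe topic configs
            System.out.println("topicId=" + result.topicId("my-topic").get());
            System.out.println("partitions=" + result.numPartitions("my-topic").get());
            System.out.println("replicationFactor=" + result.replicationFactor("my-topic").get());
        }
    }
}
```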

      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteAclsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteAclsOptions.html new file mode 100644 index 000000000..1032508dc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteAclsOptions.html @@ -0,0 +1,173 @@ + + + + +DeleteAclsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteAclsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DeleteAclsOptions> +
org.apache.kafka.clients.admin.DeleteAclsOptions
+
+
+
+
+
public class DeleteAclsOptions +extends AbstractOptions<DeleteAclsOptions>
+
Options for the Admin.deleteAcls(Collection) call.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DeleteAclsOptions

      +
      public DeleteAclsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteAclsResult.FilterResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteAclsResult.FilterResult.html new file mode 100644 index 000000000..d33ce140a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteAclsResult.FilterResult.html @@ -0,0 +1,151 @@ + + + + +DeleteAclsResult.FilterResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteAclsResult.FilterResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DeleteAclsResult.FilterResult
+
+
+
+
Enclosing class:
+
DeleteAclsResult
+
+
+
public static class DeleteAclsResult.FilterResult +extends Object
+
A class containing either the deleted ACL binding or an exception if the delete failed.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      binding

      +
      public AclBinding binding()
      +
      Return the deleted ACL binding or null if there was an error.
      +
      +
    • +
    • +
      +

      exception

      +
      public ApiException exception()
      +
      Return an exception if the ACL delete was not successful or null if it was.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteAclsResult.FilterResults.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteAclsResult.FilterResults.html new file mode 100644 index 000000000..822d8825a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteAclsResult.FilterResults.html @@ -0,0 +1,139 @@ + + + + +DeleteAclsResult.FilterResults (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteAclsResult.FilterResults

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DeleteAclsResult.FilterResults
+
+
+
+
Enclosing class:
+
DeleteAclsResult
+
+
+
public static class DeleteAclsResult.FilterResults +extends Object
+
A class containing the results of the delete ACLs operation.
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteAclsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteAclsResult.html new file mode 100644 index 000000000..23781b629 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteAclsResult.html @@ -0,0 +1,172 @@ + + + + +DeleteAclsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteAclsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DeleteAclsResult
+
+
+
+
public class DeleteAclsResult +extends Object
+
The result of the Admin.deleteAcls(Collection) call.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      public Map<AclBindingFilter,KafkaFuture<DeleteAclsResult.FilterResults>> values()
      Return a map from acl filters to futures which can be used to check the status of the deletions by each + filter.
      +
      +
    • +
    • +
      +

      all

      public KafkaFuture<Collection<AclBinding>> all()
      Return a future which succeeds only if all the ACL deletions succeed, and which contains all the deleted ACLs. Note that if the filters don't match any ACLs, this is not considered an error.
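For context, a minimal sketch of deleting the ACLs attached to one topic and inspecting what was removed; the broker address and topic name are placeholders:

```java
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.acl.AccessControlEntryFilter;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePatternFilter;
import org.apache.kafka.common.resource.ResourceType;

public class DeleteAclsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // Delete every ACL attached to topic "my-topic" (placeholder), for any principal/host
            AclBindingFilter filter = new AclBindingFilter(
                    new ResourcePatternFilter(ResourceType.TOPIC, "my-topic", PatternType.LITERAL),
                    AccessControlEntryFilter.ANY);
            Collection<AclBinding> deleted = admin.deleteAcls(List.of(filter)).all().get();
            deleted.forEach(binding -> System.out.println("deleted: " + binding));
        }
    }
}
```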
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsOptions.html new file mode 100644 index 000000000..51ee53584 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsOptions.html @@ -0,0 +1,133 @@ + + + + +DeleteConsumerGroupOffsetsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteConsumerGroupOffsetsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DeleteConsumerGroupOffsetsOptions> +
org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsOptions
+
+
+
+
+
public class DeleteConsumerGroupOffsetsOptions +extends AbstractOptions<DeleteConsumerGroupOffsetsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DeleteConsumerGroupOffsetsOptions

      +
      public DeleteConsumerGroupOffsetsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResult.html new file mode 100644 index 000000000..321640816 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResult.html @@ -0,0 +1,148 @@ + + + + +DeleteConsumerGroupOffsetsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteConsumerGroupOffsetsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsResult
+
+
+
+
public class DeleteConsumerGroupOffsetsResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      partitionResult

      +
      public KafkaFuture<Void> partitionResult(TopicPartition partition)
      +
      Return a future which can be used to check the result for a given partition.
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds only if all the deletions succeed. + If not, the first partition error shall be returned.
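For context, a minimal sketch of deleting committed offsets for one partition of a group; the broker address, group id, and topic partition are placeholders:

```java
import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsResult;
import org.apache.kafka.common.TopicPartition;

public class DeleteGroupOffsetsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("my-topic", 0); // placeholder partition
            DeleteConsumerGroupOffsetsResult result =
                    admin.deleteConsumerGroupOffsets("my-group", Set.of(tp)); // placeholder group id
            result.partitionResult(tp).get(); // per-partition outcome
            result.all().get();               // fails with the first partition error, if any
        }
    }
}
```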
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteConsumerGroupsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteConsumerGroupsOptions.html new file mode 100644 index 000000000..8a1ef6208 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteConsumerGroupsOptions.html @@ -0,0 +1,133 @@ + + + + +DeleteConsumerGroupsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteConsumerGroupsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DeleteConsumerGroupsOptions> +
org.apache.kafka.clients.admin.DeleteConsumerGroupsOptions
+
+
+
+
+
public class DeleteConsumerGroupsOptions +extends AbstractOptions<DeleteConsumerGroupsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DeleteConsumerGroupsOptions

      +
      public DeleteConsumerGroupsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteConsumerGroupsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteConsumerGroupsResult.html new file mode 100644 index 000000000..2620b76e3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteConsumerGroupsResult.html @@ -0,0 +1,149 @@ + + + + +DeleteConsumerGroupsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteConsumerGroupsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DeleteConsumerGroupsResult
+
+
+
+
public class DeleteConsumerGroupsResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      deletedGroups

      +
      public Map<String,KafkaFuture<Void>> deletedGroups()
      +
      Return a map from group id to futures which can be used to check the status of + individual deletions.
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds only if all the consumer group deletions succeed.
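For context, a minimal sketch that deletes two groups and reports the per-group outcome; the broker address and group ids are placeholders:

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DeleteConsumerGroupsResult;
import org.apache.kafka.common.KafkaFuture;

public class DeleteGroupsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            DeleteConsumerGroupsResult result =
                    admin.deleteConsumerGroups(List.of("group-a", "group-b")); // placeholder group ids
            for (Map.Entry<String, KafkaFuture<Void>> entry : result.deletedGroups().entrySet()) {
                try {
                    entry.getValue().get();
                    System.out.println("deleted " + entry.getKey());
                } catch (Exception e) {
                    System.out.println("failed to delete " + entry.getKey() + ": " + e.getCause());
                }
            }
        }
    }
}
```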
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteRecordsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteRecordsOptions.html new file mode 100644 index 000000000..256503550 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteRecordsOptions.html @@ -0,0 +1,133 @@ + + + + +DeleteRecordsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteRecordsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DeleteRecordsOptions> +
org.apache.kafka.clients.admin.DeleteRecordsOptions
+
+
+
+
+
public class DeleteRecordsOptions +extends AbstractOptions<DeleteRecordsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DeleteRecordsOptions

      +
      public DeleteRecordsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteRecordsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteRecordsResult.html new file mode 100644 index 000000000..aa6b179eb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteRecordsResult.html @@ -0,0 +1,176 @@ + + + + +DeleteRecordsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteRecordsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DeleteRecordsResult
+
+
+
+
public class DeleteRecordsResult +extends Object
+
The result of the Admin.deleteRecords(Map) call.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    + +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      lowWatermarks

      +
      public Map<TopicPartition,KafkaFuture<DeletedRecords>> lowWatermarks()
      +
      Return a map from topic partition to futures which can be used to check the status of + individual deletions.
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds only if all the records deletions succeed.
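For context, a minimal sketch of truncating one partition and reading back the new low watermark from this result; the broker address, topic partition, and offset are placeholders:

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DeleteRecordsResult;
import org.apache.kafka.clients.admin.DeletedRecords;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.TopicPartition;

public class DeleteRecordsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("my-topic", 0); // placeholder
            // Delete everything before offset 100 on this partition
            DeleteRecordsResult result =
                    admin.deleteRecords(Map.of(tp, RecordsToDelete.beforeOffset(100L)));
            DeletedRecords deleted = result.lowWatermarks().get(tp).get();
            System.out.println("new low watermark: " + deleted.lowWatermark());
        }
    }
}
```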
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteShareGroupOffsetsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteShareGroupOffsetsOptions.html new file mode 100644 index 000000000..05f42492f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteShareGroupOffsetsOptions.html @@ -0,0 +1,136 @@ + + + + +DeleteShareGroupOffsetsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteShareGroupOffsetsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DeleteShareGroupOffsetsOptions> +
org.apache.kafka.clients.admin.DeleteShareGroupOffsetsOptions
+
+
+
+
+
@Evolving +public class DeleteShareGroupOffsetsOptions +extends AbstractOptions<DeleteShareGroupOffsetsOptions>
+
Options for the Admin.deleteShareGroupOffsets(String, Set, DeleteShareGroupOffsetsOptions) call. +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DeleteShareGroupOffsetsOptions

      +
      public DeleteShareGroupOffsetsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteShareGroupOffsetsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteShareGroupOffsetsResult.html new file mode 100644 index 000000000..5b0322bf0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteShareGroupOffsetsResult.html @@ -0,0 +1,151 @@ + + + + +DeleteShareGroupOffsetsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteShareGroupOffsetsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DeleteShareGroupOffsetsResult
+
+
+
+
@Evolving +public class DeleteShareGroupOffsetsResult +extends Object
+
The result of the Admin.deleteShareGroupOffsets(String, Set, DeleteShareGroupOffsetsOptions) call. +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds only if all the deletions succeed. + If not, the first topic error shall be returned.
      +
      +
    • +
    • +
      +

      topicResult

      +
      public KafkaFuture<Void> topicResult(String topic)
      +
      Return a future which can be used to check the result for a given topic.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteShareGroupsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteShareGroupsOptions.html new file mode 100644 index 000000000..b2521b3f0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteShareGroupsOptions.html @@ -0,0 +1,136 @@ + + + + +DeleteShareGroupsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteShareGroupsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DeleteShareGroupsOptions> +
org.apache.kafka.clients.admin.DeleteShareGroupsOptions
+
+
+
+
+
@Evolving +public class DeleteShareGroupsOptions +extends AbstractOptions<DeleteShareGroupsOptions>
+
Options for the Admin.deleteShareGroups(Collection<String>, DeleteShareGroupsOptions) call. +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DeleteShareGroupsOptions

      +
      public DeleteShareGroupsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteShareGroupsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteShareGroupsResult.html new file mode 100644 index 000000000..0471ed2ff --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteShareGroupsResult.html @@ -0,0 +1,152 @@ + + + + +DeleteShareGroupsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteShareGroupsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DeleteShareGroupsResult
+
+
+
+
@Evolving +public class DeleteShareGroupsResult +extends Object
+
The result of the Admin.deleteShareGroups(Collection<String>, DeleteShareGroupsOptions) call. +

+ The API of this class is evolving, see Admin for details.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      deletedGroups

      +
      public Map<String,KafkaFuture<Void>> deletedGroups()
      +
      Return a map from group id to futures which can be used to check the status of + individual deletions.
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds only if all the share group deletions succeed.
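For context, a minimal sketch of deleting a share group; this API is marked evolving in 4.1, so the exact shape may change, and the broker address and group id below are placeholders:

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DeleteShareGroupsOptions;
import org.apache.kafka.clients.admin.DeleteShareGroupsResult;

public class DeleteShareGroupsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            DeleteShareGroupsResult result = admin.deleteShareGroups(
                    List.of("my-share-group"),          // placeholder group id
                    new DeleteShareGroupsOptions());
            result.deletedGroups().get("my-share-group").get(); // per-group outcome
            result.all().get();                                 // succeeds only if every deletion succeeded
        }
    }
}
```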
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteStreamsGroupOffsetsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteStreamsGroupOffsetsOptions.html new file mode 100644 index 000000000..ff07c7eda --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteStreamsGroupOffsetsOptions.html @@ -0,0 +1,136 @@ + + + + +DeleteStreamsGroupOffsetsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteStreamsGroupOffsetsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DeleteStreamsGroupOffsetsOptions> +
org.apache.kafka.clients.admin.DeleteStreamsGroupOffsetsOptions
+
+
+
+
+
@Evolving +public class DeleteStreamsGroupOffsetsOptions +extends AbstractOptions<DeleteStreamsGroupOffsetsOptions>
+
Options for the Admin.deleteStreamsGroupOffsets(String, Set, DeleteStreamsGroupOffsetsOptions) call. +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DeleteStreamsGroupOffsetsOptions

      +
      public DeleteStreamsGroupOffsetsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteStreamsGroupOffsetsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteStreamsGroupOffsetsResult.html new file mode 100644 index 000000000..b75a15fe8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteStreamsGroupOffsetsResult.html @@ -0,0 +1,150 @@ + + + + +DeleteStreamsGroupOffsetsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteStreamsGroupOffsetsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DeleteStreamsGroupOffsetsResult
+
+
+
+
@Evolving +public class DeleteStreamsGroupOffsetsResult +extends Object
+
The result of the Admin.deleteStreamsGroupOffsets(String, Set, DeleteStreamsGroupOffsetsOptions) call. +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds only if all the deletions succeed.
      +
      +
    • +
    • +
      +

      partitionResult

      +
      public KafkaFuture<Void> partitionResult(TopicPartition topicPartition)
      +
      Return a future which can be used to check the result for a given topic partition.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteStreamsGroupsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteStreamsGroupsOptions.html new file mode 100644 index 000000000..ad12411fb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteStreamsGroupsOptions.html @@ -0,0 +1,136 @@ + + + + +DeleteStreamsGroupsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteStreamsGroupsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DeleteStreamsGroupsOptions> +
org.apache.kafka.clients.admin.DeleteStreamsGroupsOptions
+
+
+
+
+
@Evolving +public class DeleteStreamsGroupsOptions +extends AbstractOptions<DeleteStreamsGroupsOptions>
+
Options for the Admin.deleteStreamsGroups(Collection<String>, DeleteStreamsGroupsOptions) call. +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DeleteStreamsGroupsOptions

      +
      public DeleteStreamsGroupsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteStreamsGroupsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteStreamsGroupsResult.html new file mode 100644 index 000000000..4499af08a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteStreamsGroupsResult.html @@ -0,0 +1,150 @@ + + + + +DeleteStreamsGroupsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteStreamsGroupsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DeleteStreamsGroupsResult
+
+
+
+
@Evolving +public class DeleteStreamsGroupsResult +extends Object
+
The result of the Admin.deleteStreamsGroups(Collection, DeleteStreamsGroupsOptions) call. +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds only if all the deletions succeed.
      +
      +
    • +
    • +
      +

      deletedGroups

      +
      public Map<String,KafkaFuture<Void>> deletedGroups()
      +
      Return a map from group id to futures which can be used to check the status of individual deletions.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteTopicsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteTopicsOptions.html new file mode 100644 index 000000000..477c07c91 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteTopicsOptions.html @@ -0,0 +1,197 @@ + + + + +DeleteTopicsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteTopicsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DeleteTopicsOptions> +
org.apache.kafka.clients.admin.DeleteTopicsOptions
+
+
+
+
+
public class DeleteTopicsOptions +extends AbstractOptions<DeleteTopicsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DeleteTopicsOptions

      +
      public DeleteTopicsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      timeoutMs

      +
      public DeleteTopicsOptions timeoutMs(Integer timeoutMs)
      +
      Set the timeout in milliseconds for this operation or null if the default api timeout for the + AdminClient should be used.
      +
      +
      Overrides:
      +
      timeoutMs in class AbstractOptions<DeleteTopicsOptions>
      +
      +
      +
    • +
    • +
      +

      retryOnQuotaViolation

      +
      public DeleteTopicsOptions retryOnQuotaViolation(boolean retryOnQuotaViolation)
      +
      Set to true if quota violation should be automatically retried.
      +
      +
    • +
    • +
      +

      shouldRetryOnQuotaViolation

      +
      public boolean shouldRetryOnQuotaViolation()
      +
      Returns true if quota violation should be automatically retried.
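For context, a minimal sketch of a topic deletion that uses these options to bound the operation and fail fast on quota errors; the broker address, timeout, and topic name are placeholders:

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DeleteTopicsOptions;

public class DeleteTopicsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            DeleteTopicsOptions options = new DeleteTopicsOptions()
                    .timeoutMs(30_000)              // bound the operation to 30s (placeholder value)
                    .retryOnQuotaViolation(false);  // fail fast instead of retrying on quota errors
            admin.deleteTopics(List.of("my-topic"), options).all().get(); // placeholder topic name
        }
    }
}
```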
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeleteTopicsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteTopicsResult.html new file mode 100644 index 000000000..e8d848098 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeleteTopicsResult.html @@ -0,0 +1,170 @@ + + + + +DeleteTopicsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeleteTopicsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DeleteTopicsResult
+
+
+
+
public class DeleteTopicsResult +extends Object
+
The result of the Admin.deleteTopics(Collection) call.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DeletedRecords.html b/static/41/javadoc/org/apache/kafka/clients/admin/DeletedRecords.html new file mode 100644 index 000000000..974a5dcbc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DeletedRecords.html @@ -0,0 +1,169 @@ + + + + +DeletedRecords (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DeletedRecords

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DeletedRecords
+
+
+
+
public class DeletedRecords +extends Object
+
Represents information about deleted records
+
+
+
    + +
  • +
    +

    Constructor Summary

    +
    Constructors
    +
    +
    Constructor
    +
    Description
    +
    DeletedRecords(long lowWatermark)
    +
    +
    Create an instance of this class with the provided parameters.
    +
    +
    +
    +
  • + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    +
    long
    + +
    +
    Return the "low watermark" for the topic partition on which the deletion was executed
    +
    +
    +
    +
    +
    +

    Methods inherited from class java.lang.Object

    +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DeletedRecords

      +
      public DeletedRecords(long lowWatermark)
      +
      Create an instance of this class with the provided parameters.
      +
      +
      Parameters:
      +
      lowWatermark - "low watermark" for the topic partition on which the deletion was executed
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      lowWatermark

      +
      public long lowWatermark()
      +
      Return the "low watermark" for the topic partition on which the deletion was executed
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeAclsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeAclsOptions.html new file mode 100644 index 000000000..a2d68a522 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeAclsOptions.html @@ -0,0 +1,173 @@ + + + + +DescribeAclsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeAclsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeAclsOptions> +
org.apache.kafka.clients.admin.DescribeAclsOptions
+
+
+
+
+
public class DescribeAclsOptions +extends AbstractOptions<DescribeAclsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeAclsOptions

      +
      public DescribeAclsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeAclsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeAclsResult.html new file mode 100644 index 000000000..9497bcc65 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeAclsResult.html @@ -0,0 +1,135 @@ + + + + +DescribeAclsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeAclsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeAclsResult
+
+
+
+
public class DescribeAclsResult +extends Object
+
The result of the Admin.describeAcls(AclBindingFilter) call.
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClassicGroupsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClassicGroupsOptions.html new file mode 100644 index 000000000..52b086ec3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClassicGroupsOptions.html @@ -0,0 +1,176 @@ + + + + +DescribeClassicGroupsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeClassicGroupsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeClassicGroupsOptions> +
org.apache.kafka.clients.admin.DescribeClassicGroupsOptions
+
+
+
+
+
@Evolving +public class DescribeClassicGroupsOptions +extends AbstractOptions<DescribeClassicGroupsOptions>
+
Options for Admin.describeClassicGroups(Collection, DescribeClassicGroupsOptions). +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeClassicGroupsOptions

      +
      public DescribeClassicGroupsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      includeAuthorizedOperations

      +
      public DescribeClassicGroupsOptions includeAuthorizedOperations(boolean includeAuthorizedOperations)
      +
      +
    • +
    • +
      +

      includeAuthorizedOperations

      +
      public boolean includeAuthorizedOperations()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClassicGroupsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClassicGroupsResult.html new file mode 100644 index 000000000..0d058cb55 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClassicGroupsResult.html @@ -0,0 +1,177 @@ + + + + +DescribeClassicGroupsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeClassicGroupsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeClassicGroupsResult
+
+
+
+
@Evolving +public class DescribeClassicGroupsResult +extends Object
+
The result of the Admin.describeClassicGroups(Collection, DescribeClassicGroupsOptions)} call. +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClientQuotasOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClientQuotasOptions.html new file mode 100644 index 000000000..df6a33039 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClientQuotasOptions.html @@ -0,0 +1,133 @@ + + + + +DescribeClientQuotasOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeClientQuotasOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeClientQuotasOptions> +
org.apache.kafka.clients.admin.DescribeClientQuotasOptions
+
+
+
+
+
public class DescribeClientQuotasOptions +extends AbstractOptions<DescribeClientQuotasOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeClientQuotasOptions

      +
      public DescribeClientQuotasOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClientQuotasResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClientQuotasResult.html new file mode 100644 index 000000000..b76cbdd00 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClientQuotasResult.html @@ -0,0 +1,170 @@ + + + + +DescribeClientQuotasResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeClientQuotasResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeClientQuotasResult
+
+
+
+
public class DescribeClientQuotasResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeClientQuotasResult

      +
      public DescribeClientQuotasResult(KafkaFuture<Map<ClientQuotaEntity,Map<String,Double>>> entities)
      +
      Maps an entity to its configured quota value(s). Note that if no value is defined for a quota type in that entity's config, it is not included in the resulting value map.
      +
      +
      Parameters:
      +
      entities - future for the collection of entities that matched the filter
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClusterOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClusterOptions.html new file mode 100644 index 000000000..f1f6c2bb9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClusterOptions.html @@ -0,0 +1,217 @@ + + + + +DescribeClusterOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeClusterOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeClusterOptions> +
org.apache.kafka.clients.admin.DescribeClusterOptions
+
+
+
+
+
public class DescribeClusterOptions +extends AbstractOptions<DescribeClusterOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeClusterOptions

      +
      public DescribeClusterOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      timeoutMs

      +
      public DescribeClusterOptions timeoutMs(Integer timeoutMs)
      +
      Set the timeout in milliseconds for this operation or null if the default api timeout for the + AdminClient should be used.
      +
      +
      Overrides:
      +
      timeoutMs in class AbstractOptions<DescribeClusterOptions>
      +
      +
      +
    • +
    • +
      +

      includeAuthorizedOperations

      +
      public DescribeClusterOptions includeAuthorizedOperations(boolean includeAuthorizedOperations)
      +
      +
    • +
    • +
      +

      includeFencedBrokers

      +
      public DescribeClusterOptions includeFencedBrokers(boolean includeFencedBrokers)
      +
      +
    • +
    • +
      +

      includeAuthorizedOperations

      +
      public boolean includeAuthorizedOperations()
      +
      Specify if authorized operations should be included in the response. Note that some older brokers may not supply this information even if it is requested.
      +
      +
    • +
    • +
      +

      includeFencedBrokers

      +
      public boolean includeFencedBrokers()
      +
      Specify if fenced brokers should be included in the response. Note that some older brokers may not supply this information even if it is requested.
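For context, a minimal sketch of a describeCluster call using these options; the broker address is a placeholder, and includeFencedBrokers is assumed to be supported by the broker being queried:

```java
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeClusterOptions;
import org.apache.kafka.clients.admin.DescribeClusterResult;

public class DescribeClusterExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            DescribeClusterOptions options = new DescribeClusterOptions()
                    .includeAuthorizedOperations(true)  // may be null if the broker cannot supply it
                    .includeFencedBrokers(true);        // newer option; assumed supported here
            DescribeClusterResult cluster = admin.describeCluster(options);
            System.out.println("cluster id: " + cluster.clusterId().get());
            System.out.println("controller: " + cluster.controller().get());
            System.out.println("nodes: " + cluster.nodes().get());
            System.out.println("authorized ops: " + cluster.authorizedOperations().get());
        }
    }
}
```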
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClusterResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClusterResult.html new file mode 100644 index 000000000..5c5e4edc4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeClusterResult.html @@ -0,0 +1,175 @@ + + + + +DescribeClusterResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeClusterResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeClusterResult
+
+
+
+
public class DescribeClusterResult +extends Object
+
The result of the Admin.describeCluster() call.
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeConfigsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeConfigsOptions.html new file mode 100644 index 000000000..41251bd68 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeConfigsOptions.html @@ -0,0 +1,221 @@ + + + + +DescribeConfigsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeConfigsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeConfigsOptions> +
org.apache.kafka.clients.admin.DescribeConfigsOptions
+
+
+
+
+
public class DescribeConfigsOptions +extends AbstractOptions<DescribeConfigsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeConfigsOptions

      +
      public DescribeConfigsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      timeoutMs

      +
      public DescribeConfigsOptions timeoutMs(Integer timeoutMs)
      +
      Set the timeout in milliseconds for this operation or null if the default api timeout for the + AdminClient should be used.
      +
      +
      Overrides:
      +
      timeoutMs in class AbstractOptions<DescribeConfigsOptions>
      +
      +
      +
    • +
    • +
      +

      includeSynonyms

      +
      public boolean includeSynonyms()
      +
      Return true if synonym configs should be returned in the response.
      +
      +
    • +
    • +
      +

      includeDocumentation

      +
      public boolean includeDocumentation()
      +
      Return true if config documentation should be returned in the response.
      +
      +
    • +
    • +
      +

      includeSynonyms

      +
      public DescribeConfigsOptions includeSynonyms(boolean includeSynonyms)
      +
      Set to true if synonym configs should be returned in the response.
      +
      +
    • +
    • +
      +

      includeDocumentation

      +
      public DescribeConfigsOptions includeDocumentation(boolean includeDocumentation)
      +
      Set to true if config documentation should be returned in the response.
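For context, a minimal sketch of a describeConfigs call using these options to pull synonyms and documentation for a topic's configs; the broker address and topic name are placeholders:

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.DescribeConfigsOptions;
import org.apache.kafka.common.config.ConfigResource;

public class DescribeConfigsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic"); // placeholder
            DescribeConfigsOptions options = new DescribeConfigsOptions()
                    .includeSynonyms(true)       // also return config synonyms
                    .includeDocumentation(true); // also return per-config documentation
            Config config = admin.describeConfigs(List.of(topic), options).values().get(topic).get();
            config.entries().forEach(entry ->
                    System.out.println(entry.name() + " = " + entry.value()));
        }
    }
}
```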
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeConfigsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeConfigsResult.html new file mode 100644 index 000000000..5bdc0f175 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeConfigsResult.html @@ -0,0 +1,149 @@ + + + + +DescribeConfigsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeConfigsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeConfigsResult
+
+
+
+
public class DescribeConfigsResult +extends Object
+
The result of the Admin.describeConfigs(Collection) call.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public Map<ConfigResource,KafkaFuture<Config>> values()
      +
      Return a map from resources to futures which can be used to check the status of the configuration for each + resource.
      +
      +
    • +
    • +
      +

      all

      + +
      Return a future which succeeds only if all the config descriptions succeed.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeConsumerGroupsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeConsumerGroupsOptions.html new file mode 100644 index 000000000..1a9f90433 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeConsumerGroupsOptions.html @@ -0,0 +1,174 @@ + + + + +DescribeConsumerGroupsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeConsumerGroupsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeConsumerGroupsOptions> +
org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions
+
+
+
+
+
public class DescribeConsumerGroupsOptions +extends AbstractOptions<DescribeConsumerGroupsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeConsumerGroupsOptions

      +
      public DescribeConsumerGroupsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      includeAuthorizedOperations

      +
      public DescribeConsumerGroupsOptions includeAuthorizedOperations(boolean includeAuthorizedOperations)
      +
      +
    • +
    • +
      +

      includeAuthorizedOperations

      +
      public boolean includeAuthorizedOperations()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeConsumerGroupsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeConsumerGroupsResult.html new file mode 100644 index 000000000..e1c3c8fe4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeConsumerGroupsResult.html @@ -0,0 +1,174 @@ + + + + +DescribeConsumerGroupsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeConsumerGroupsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeConsumerGroupsResult
+
+
+
+
public class DescribeConsumerGroupsResult +extends Object
+ +
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeDelegationTokenOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeDelegationTokenOptions.html new file mode 100644 index 000000000..7f7fc4bb3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeDelegationTokenOptions.html @@ -0,0 +1,184 @@ + + + + +DescribeDelegationTokenOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeDelegationTokenOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeDelegationTokenOptions> +
org.apache.kafka.clients.admin.DescribeDelegationTokenOptions
+
+
+
+
+
public class DescribeDelegationTokenOptions +extends AbstractOptions<DescribeDelegationTokenOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeDelegationTokenOptions

      +
      public DescribeDelegationTokenOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      owners

      + +
      If owners is null, all tokens owned by the user and tokens for which the user has Describe permission will be returned.
      +
      +
      Parameters:
      +
      owners - The owners that we want to describe delegation tokens for
      +
      Returns:
      +
      this instance
      +
      +
      +
    • +
    • +
      +

      owners

      +
      public List<KafkaPrincipal> owners()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
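As a rough usage sketch for the owners filter above (not taken from the Kafka docs): the bootstrap address and the principal "User:alice" are placeholders, and the delegationTokens() accessor on the result class is assumed from the corresponding DescribeDelegationTokenResult API.

```java
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeDelegationTokenOptions;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.token.delegation.DelegationToken;

public class DescribeTokensExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            // Restrict the result to tokens owned by "User:alice". Passing no options
            // (or a null owner list) would return every token the caller owns or is
            // allowed to describe.
            DescribeDelegationTokenOptions options = new DescribeDelegationTokenOptions()
                .owners(List.of(new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "alice")));

            List<DelegationToken> tokens =
                admin.describeDelegationToken(options).delegationTokens().get();
            tokens.forEach(t -> System.out.println(t.tokenInfo()));
        }
    }
}
```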
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeDelegationTokenResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeDelegationTokenResult.html new file mode 100644 index 000000000..30af8528c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeDelegationTokenResult.html @@ -0,0 +1,135 @@ + + + + +DescribeDelegationTokenResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeDelegationTokenResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeDelegationTokenResult
+
+
+
+
public class DescribeDelegationTokenResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeFeaturesOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeFeaturesOptions.html new file mode 100644 index 000000000..eea76e194 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeFeaturesOptions.html @@ -0,0 +1,133 @@ + + + + +DescribeFeaturesOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeFeaturesOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeFeaturesOptions> +
org.apache.kafka.clients.admin.DescribeFeaturesOptions
+
+
+
+
+
public class DescribeFeaturesOptions +extends AbstractOptions<DescribeFeaturesOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeFeaturesOptions

      +
      public DescribeFeaturesOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeFeaturesResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeFeaturesResult.html new file mode 100644 index 000000000..ed0288935 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeFeaturesResult.html @@ -0,0 +1,134 @@ + + + + +DescribeFeaturesResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeFeaturesResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeFeaturesResult
+
+
+
+
public class DescribeFeaturesResult +extends Object
+
The result of the Admin.describeFeatures(DescribeFeaturesOptions) call. + + The API of this class is evolving, see Admin for details.
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeLogDirsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeLogDirsOptions.html new file mode 100644 index 000000000..f8409c946 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeLogDirsOptions.html @@ -0,0 +1,133 @@ + + + + +DescribeLogDirsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeLogDirsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeLogDirsOptions> +
org.apache.kafka.clients.admin.DescribeLogDirsOptions
+
+
+
+
+
public class DescribeLogDirsOptions +extends AbstractOptions<DescribeLogDirsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeLogDirsOptions

      +
      public DescribeLogDirsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeLogDirsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeLogDirsResult.html new file mode 100644 index 000000000..889af7ee7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeLogDirsResult.html @@ -0,0 +1,150 @@ + + + + +DescribeLogDirsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeLogDirsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeLogDirsResult
+
+
+
+
public class DescribeLogDirsResult +extends Object
+
The result of the Admin.describeLogDirs(Collection) call.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      descriptions

      +
      public Map<Integer,KafkaFuture<Map<String,LogDirDescription>>> descriptions()
      +
      Return a map from brokerId to future which can be used to check the information of partitions on each individual broker. + The result of the future is a map from broker log directory path to a description of that log directory.
      +
      +
    • +
    • +
      +

      allDescriptions

      +
      public KafkaFuture<Map<Integer,Map<String,LogDirDescription>>> allDescriptions()
      +
      Return a future which succeeds only if all the brokers have responded without error. + The result of the future is a map from brokerId to a map from broker log directory path + to a description of that log directory.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
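A minimal sketch of using descriptions()/allDescriptions() as documented above (illustrative only): the bootstrap address and broker ids 0 and 1 are placeholders, and the replicaInfos() accessor on LogDirDescription is an assumption drawn from that class's API.

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.LogDirDescription;

public class DescribeLogDirsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            // allDescriptions() only succeeds if every queried broker responded,
            // so a per-broker failure surfaces here as an ExecutionException.
            Map<Integer, Map<String, LogDirDescription>> logDirs =
                admin.describeLogDirs(List.of(0, 1)).allDescriptions().get();

            logDirs.forEach((brokerId, dirs) ->
                dirs.forEach((path, description) ->
                    System.out.println("broker " + brokerId + " dir " + path
                        + " hosts " + description.replicaInfos().size() + " replicas")));
        }
    }
}
```

Using descriptions() instead yields one future per broker, which lets a caller tolerate individual broker failures rather than failing the whole request.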
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeMetadataQuorumOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeMetadataQuorumOptions.html new file mode 100644 index 000000000..fbd69666c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeMetadataQuorumOptions.html @@ -0,0 +1,133 @@ + + + + +DescribeMetadataQuorumOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeMetadataQuorumOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeMetadataQuorumOptions> +
org.apache.kafka.clients.admin.DescribeMetadataQuorumOptions
+
+
+
+
+
public class DescribeMetadataQuorumOptions +extends AbstractOptions<DescribeMetadataQuorumOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeMetadataQuorumOptions

      +
      public DescribeMetadataQuorumOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeMetadataQuorumResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeMetadataQuorumResult.html new file mode 100644 index 000000000..5af174f97 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeMetadataQuorumResult.html @@ -0,0 +1,135 @@ + + + + +DescribeMetadataQuorumResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeMetadataQuorumResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeMetadataQuorumResult
+
+
+
+
public class DescribeMetadataQuorumResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      quorumInfo

      +
      public KafkaFuture<QuorumInfo> quorumInfo()
      +
      Returns a future containing the QuorumInfo
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
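A short sketch of reading the quorumInfo() future above (illustrative, not from the Kafka docs): the bootstrap address is a placeholder, and the leaderId(), voters(), and observers() accessors are assumptions based on the QuorumInfo class, which is not reproduced in this section.

```java
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.QuorumInfo;

public class DescribeQuorumExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            // quorumInfo() resolves to a QuorumInfo describing the KRaft metadata quorum.
            QuorumInfo quorum = admin.describeMetadataQuorum().quorumInfo().get();
            System.out.println("leader: " + quorum.leaderId());
            System.out.println("voters: " + quorum.voters().size()
                + ", observers: " + quorum.observers().size());
        }
    }
}
```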
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeProducersOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeProducersOptions.html new file mode 100644 index 000000000..96d0b71b4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeProducersOptions.html @@ -0,0 +1,212 @@ + + + + +DescribeProducersOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeProducersOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeProducersOptions> +
org.apache.kafka.clients.admin.DescribeProducersOptions
+
+
+
+
+
public class DescribeProducersOptions +extends AbstractOptions<DescribeProducersOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeProducersOptions

      +
      public DescribeProducersOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeProducersResult.PartitionProducerState.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeProducersResult.PartitionProducerState.html new file mode 100644 index 000000000..9f15bb993 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeProducersResult.PartitionProducerState.html @@ -0,0 +1,175 @@ + + + + +DescribeProducersResult.PartitionProducerState (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeProducersResult.PartitionProducerState

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeProducersResult.PartitionProducerState
+
+
+
+
Enclosing class:
+
DescribeProducersResult
+
+
+
public static class DescribeProducersResult.PartitionProducerState +extends Object
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      PartitionProducerState

      +
      public PartitionProducerState(List<ProducerState> activeProducers)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeProducersResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeProducersResult.html new file mode 100644 index 000000000..b2312476c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeProducersResult.html @@ -0,0 +1,155 @@ + + + + +DescribeProducersResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeProducersResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeProducersResult
+
+
+
+
public class DescribeProducersResult +extends Object
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeReplicaLogDirsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeReplicaLogDirsOptions.html new file mode 100644 index 000000000..69e2b5593 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeReplicaLogDirsOptions.html @@ -0,0 +1,133 @@ + + + + +DescribeReplicaLogDirsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeReplicaLogDirsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeReplicaLogDirsOptions> +
org.apache.kafka.clients.admin.DescribeReplicaLogDirsOptions
+
+
+
+
+
public class DescribeReplicaLogDirsOptions +extends AbstractOptions<DescribeReplicaLogDirsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeReplicaLogDirsOptions

      +
      public DescribeReplicaLogDirsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.ReplicaLogDirInfo.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.ReplicaLogDirInfo.html new file mode 100644 index 000000000..33d6a15ff --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.ReplicaLogDirInfo.html @@ -0,0 +1,190 @@ + + + + +DescribeReplicaLogDirsResult.ReplicaLogDirInfo (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeReplicaLogDirsResult.ReplicaLogDirInfo

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo
+
+
+
+
Enclosing class:
+
DescribeReplicaLogDirsResult
+
+
+
public static class DescribeReplicaLogDirsResult.ReplicaLogDirInfo +extends Object
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      getCurrentReplicaLogDir

      +
      public String getCurrentReplicaLogDir()
      +
      The current log directory of the replica of this partition on the given broker. Null if no replica is found for this partition on the given broker.
      +
      +
    • +
    • +
      +

      getCurrentReplicaOffsetLag

      +
      public long getCurrentReplicaOffsetLag()
      +
      Defined as max(HW of partition - LEO of the replica, 0).
      +
      +
    • +
    • +
      +

      getFutureReplicaLogDir

      +
      public String getFutureReplicaLogDir()
      +
      The future log directory of the replica of this partition on the given broker. + Null if the replica of this partition is not being moved to another log directory on the given broker.
      +
      +
    • +
    • +
      +

      getFutureReplicaOffsetLag

      +
      public long getFutureReplicaOffsetLag()
      +
      The LEO of the replica minus the LEO of the future log of this replica in the destination log directory. -1 if either there is no replica for this partition or the replica of this partition is not being moved to another log directory on the given broker.
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
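To show how the ReplicaLogDirInfo accessors above fit together, here is a minimal sketch (illustrative only): the bootstrap address, topic "orders", partition 0, and broker id 1 are placeholders, and the all() aggregate on DescribeReplicaLogDirsResult is assumed from that result class.

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo;
import org.apache.kafka.common.TopicPartitionReplica;

public class ReplicaLogDirsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            // Illustrative replica: topic "orders", partition 0, on broker 1.
            TopicPartitionReplica replica = new TopicPartitionReplica("orders", 0, 1);

            Map<TopicPartitionReplica, ReplicaLogDirInfo> infos =
                admin.describeReplicaLogDirs(List.of(replica)).all().get();

            ReplicaLogDirInfo info = infos.get(replica);
            System.out.println("current dir: " + info.getCurrentReplicaLogDir()
                + ", lag: " + info.getCurrentReplicaOffsetLag());
            // getFutureReplicaLogDir() is null unless the replica is being moved
            // to another log directory on that broker.
            System.out.println("future dir: " + info.getFutureReplicaLogDir());
        }
    }
}
```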
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.html new file mode 100644 index 000000000..214998ccc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.html @@ -0,0 +1,162 @@ + + + + +DescribeReplicaLogDirsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeReplicaLogDirsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult
+
+
+
+
public class DescribeReplicaLogDirsResult +extends Object
+ +
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeShareGroupsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeShareGroupsOptions.html new file mode 100644 index 000000000..420635c63 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeShareGroupsOptions.html @@ -0,0 +1,176 @@ + + + + +DescribeShareGroupsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeShareGroupsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeShareGroupsOptions> +
org.apache.kafka.clients.admin.DescribeShareGroupsOptions
+
+
+
+
+
@Evolving +public class DescribeShareGroupsOptions +extends AbstractOptions<DescribeShareGroupsOptions>
+
Options for Admin.describeShareGroups(Collection, DescribeShareGroupsOptions). +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeShareGroupsOptions

      +
      public DescribeShareGroupsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      includeAuthorizedOperations

      +
      public DescribeShareGroupsOptions includeAuthorizedOperations(boolean includeAuthorizedOperations)
      +
      +
    • +
    • +
      +

      includeAuthorizedOperations

      +
      public boolean includeAuthorizedOperations()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeShareGroupsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeShareGroupsResult.html new file mode 100644 index 000000000..06c613863 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeShareGroupsResult.html @@ -0,0 +1,177 @@ + + + + +DescribeShareGroupsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeShareGroupsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeShareGroupsResult
+
+
+
+
@Evolving +public class DescribeShareGroupsResult +extends Object
+
The result of the KafkaAdminClient.describeShareGroups(Collection, DescribeShareGroupsOptions) call.

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeStreamsGroupsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeStreamsGroupsOptions.html new file mode 100644 index 000000000..cc8d23f0f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeStreamsGroupsOptions.html @@ -0,0 +1,176 @@ + + + + +DescribeStreamsGroupsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeStreamsGroupsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeStreamsGroupsOptions> +
org.apache.kafka.clients.admin.DescribeStreamsGroupsOptions
+
+
+
+
+
@Evolving +public class DescribeStreamsGroupsOptions +extends AbstractOptions<DescribeStreamsGroupsOptions>
+
Options for Admin.describeStreamsGroups(Collection, DescribeStreamsGroupsOptions). +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeStreamsGroupsOptions

      +
      public DescribeStreamsGroupsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      includeAuthorizedOperations

      +
      public DescribeStreamsGroupsOptions includeAuthorizedOperations(boolean includeAuthorizedOperations)
      +
      +
    • +
    • +
      +

      includeAuthorizedOperations

      +
      public boolean includeAuthorizedOperations()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeStreamsGroupsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeStreamsGroupsResult.html new file mode 100644 index 000000000..0b7c24e85 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeStreamsGroupsResult.html @@ -0,0 +1,177 @@ + + + + +DescribeStreamsGroupsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeStreamsGroupsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeStreamsGroupsResult
+
+
+
+
@Evolving +public class DescribeStreamsGroupsResult +extends Object
+
The result of the KafkaAdminClient.describeStreamsGroups(Collection, DescribeStreamsGroupsOptions) call.

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeTopicsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeTopicsOptions.html new file mode 100644 index 000000000..7892d1e9f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeTopicsOptions.html @@ -0,0 +1,224 @@ + + + + +DescribeTopicsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeTopicsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeTopicsOptions> +
org.apache.kafka.clients.admin.DescribeTopicsOptions
+
+
+
+
+
public class DescribeTopicsOptions +extends AbstractOptions<DescribeTopicsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeTopicsOptions

      +
      public DescribeTopicsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      timeoutMs

      +
      public DescribeTopicsOptions timeoutMs(Integer timeoutMs)
      +
      Set the timeout in milliseconds for this operation or null if the default api timeout for the + AdminClient should be used.
      +
      +
      Overrides:
      +
      timeoutMs in class AbstractOptions<DescribeTopicsOptions>
      +
      +
      +
    • +
    • +
      +

      includeAuthorizedOperations

      +
      public DescribeTopicsOptions includeAuthorizedOperations(boolean includeAuthorizedOperations)
      +
      +
    • +
    • +
      +

      partitionSizeLimitPerResponse

      +
      public DescribeTopicsOptions partitionSizeLimitPerResponse(int partitionSizeLimitPerResponse)
      +
      Sets the maximum number of partitions to be returned in a single response. +

      + This option: +

        +
      • Is only effective when using topic names (not topic IDs).
      • +
      • Will not be effective if it is larger than the server-side configuration + max.request.partition.size.limit. +
      • +
      +
      +
      Parameters:
      +
      partitionSizeLimitPerResponse - the maximum number of partitions per response
      +
      +
      +
    • +
    • +
      +

      includeAuthorizedOperations

      +
      public boolean includeAuthorizedOperations()
      +
      +
    • +
    • +
      +

      partitionSizeLimitPerResponse

      +
      public int partitionSizeLimitPerResponse()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
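A minimal sketch of combining the options documented above in one describe call (illustrative, not from the Kafka docs): the bootstrap address, topic name "orders", the 500-partition page size, and the 30-second timeout are placeholders, and the snippet assumes a reachable cluster.

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeTopicsOptions;
import org.apache.kafka.clients.admin.TopicDescription;

public class DescribeTopicsOptionsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            DescribeTopicsOptions options = new DescribeTopicsOptions()
                .includeAuthorizedOperations(true)   // also return the caller's permitted operations
                .partitionSizeLimitPerResponse(500)  // only effective when describing by topic name
                .timeoutMs(30_000);                  // override the client's default API timeout

            Map<String, TopicDescription> topics =
                admin.describeTopics(List.of("orders"), options).allTopicNames().get();
            topics.forEach((name, desc) ->
                System.out.println(name + " has " + desc.partitions().size() + " partitions"));
        }
    }
}
```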
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeTopicsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeTopicsResult.html new file mode 100644 index 000000000..0dd2c91dc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeTopicsResult.html @@ -0,0 +1,189 @@ + + + + +DescribeTopicsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeTopicsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeTopicsResult
+
+
+
+
public class DescribeTopicsResult +extends Object
+
The result of the Admin.describeTopics(Collection) call.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      topicIdValues

      +
      public Map<Uuid,KafkaFuture<TopicDescription>> topicIdValues()
      + +
      +
      Returns:
      +
      a map from topic IDs to futures which can be used to check the status of individual topics if the request used topic IDs; otherwise null is returned.
      +
      +
      +
    • +
    • +
      +

      topicNameValues

      +
      public Map<String,KafkaFuture<TopicDescription>> topicNameValues()
      + +
      +
      Returns:
      +
      a map from topic names to futures which can be used to check the status of individual topics if the request used topic names; otherwise null is returned.
      +
      +
      +
    • +
    • +
      +

      allTopicNames

      +
      public KafkaFuture<Map<String,TopicDescription>> allTopicNames()
      +
      +
      Returns:
      +
      A future map from topic names to descriptions, which can be used to check the status of individual descriptions if the describe topic request used topic names; otherwise null is returned. The future succeeds only if all the topic descriptions succeed.
      +
      +
      +
    • +
    • +
      +

      allTopicIds

      +
      public KafkaFuture<Map<Uuid,TopicDescription>> allTopicIds()
      +
      +
      Returns:
      +
      A future map from topic ids to descriptions, which can be used to check the status of individual descriptions if the describe topic request used topic ids; otherwise null is returned. The future succeeds only if all the topic descriptions succeed.
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
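Because the id-keyed and name-keyed accessors above are mutually exclusive, a caller has to describe topics the same way it addressed them. The sketch below (illustrative only) looks up the id of a topic named "orders" via listTopics() and then describes it by id; the bootstrap address and topic name are placeholders, and the TopicListing.topicId() accessor is an assumption from that class.

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.Uuid;

public class DescribeTopicsByIdExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            // Find the id of the illustrative topic "orders".
            Uuid topicId = admin.listTopics().listings().get().stream()
                .filter(listing -> listing.name().equals("orders"))
                .findFirst().orElseThrow()
                .topicId();

            // Since the request uses topic ids, topicIdValues()/allTopicIds() are
            // populated and the name-keyed accessors return null.
            Map<Uuid, TopicDescription> byId = admin
                .describeTopics(TopicCollection.ofTopicIds(List.of(topicId)))
                .allTopicIds()
                .get();
            byId.forEach((id, desc) -> System.out.println(id + " -> " + desc.name()));
        }
    }
}
```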
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeTransactionsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeTransactionsOptions.html new file mode 100644 index 000000000..68f65ca8b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeTransactionsOptions.html @@ -0,0 +1,168 @@ + + + + +DescribeTransactionsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeTransactionsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeTransactionsOptions> +
org.apache.kafka.clients.admin.DescribeTransactionsOptions
+
+
+
+
+
public class DescribeTransactionsOptions +extends AbstractOptions<DescribeTransactionsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeTransactionsOptions

      +
      public DescribeTransactionsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeTransactionsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeTransactionsResult.html new file mode 100644 index 000000000..e56052754 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeTransactionsResult.html @@ -0,0 +1,166 @@ + + + + +DescribeTransactionsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeTransactionsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeTransactionsResult
+
+
+
+
public class DescribeTransactionsResult +extends Object
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeUserScramCredentialsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeUserScramCredentialsOptions.html new file mode 100644 index 000000000..45540b6d5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeUserScramCredentialsOptions.html @@ -0,0 +1,133 @@ + + + + +DescribeUserScramCredentialsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeUserScramCredentialsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<DescribeUserScramCredentialsOptions> +
org.apache.kafka.clients.admin.DescribeUserScramCredentialsOptions
+
+
+
+
+
public class DescribeUserScramCredentialsOptions +extends AbstractOptions<DescribeUserScramCredentialsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      DescribeUserScramCredentialsOptions

      +
      public DescribeUserScramCredentialsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.html new file mode 100644 index 000000000..e81d4cd4b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.html @@ -0,0 +1,175 @@ + + + + +DescribeUserScramCredentialsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class DescribeUserScramCredentialsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.DescribeUserScramCredentialsResult
+
+
+
+
public class DescribeUserScramCredentialsResult +extends Object
+
The result of the Admin.describeUserScramCredentials() call.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      all

      + +
      +
      Returns:
      +
      a future for the results of all described users with map keys (one per user) being consistent with the + contents of the list returned by users(). The future will complete successfully only if all such user + descriptions complete successfully.
      +
      +
      +
    • +
    • +
      +

      users

      +
      public KafkaFuture<List<String>> users()
      +
      +
      Returns:
      +
      a future indicating the distinct users that meet the request criteria and that have at least one credential. The future will not complete successfully if the caller is not authorized to perform the describe operation; otherwise, it will complete successfully as long as the list of users with credentials can be determined within some hard-coded timeout period. Note that the returned list will not include users that do not exist or have no credentials: a request to describe an explicit list of users, none of which existed or had a credential, will result in an empty list being returned here. A returned list will include users that have a credential but that could not be described.
      +
      +
      +
    • +
    • +
      +

      description

      +
      public KafkaFuture<UserScramCredentialsDescription> description(String userName)
      +
      +
      Parameters:
      +
      userName - the name of the user description being requested
      +
      Returns:
      +
      a future indicating the description results for the given user. The future will complete exceptionally if + the future returned by users() completes exceptionally. Note that if the given user does not exist in + the list of described users then the returned future will complete exceptionally with + ResourceNotFoundException.
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
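A short sketch of the all() accessor documented above (illustrative, not from the Kafka docs): the bootstrap address and the user "alice" are placeholders, and the credentialInfos(), mechanism(), and iterations() accessors are assumptions from the UserScramCredentialsDescription and ScramCredentialInfo classes, which are not reproduced in this section.

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.UserScramCredentialsDescription;

public class DescribeScramExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            // Describing a user with no SCRAM credential makes the corresponding
            // future fail with ResourceNotFoundException, as described above.
            Map<String, UserScramCredentialsDescription> descriptions =
                admin.describeUserScramCredentials(List.of("alice")).all().get();

            descriptions.forEach((user, description) ->
                description.credentialInfos().forEach(info ->
                    System.out.println(user + ": " + info.mechanism()
                        + " iterations=" + info.iterations())));
        }
    }
}
```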
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ElectLeadersOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/ElectLeadersOptions.html new file mode 100644 index 000000000..cc697a3f0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ElectLeadersOptions.html @@ -0,0 +1,135 @@ + + + + +ElectLeadersOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ElectLeadersOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<ElectLeadersOptions> +
org.apache.kafka.clients.admin.ElectLeadersOptions
+
+
+
+
+
public final class ElectLeadersOptions +extends AbstractOptions<ElectLeadersOptions>
+
Options for Admin.electLeaders(ElectionType, Set, ElectLeadersOptions). + + The API of this class is evolving, see Admin for details.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ElectLeadersOptions

      +
      public ElectLeadersOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ElectLeadersResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/ElectLeadersResult.html new file mode 100644 index 000000000..c1f58b58f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ElectLeadersResult.html @@ -0,0 +1,151 @@ + + + + +ElectLeadersResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ElectLeadersResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ElectLeadersResult
+
+
+
+
public final class ElectLeadersResult +extends Object
+
The result of Admin.electLeaders(ElectionType, Set, ElectLeadersOptions) + + The API of this class is evolving, see Admin for details.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      partitions

      + +

      Get a future for the topic partitions for which a leader election was attempted. + If the election succeeded then the value for a topic partition will be the empty Optional. + Otherwise the election failed and the Optional will be set with the error.

      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds if all the topic elections succeed.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
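To illustrate the per-partition Optional semantics of partitions() described above, here is a minimal sketch (not from the Kafka docs): the bootstrap address, topic "orders", and partition 0 are placeholders, and the snippet triggers a preferred-leader election against an assumed live cluster.

```java
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.Set;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.TopicPartition;

public class ElectLeadersExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            Set<TopicPartition> partitions = Set.of(new TopicPartition("orders", 0));

            Map<TopicPartition, Optional<Throwable>> outcome =
                admin.electLeaders(ElectionType.PREFERRED, partitions).partitions().get();

            // An empty Optional means the election for that partition succeeded;
            // otherwise the Optional carries the error.
            outcome.forEach((tp, error) ->
                System.out.println(tp + " -> "
                    + error.map(Throwable::getMessage).orElse("ok")));
        }
    }
}
```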
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/EndpointType.html b/static/41/javadoc/org/apache/kafka/clients/admin/EndpointType.html new file mode 100644 index 000000000..22e9a5e3b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/EndpointType.html @@ -0,0 +1,244 @@ + + + + +EndpointType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Enum Class EndpointType

+
+
java.lang.Object +
java.lang.Enum<EndpointType> +
org.apache.kafka.clients.admin.EndpointType
+
+
+
+
+
All Implemented Interfaces:
+
Serializable, Comparable<EndpointType>, Constable
+
+
+
public enum EndpointType +extends Enum<EndpointType>
+
Identifies the endpoint type, as specified by KIP-919.
+
+
+ +
+
+
    + +
  • +
    +

    Enum Constant Details

    +
      +
    • +
      +

      UNKNOWN

      +
      public static final EndpointType UNKNOWN
      +
      +
    • +
    • +
      +

      BROKER

      +
      public static final EndpointType BROKER
      +
      +
    • +
    • +
      +

      CONTROLLER

      +
      public static final EndpointType CONTROLLER
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public static EndpointType[] values()
      +
      Returns an array containing the constants of this enum class, in +the order they are declared.
      +
      +
      Returns:
      +
      an array containing the constants of this enum class, in the order they are declared
      +
      +
      +
    • +
    • +
      +

      valueOf

      +
      public static EndpointType valueOf(String name)
      +
      Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
      +
      +
      Parameters:
      +
      name - the name of the enum constant to be returned.
      +
      Returns:
      +
      the enum constant with the specified name
      +
      Throws:
      +
      IllegalArgumentException - if this enum class has no constant with the specified name
      +
      NullPointerException - if the argument is null
      +
      +
      +
    • +
    • +
      +

      id

      +
      public byte id()
      +
      +
    • +
    • +
      +

      fromId

      +
      public static EndpointType fromId(byte id)
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ExpireDelegationTokenOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/ExpireDelegationTokenOptions.html new file mode 100644 index 000000000..689fc8764 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ExpireDelegationTokenOptions.html @@ -0,0 +1,179 @@ + + + + +ExpireDelegationTokenOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ExpireDelegationTokenOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<ExpireDelegationTokenOptions> +
org.apache.kafka.clients.admin.ExpireDelegationTokenOptions
+
+
+
+
+
public class ExpireDelegationTokenOptions +extends AbstractOptions<ExpireDelegationTokenOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ExpireDelegationTokenOptions

      +
      public ExpireDelegationTokenOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      expiryTimePeriodMs

      +
      public ExpireDelegationTokenOptions expiryTimePeriodMs(long expiryTimePeriodMs)
      +
      +
      Parameters:
      +
      expiryTimePeriodMs - the time period after which this token should expire. If expiryTimePeriodMs >= 0, the token's expiration timestamp is updated to min(now + expiryTimePeriodMs, maxTimestamp); if expiryTimePeriodMs < 0, the token is expired immediately.
      +
      +
      +
    • +
    • +
      +

      expiryTimePeriodMs

      +
      public long expiryTimePeriodMs()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
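A minimal sketch of expiring a token immediately using the option above (illustrative only): the bootstrap address is a placeholder, loadTokenHmacFromSomewhere() is a hypothetical helper standing in for however the token HMAC was obtained, and the expiryTimestamp() accessor is assumed from the ExpireDelegationTokenResult class.

```java
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ExpireDelegationTokenOptions;

public class ExpireTokenExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            // Hypothetical helper: the HMAC would normally come from a previous
            // createDelegationToken or describeDelegationToken call.
            byte[] tokenHmac = loadTokenHmacFromSomewhere();

            // A negative period expires the token immediately; a non-negative period
            // moves the expiry to min(now + period, maxTimestamp).
            ExpireDelegationTokenOptions options =
                new ExpireDelegationTokenOptions().expiryTimePeriodMs(-1L);

            long newExpiry =
                admin.expireDelegationToken(tokenHmac, options).expiryTimestamp().get();
            System.out.println("token now expires at " + newExpiry);
        }
    }

    private static byte[] loadTokenHmacFromSomewhere() {
        throw new UnsupportedOperationException("placeholder for obtaining the token HMAC");
    }
}
```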
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ExpireDelegationTokenResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/ExpireDelegationTokenResult.html new file mode 100644 index 000000000..97de50123 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ExpireDelegationTokenResult.html @@ -0,0 +1,135 @@ + + + + +ExpireDelegationTokenResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ExpireDelegationTokenResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ExpireDelegationTokenResult
+
+
+
+
public class ExpireDelegationTokenResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      expiryTimestamp

      +
      public KafkaFuture<Long> expiryTimestamp()
      +
      Returns a future which yields expiry timestamp
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/FeatureMetadata.html b/static/41/javadoc/org/apache/kafka/clients/admin/FeatureMetadata.html new file mode 100644 index 000000000..cd72bff0c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/FeatureMetadata.html @@ -0,0 +1,204 @@ + + + + +FeatureMetadata (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class FeatureMetadata

+
+
java.lang.Object +
org.apache.kafka.clients.admin.FeatureMetadata
+
+
+
+
public class FeatureMetadata +extends Object
+
Encapsulates details about finalized as well as supported features. This is particularly useful + to hold the result returned by the Admin.describeFeatures(DescribeFeaturesOptions) API.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      finalizedFeatures

      +
      public Map<String,FinalizedVersionRange> finalizedFeatures()
      +
      Returns a map of finalized feature versions. Each entry in the map contains a key being a + feature name and the value being a range of version levels supported by every broker in the + cluster.
      +
      +
    • +
    • +
      +

      finalizedFeaturesEpoch

      +
      public Optional<Long> finalizedFeaturesEpoch()
      +
      The epoch for the finalized features. + If the returned value is empty, it means the finalized features are absent/unavailable.
      +
      +
    • +
    • +
      +

      supportedFeatures

      +
      public Map<String,SupportedVersionRange> supportedFeatures()
      +
      Returns a map of supported feature versions. Each entry in the map contains a key being a + feature name and the value being a range of versions supported by a particular broker in the + cluster.
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object other)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
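A short sketch of reading FeatureMetadata as described above (illustrative only): the bootstrap address is a placeholder, and the featureMetadata() accessor on DescribeFeaturesResult is an assumption, since that result page's method details are not reproduced in this section.

```java
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.FeatureMetadata;

public class DescribeFeaturesExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            FeatureMetadata metadata = admin.describeFeatures().featureMetadata().get();

            // Empty epoch means the finalized features are absent/unavailable.
            System.out.println("finalized features epoch: " + metadata.finalizedFeaturesEpoch());

            metadata.finalizedFeatures().forEach((feature, range) ->
                System.out.println(feature + " finalized at levels "
                    + range.minVersionLevel() + ".." + range.maxVersionLevel()));
            metadata.supportedFeatures().forEach((feature, range) ->
                System.out.println(feature + " supported range " + range));
        }
    }
}
```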
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/FeatureUpdate.UpgradeType.html b/static/41/javadoc/org/apache/kafka/clients/admin/FeatureUpdate.UpgradeType.html new file mode 100644 index 000000000..b5d15f205 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/FeatureUpdate.UpgradeType.html @@ -0,0 +1,255 @@ + + + + +FeatureUpdate.UpgradeType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Enum Class FeatureUpdate.UpgradeType

+
+
java.lang.Object +
java.lang.Enum<FeatureUpdate.UpgradeType> +
org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType
+
+
+
+
+
All Implemented Interfaces:
+
Serializable, Comparable<FeatureUpdate.UpgradeType>, Constable
+
+
+
Enclosing class:
+
FeatureUpdate
+
+
+
public static enum FeatureUpdate.UpgradeType +extends Enum<FeatureUpdate.UpgradeType>
+
+
+ +
+
+
    + +
  • +
    +

    Enum Constant Details

    + +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public static FeatureUpdate.UpgradeType[] values()
      +
      Returns an array containing the constants of this enum class, in +the order they are declared.
      +
      +
      Returns:
      +
      an array containing the constants of this enum class, in the order they are declared
      +
      +
      +
    • +
    • +
      +

      valueOf

      +
      public static FeatureUpdate.UpgradeType valueOf(String name)
      +
      Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
      +
      +
      Parameters:
      +
      name - the name of the enum constant to be returned.
      +
      Returns:
      +
      the enum constant with the specified name
      +
      Throws:
      +
      IllegalArgumentException - if this enum class has no constant with the specified name
      +
      NullPointerException - if the argument is null
      +
      +
      +
    • +
    • +
      +

      code

      +
      public byte code()
      +
      +
    • +
    • +
      +

      fromCode

      +
      public static FeatureUpdate.UpgradeType fromCode(int code)
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/FeatureUpdate.html b/static/41/javadoc/org/apache/kafka/clients/admin/FeatureUpdate.html new file mode 100644 index 000000000..257093b94 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/FeatureUpdate.html @@ -0,0 +1,235 @@ + + + + +FeatureUpdate (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class FeatureUpdate

+
+
java.lang.Object +
org.apache.kafka.clients.admin.FeatureUpdate
+
+
+
+
public class FeatureUpdate +extends Object
+
Encapsulates details about an update to a finalized feature.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      FeatureUpdate

      +
      public FeatureUpdate(short maxVersionLevel, + FeatureUpdate.UpgradeType upgradeType)
      +
      +
      Parameters:
      +
      maxVersionLevel - The new maximum version level for the finalized feature. A value of zero is special and indicates that the update is intended to delete the finalized feature; it should be accompanied by setting the upgradeType to safe or unsafe.
      +
      upgradeType - Indicates what kind of upgrade should be performed in this operation: UPGRADE upgrades the feature level; SAFE_DOWNGRADE permits only downgrades which do not result in metadata loss; UNSAFE_DOWNGRADE permits any downgrade, including those which may result in metadata loss.
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      maxVersionLevel

      +
      public short maxVersionLevel()
      +
      +
    • +
    • +
      +

      upgradeType

      +
      public FeatureUpdate.UpgradeType upgradeType()
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object other)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
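A minimal sketch of applying a FeatureUpdate through Admin.updateFeatures (illustrative only): the bootstrap address, the feature name "metadata.version", and the target level 20 are placeholders, and the UpdateFeaturesOptions/UpdateFeaturesResult usage is assumed from the corresponding Admin API.

```java
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.FeatureUpdate;
import org.apache.kafka.clients.admin.UpdateFeaturesOptions;

public class UpdateFeatureExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            // Request an upgrade of the illustrative feature to level 20. The UPGRADE
            // type forbids downgrades, so this fails if 20 is below the current level.
            FeatureUpdate update =
                new FeatureUpdate((short) 20, FeatureUpdate.UpgradeType.UPGRADE);

            admin.updateFeatures(Map.of("metadata.version", update),
                                 new UpdateFeaturesOptions())
                 .all()
                 .get();
        }
    }
}
```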
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/FenceProducersOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/FenceProducersOptions.html new file mode 100644 index 000000000..b715565da --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/FenceProducersOptions.html @@ -0,0 +1,168 @@ + + + + +FenceProducersOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class FenceProducersOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<FenceProducersOptions> +
org.apache.kafka.clients.admin.FenceProducersOptions
+
+
+
+
+
public class FenceProducersOptions +extends AbstractOptions<FenceProducersOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      FenceProducersOptions

      +
      public FenceProducersOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/FenceProducersResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/FenceProducersResult.html new file mode 100644 index 000000000..19af6176a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/FenceProducersResult.html @@ -0,0 +1,173 @@ + + + + +FenceProducersResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class FenceProducersResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.FenceProducersResult
+
+
+
+
public class FenceProducersResult +extends Object
+
The result of the Admin.fenceProducers(Collection) call.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      fencedProducers

      +
      public Map<String,KafkaFuture<Void>> fencedProducers()
      +
      Return a map from transactional ID to futures which can be used to check the status of + individual fencings.
      +
      +
    • +
    • +
      +

      producerId

      +
      public KafkaFuture<Long> producerId(String transactionalId)
      +
      Returns a future that provides the producer ID generated while initializing the given transaction when the request completes.
      +
      +
    • +
    • +
      +

      epochId

      +
      public KafkaFuture<Short> epochId(String transactionalId)
      +
      Returns a future that provides the epoch ID generated while initializing the given transaction when the request completes.
      +
      +
    • +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds only if all the producer fencings succeed.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
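To show the accessors above together, here is a minimal fencing sketch (illustrative, not from the Kafka docs): the bootstrap address and the transactional id "payments-tx" are placeholders, and the snippet assumes a cluster where that transactional id exists.

```java
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.FenceProducersResult;

public class FenceProducersExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            // Fencing the transactional id prevents the producer currently using it
            // from committing further transactions.
            FenceProducersResult result = admin.fenceProducers(List.of("payments-tx"));
            result.all().get();

            System.out.println("fenced producer id: " + result.producerId("payments-tx").get()
                + ", epoch: " + result.epochId("payments-tx").get());
        }
    }
}
```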
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/FinalizedVersionRange.html b/static/41/javadoc/org/apache/kafka/clients/admin/FinalizedVersionRange.html new file mode 100644 index 000000000..6157ca4a4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/FinalizedVersionRange.html @@ -0,0 +1,221 @@ + + + + +FinalizedVersionRange (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class FinalizedVersionRange

+
+
java.lang.Object +
org.apache.kafka.clients.admin.FinalizedVersionRange
+
+
+
+
public class FinalizedVersionRange +extends Object
+
Represents a range of version levels supported by every broker in a cluster for some feature.
+
+
+
    + +
  • +
    +

    Constructor Summary

    +
    Constructors
    +
    +
    Constructor
    +
    Description
    +
    FinalizedVersionRange(short minVersionLevel, + short maxVersionLevel)
    +
    +
    Raises an exception unless the following condition is met: + minVersionLevel >= 1 and maxVersionLevel >= 1 and maxVersionLevel >= minVersionLevel.
    +
    +
    +
    +
  • + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    +
    boolean
    +
    equals(Object other)
    +
     
    +
    int
    + +
     
    +
    short
    + +
     
    +
    short
    + +
     
    + + +
     
    +
    +
    +
    +
    +

    Methods inherited from class java.lang.Object

    +getClass, notify, notifyAll, wait, wait, wait
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      FinalizedVersionRange

      +
      public FinalizedVersionRange(short minVersionLevel, + short maxVersionLevel)
      +
      Raises an exception unless the following condition is met: + minVersionLevel >= 1 and maxVersionLevel >= 1 and maxVersionLevel >= minVersionLevel.
      +
      +
      Parameters:
      +
      minVersionLevel - The minimum version level value.
      +
      maxVersionLevel - The maximum version level value.
      +
      Throws:
      +
      IllegalArgumentException - Raised when the condition described above is not met.
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      minVersionLevel

      +
      public short minVersionLevel()
      +
      +
    • +
    • +
      +

      maxVersionLevel

      +
      public short maxVersionLevel()
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object other)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ForwardingAdmin.html b/static/41/javadoc/org/apache/kafka/clients/admin/ForwardingAdmin.html new file mode 100644 index 000000000..b8eb08a49 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ForwardingAdmin.html @@ -0,0 +1,2148 @@ + + + + +ForwardingAdmin (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ForwardingAdmin

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ForwardingAdmin
+
+
+
+
All Implemented Interfaces:
+
AutoCloseable, Admin
+
+
+
public class ForwardingAdmin +extends Object +implements Admin
+
ForwardingAdmin is the default value of forwarding.admin.class in MirrorMaker. + Users who wish to customize the MirrorMaker behaviour for the creation of topics and access control lists can extend this + class without needing to provide a whole implementation of Admin. + The class must have a constructor with signature (Map<String, Object> config) for configuring + a decorated KafkaAdminClient and any other clients needed for external resource management.
+
+
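Since the description above is about extending this class rather than calling it, here is a minimal sketch of a subclass (illustrative only; the class and package names are made up) that logs topic creation before delegating to the decorated KafkaAdminClient, and that keeps the (Map<String, Object>) constructor MirrorMaker needs for reflective instantiation via forwarding.admin.class.

```java
import java.util.Collection;
import java.util.Map;

import org.apache.kafka.clients.admin.CreateTopicsOptions;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.ForwardingAdmin;
import org.apache.kafka.clients.admin.NewTopic;

/** Hypothetical example: forwarding.admin.class=com.example.LoggingForwardingAdmin */
public class LoggingForwardingAdmin extends ForwardingAdmin {

    // The (Map<String, Object> configs) constructor is required so MirrorMaker can
    // instantiate the class reflectively.
    public LoggingForwardingAdmin(Map<String, Object> configs) {
        super(configs);
    }

    @Override
    public CreateTopicsResult createTopics(Collection<NewTopic> newTopics,
                                           CreateTopicsOptions options) {
        // Custom behaviour before delegating to the decorated KafkaAdminClient.
        newTopics.forEach(t -> System.out.println("creating mirrored topic " + t.name()));
        return super.createTopics(newTopics, options);
    }
}
```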
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ForwardingAdmin

      +
      public ForwardingAdmin(Map<String,Object> configs)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      close

      +
      public void close(Duration timeout)
      +
      Description copied from interface: Admin
      +
      Close the Admin client and release all associated resources. +

      + The close operation has a grace period during which current operations will be allowed to + complete, specified by the given duration. + New operations will not be accepted during the grace period. Once the grace period is over, + all operations that have not yet been completed will be aborted with a TimeoutException.

      +
      +
      Specified by:
      +
      close in interface Admin
      +
      Parameters:
      +
      timeout - The time to use for the wait time.
      +
      +
      +
    • +
    • +
      +

      createTopics

      +
      public CreateTopicsResult createTopics(Collection<NewTopic> newTopics, + CreateTopicsOptions options)
      +
      Description copied from interface: Admin
      +
      Create a batch of new topics. +

      + This operation is not transactional so it may succeed for some topics while fail for others. +

      + It may take several seconds after CreateTopicsResult returns + success for all the brokers to become aware that the topics have been created. + During this time, Admin.listTopics() and Admin.describeTopics(Collection) + may not return information about the new topics. +

      + This operation is supported by brokers with version 0.10.1.0 or higher. The validateOnly option is supported + from version 0.10.2.0.

      +
      +
      Specified by:
      +
      createTopics in interface Admin
      +
      Parameters:
      +
      newTopics - The new topics to create.
      +
      options - The options to use when creating the new topics.
      +
      Returns:
      +
      The CreateTopicsResult.
      +
      +
      +
    • +
    • +
      +

      deleteTopics

      +
      public DeleteTopicsResult deleteTopics(TopicCollection topics, + DeleteTopicsOptions options)
      +
      Description copied from interface: Admin
      +
      Delete a batch of topics. +

      + This operation is not transactional so it may succeed for some topics while fail for others. +

      + It may take several seconds after the DeleteTopicsResult returns + success for all the brokers to become aware that the topics are gone. + During this time, Admin.listTopics() and Admin.describeTopics(Collection) + may continue to return information about the deleted topics. +

      + If delete.topic.enable is set to false on the brokers, an exception will be returned to the client indicating + that topic deletion is disabled. +

      + When using topic IDs, this operation is supported by brokers with inter-broker protocol 2.8 or higher. + When using topic names, this operation is supported by brokers with version 0.10.1.0 or higher.

      +
      +
      Specified by:
      +
      deleteTopics in interface Admin
      +
      Parameters:
      +
      topics - The topics to delete.
      +
      options - The options to use when deleting the topics.
      +
      Returns:
      +
      The DeleteTopicsResult.
      +
      +
      +
    • +
    • +
      +

      listTopics

      +
      public ListTopicsResult listTopics(ListTopicsOptions options)
      +
      Description copied from interface: Admin
      +
      List the topics available in the cluster.
      +
      +
      Specified by:
      +
      listTopics in interface Admin
      +
      Parameters:
      +
      options - The options to use when listing the topics.
      +
      Returns:
      +
      The ListTopicsResult.
      +
      +
      +
    • +
    • +
      +

      describeTopics

      +
      public DescribeTopicsResult describeTopics(TopicCollection topics, + DescribeTopicsOptions options)
      +
      Description copied from interface: Admin
      +
      Describe some topics in the cluster. + + When using topic IDs, this operation is supported by brokers with version 3.1.0 or higher.
      +
      +
      Specified by:
      +
      describeTopics in interface Admin
      +
      Parameters:
      +
      topics - The topics to describe.
      +
      options - The options to use when describing the topics.
      +
      Returns:
      +
      The DescribeTopicsResult.
      +
      +
      +
    • +
    • +
      +

      describeCluster

      +
      public DescribeClusterResult describeCluster(DescribeClusterOptions options)
      +
      Description copied from interface: Admin
      +
      Get information about the nodes in the cluster. +

      + To obtain broker cluster information, you must configure AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG. + To obtain controller cluster information, you must configure AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG.

      +
      +
      Specified by:
      +
      describeCluster in interface Admin
      +
      Parameters:
      +
      options - The options to use when getting information about the cluster.
      +
      Returns:
      +
      The DescribeClusterResult.
      +
      +
      +
    • +
    • +
      +

      describeAcls

      +
      public DescribeAclsResult describeAcls(AclBindingFilter filter, + DescribeAclsOptions options)
      +
      Description copied from interface: Admin
      +
      Lists access control lists (ACLs) according to the supplied filter. +

      + Note: it may take some time for changes made by createAcls or deleteAcls to be reflected + in the output of describeAcls. +

      + This operation is supported by brokers with version 0.11.0.0 or higher.

      +
      +
      Specified by:
      +
      describeAcls in interface Admin
      +
      Parameters:
      +
      filter - The filter to use.
      +
      options - The options to use when listing the ACLs.
      +
      Returns:
      +
      The DescribeAclsResult.
      +
      +
      +
    • +
    • +
      +

createAcls

public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options)

Description copied from interface: Admin
Creates access control lists (ACLs) which are bound to specific resources.

This operation is not transactional, so it may succeed for some ACLs while failing for others.

If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but no changes will be made.

This operation is supported by brokers with version 0.11.0.0 or higher.

Specified by:
createAcls in interface Admin
Parameters:
acls - The ACLs to create
options - The options to use when creating the ACLs.
Returns:
The CreateAclsResult.
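As an illustration (not part of the Javadoc), a minimal sketch of creating a single ACL binding; the principal User:alice, the topic name "orders", and the admin instance are assumptions chosen for the example.

```java
import java.util.List;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;

public class CreateAclsExample {
    // Allows the (hypothetical) principal User:alice to read the "orders" topic from any host.
    static void allowRead(Admin admin) throws Exception {
        AclBinding binding = new AclBinding(
                new ResourcePattern(ResourceType.TOPIC, "orders", PatternType.LITERAL),
                new AccessControlEntry("User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW));
        admin.createAcls(List.of(binding)).all().get();
    }
}
```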

deleteAcls

public DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options)

Description copied from interface: Admin
Deletes access control lists (ACLs) according to the supplied filters.

This operation is not transactional, so it may succeed for some ACLs while failing for others.

This operation is supported by brokers with version 0.11.0.0 or higher.

Specified by:
deleteAcls in interface Admin
Parameters:
filters - The filters to use.
options - The options to use when deleting the ACLs.
Returns:
The DeleteAclsResult.

describeConfigs

public DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options)

Description copied from interface: Admin
Get the configuration for the specified resources.

The returned configuration includes default values and the isDefault() method can be used to distinguish them from user supplied values.

The value of config entries where isSensitive() is true is always null so that sensitive information is not disclosed.

Config entries where isReadOnly() is true cannot be updated.

The different behavior of nonexistent resource:

Note that you cannot describe broker configs or broker logger using AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, and you cannot describe controller configs or controller logger using AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG.

This operation is supported by brokers with version 0.11.0.0 or higher.

Specified by:
describeConfigs in interface Admin
Parameters:
resources - See relevant type ConfigResource.Type
options - The options to use when describing configs
Returns:
The DescribeConfigsResult
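A minimal usage sketch (not from the Javadoc) of reading a topic's configuration; the topic name "orders" and the admin instance are assumptions for the example.

```java
import java.util.List;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.config.ConfigResource;

public class DescribeConfigsExample {
    // Prints every config entry of the (hypothetical) "orders" topic, marking default values.
    static void printTopicConfig(Admin admin) throws Exception {
        ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "orders");
        Config config = admin.describeConfigs(List.of(topic)).all().get().get(topic);
        config.entries().forEach(entry ->
                System.out.printf("%s=%s (default=%b)%n", entry.name(), entry.value(), entry.isDefault()));
    }
}
```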

incrementalAlterConfigs

public AlterConfigsResult incrementalAlterConfigs(Map<ConfigResource,Collection<AlterConfigOp>> configs, AlterConfigsOptions options)

Description copied from interface: Admin
Incrementally update the configuration for the specified resources.

Updates are not transactional, so they may succeed for some resources while failing for others. The configs for a particular resource are updated atomically.

The following exceptions can be anticipated when calling get() on the futures obtained from the returned AlterConfigsResult:

This operation is supported by brokers with version 2.3.0 or higher.

Specified by:
incrementalAlterConfigs in interface Admin
Parameters:
configs - The resources with their configs
options - The options to use when altering configs
Returns:
The AlterConfigsResult
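As an illustration (not part of the Javadoc), a minimal sketch that sets a single config on one topic without touching its other configs; the topic name "orders", the retention value, and the admin instance are assumptions for the example.

```java
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class IncrementalAlterConfigsExample {
    // Sets retention.ms (7 days, a placeholder value) on the "orders" topic.
    static void setRetention(Admin admin) throws Exception {
        ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "orders");
        AlterConfigOp setRetention = new AlterConfigOp(
                new ConfigEntry("retention.ms", "604800000"), AlterConfigOp.OpType.SET);
        admin.incrementalAlterConfigs(Map.of(topic, List.of(setRetention))).all().get();
    }
}
```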

      alterReplicaLogDirs

      +
      public AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica,String> replicaAssignment, + AlterReplicaLogDirsOptions options)
      +
      Description copied from interface: Admin
      +
      Change the log directory for the specified replicas. If the replica does not exist on the broker, the result + shows REPLICA_NOT_AVAILABLE for the given replica and the replica will be created in the given log directory on the + broker when it is created later. If the replica already exists on the broker, the replica will be moved to the given + log directory if it is not already there. For detailed result, inspect the returned AlterReplicaLogDirsResult instance. +

      + This operation is not transactional so it may succeed for some replicas while fail for others. +

      + This operation is supported by brokers with version 1.1.0 or higher.

      +
      +
      Specified by:
      +
      alterReplicaLogDirs in interface Admin
      +
      Parameters:
      +
      replicaAssignment - The replicas with their log directory absolute path
      +
      options - The options to use when changing replica dir
      +
      Returns:
      +
      The AlterReplicaLogDirsResult
      +
      +
      +
    • +
    • +
      +

      describeLogDirs

      +
      public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, + DescribeLogDirsOptions options)
      +
      Description copied from interface: Admin
      +
      Query the information of all log directories on the given set of brokers +

      + This operation is supported by brokers with version 1.0.0 or higher.

      +
      +
      Specified by:
      +
      describeLogDirs in interface Admin
      +
      Parameters:
      +
      brokers - A list of brokers
      +
      options - The options to use when querying log dir info
      +
      Returns:
      +
      The DescribeLogDirsResult
      +
      +
      +
    • +
    • +
      +

      describeReplicaLogDirs

      +
      public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, + DescribeReplicaLogDirsOptions options)
      +
      Description copied from interface: Admin
      +
      Query the replica log directory information for the specified replicas. +

      + This operation is supported by brokers with version 1.0.0 or higher.

      +
      +
      Specified by:
      +
      describeReplicaLogDirs in interface Admin
      +
      Parameters:
      +
      replicas - The replicas to query
      +
      options - The options to use when querying replica log dir info
      +
      Returns:
      +
      The DescribeReplicaLogDirsResult
      +
      +
      +
    • +
    • +
      +

createPartitions

public CreatePartitionsResult createPartitions(Map<String,NewPartitions> newPartitions, CreatePartitionsOptions options)

Description copied from interface: Admin
Increase the number of partitions of the topics given as the keys of newPartitions according to the corresponding values. If partitions are increased for a topic that has a key, the partition logic or ordering of the messages will be affected.

This operation is not transactional, so it may succeed for some topics while failing for others.

It may take several seconds after this method returns success for all the brokers to become aware that the partitions have been created. During this time, Admin.describeTopics(Collection) may not return information about the new partitions.

This operation is supported by brokers with version 1.0.0 or higher.

The following exceptions can be anticipated when calling get() on the futures obtained from the values() method of the returned CreatePartitionsResult:

Specified by:
createPartitions in interface Admin
Parameters:
newPartitions - The topics which should have new partitions created, and corresponding parameters for the created partitions.
options - The options to use when creating the new partitions.
Returns:
The CreatePartitionsResult.
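A minimal sketch (not from the Javadoc) of growing a topic's partition count; the topic name "orders", the target of six partitions, and the admin instance are assumptions for the example.

```java
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.NewPartitions;

public class CreatePartitionsExample {
    // Grows the (hypothetical) "orders" topic to six partitions; existing data is not moved.
    static void growTopic(Admin admin) throws Exception {
        admin.createPartitions(Map.of("orders", NewPartitions.increaseTo(6))).all().get();
    }
}
```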

deleteRecords

public DeleteRecordsResult deleteRecords(Map<TopicPartition,RecordsToDelete> recordsToDelete, DeleteRecordsOptions options)

Description copied from interface: Admin
Delete records whose offset is smaller than the given offset of the corresponding partition.

This operation is supported by brokers with version 0.11.0.0 or higher.

Specified by:
deleteRecords in interface Admin
Parameters:
recordsToDelete - The topic partitions and related offsets from which records deletion starts.
options - The options to use when deleting records.
Returns:
The DeleteRecordsResult.
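As an illustration (not part of the Javadoc), a minimal sketch of truncating one partition up to an offset; the topic "orders", partition 0, the offset 1000, and the admin instance are assumptions for the example.

```java
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.TopicPartition;

public class DeleteRecordsExample {
    // Deletes records below offset 1000 on partition 0 of the (hypothetical) "orders" topic.
    static void truncate(Admin admin) throws Exception {
        TopicPartition partition = new TopicPartition("orders", 0);
        admin.deleteRecords(Map.of(partition, RecordsToDelete.beforeOffset(1000L))).all().get();
    }
}
```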

      createDelegationToken

      +
      public CreateDelegationTokenResult createDelegationToken(CreateDelegationTokenOptions options)
      +
      Description copied from interface: Admin
      +
      Create a Delegation Token. +

      + This operation is supported by brokers with version 1.1.0 or higher. +

      + The following exceptions can be anticipated when calling get() on the futures obtained from the + delegationToken() method of the returned CreateDelegationTokenResult +

      +
      +
      Specified by:
      +
      createDelegationToken in interface Admin
      +
      Parameters:
      +
      options - The options to use when creating delegation token.
      +
      Returns:
      +
      The CreateDelegationTokenResult.
      +
      +
      +
    • +
    • +
      +

      renewDelegationToken

      +
      public RenewDelegationTokenResult renewDelegationToken(byte[] hmac, + RenewDelegationTokenOptions options)
      +
      Description copied from interface: Admin
      +
      Renew a Delegation Token. +

      + This operation is supported by brokers with version 1.1.0 or higher. +

      + The following exceptions can be anticipated when calling get() on the futures obtained from the + expiryTimestamp() method of the returned RenewDelegationTokenResult +

      +
      +
      Specified by:
      +
      renewDelegationToken in interface Admin
      +
      Parameters:
      +
      hmac - HMAC of the Delegation token
      +
      options - The options to use when renewing delegation token.
      +
      Returns:
      +
      The RenewDelegationTokenResult.
      +
      +
      +
    • +
    • +
      +

      expireDelegationToken

      +
      public ExpireDelegationTokenResult expireDelegationToken(byte[] hmac, + ExpireDelegationTokenOptions options)
      +
      Description copied from interface: Admin
      +
      Expire a Delegation Token. +

      + This operation is supported by brokers with version 1.1.0 or higher. +

      + The following exceptions can be anticipated when calling get() on the futures obtained from the + expiryTimestamp() method of the returned ExpireDelegationTokenResult +

      +
      +
      Specified by:
      +
      expireDelegationToken in interface Admin
      +
      Parameters:
      +
      hmac - HMAC of the Delegation token
      +
      options - The options to use when expiring delegation token.
      +
      Returns:
      +
      The ExpireDelegationTokenResult.
      +
      +
      +
    • +
    • +
      +

      describeDelegationToken

      +
      public DescribeDelegationTokenResult describeDelegationToken(DescribeDelegationTokenOptions options)
      +
      Description copied from interface: Admin
      +
      Describe the Delegation Tokens. +

      + This operation is supported by brokers with version 1.1.0 or higher. +

      + The following exceptions can be anticipated when calling get() on the futures obtained from the + delegationTokens() method of the returned DescribeDelegationTokenResult +

      +
      +
      Specified by:
      +
      describeDelegationToken in interface Admin
      +
      Parameters:
      +
      options - The options to use when describing delegation tokens.
      +
      Returns:
      +
      The DescribeDelegationTokenResult.
      +
      +
      +
    • +
    • +
      +

      describeConsumerGroups

      +
      public DescribeConsumerGroupsResult describeConsumerGroups(Collection<String> groupIds, + DescribeConsumerGroupsOptions options)
      +
      Description copied from interface: Admin
      +
      Describe some consumer groups in the cluster.
      +
      +
      Specified by:
      +
      describeConsumerGroups in interface Admin
      +
      Parameters:
      +
      groupIds - The IDs of the groups to describe.
      +
      options - The options to use when describing the groups.
      +
      Returns:
      +
      The DescribeConsumerGroupsResult.
      +
      +
      +
    • +
    • +
      +

      listConsumerGroups

      +
      @Deprecated +public ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options)
      +
      Deprecated.
      +
      Description copied from interface: Admin
      +
      List the consumer groups available in the cluster.
      +
      +
      Specified by:
      +
      listConsumerGroups in interface Admin
      +
      Parameters:
      +
      options - The options to use when listing the consumer groups.
      +
      Returns:
      +
      The ListConsumerGroupsResult.
      +
      +
      +
    • +
    • +
      +

listConsumerGroupOffsets

public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String,ListConsumerGroupOffsetsSpec> groupSpecs, ListConsumerGroupOffsetsOptions options)

Description copied from interface: Admin
List the consumer group offsets available in the cluster for the specified consumer groups.

Specified by:
listConsumerGroupOffsets in interface Admin
Parameters:
groupSpecs - Map of consumer group ids to a spec that specifies the topic partitions of the group to list offsets for.
options - The options to use when listing the consumer group offsets.
Returns:
The ListConsumerGroupOffsetsResult
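A minimal sketch (not from the Javadoc) of reading a group's committed offsets, using the single-group convenience overload of this method; the group id "payments" and the admin instance are assumptions for the example.

```java
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class ListGroupOffsetsExample {
    // Prints the committed offset of every partition owned by the (hypothetical) "payments" group.
    static void printCommittedOffsets(Admin admin) throws Exception {
        Map<TopicPartition, OffsetAndMetadata> offsets =
                admin.listConsumerGroupOffsets("payments").partitionsToOffsetAndMetadata().get();
        offsets.forEach((tp, om) -> System.out.println(tp + " -> " + om.offset()));
    }
}
```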

      listStreamsGroupOffsets

      + +
      Description copied from interface: Admin
      +
      List the streams group offsets available in the cluster for the specified streams groups. + + Note: this method effectively does the same as the corresponding consumer group method Admin.listConsumerGroupOffsets(java.lang.String, org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions) does.
      +
      +
      Specified by:
      +
      listStreamsGroupOffsets in interface Admin
      +
      Parameters:
      +
      groupSpecs - Map of streams group ids to a spec that specifies the topic partitions of the group to list offsets for.
      +
      options - The options to use when listing the streams group offsets.
      +
      Returns:
      +
      The ListStreamsGroupOffsetsResult
      +
      +
      +
    • +
    • +
      +

      deleteConsumerGroups

      +
      public DeleteConsumerGroupsResult deleteConsumerGroups(Collection<String> groupIds, + DeleteConsumerGroupsOptions options)
      +
      Description copied from interface: Admin
      +
      Delete consumer groups from the cluster.
      +
      +
      Specified by:
      +
      deleteConsumerGroups in interface Admin
      +
      options - The options to use when deleting a consumer group.
      +
      Returns:
      +
      The DeleteConsumerGroupsResult.
      +
      +
      +
    • +
    • +
      +

      deleteStreamsGroups

      +
      public DeleteStreamsGroupsResult deleteStreamsGroups(Collection<String> groupIds, + DeleteStreamsGroupsOptions options)
      +
      Description copied from interface: Admin
      +
      Delete streams groups from the cluster. + + Note: this method effectively does the same as the corresponding consumer group method Admin.deleteConsumerGroups(java.util.Collection<java.lang.String>, org.apache.kafka.clients.admin.DeleteConsumerGroupsOptions) does.
      +
      +
      Specified by:
      +
      deleteStreamsGroups in interface Admin
      +
      options - The options to use when deleting a streams group.
      +
      Returns:
      +
      The DeleteStreamsGroupsResult.
      +
      +
      +
    • +
    • +
      +

      deleteConsumerGroupOffsets

      +
      public DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, + Set<TopicPartition> partitions, + DeleteConsumerGroupOffsetsOptions options)
      +
      Description copied from interface: Admin
      +
      Delete committed offsets for a set of partitions in a consumer group. This will + succeed at the partition level only if the group is not actively subscribed + to the corresponding topic.
      +
      +
      Specified by:
      +
      deleteConsumerGroupOffsets in interface Admin
      +
      options - The options to use when deleting offsets in a consumer group.
      +
      Returns:
      +
      The DeleteConsumerGroupOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      deleteStreamsGroupOffsets

      +
      public DeleteStreamsGroupOffsetsResult deleteStreamsGroupOffsets(String groupId, + Set<TopicPartition> partitions, + DeleteStreamsGroupOffsetsOptions options)
      +
      Description copied from interface: Admin
      +
      Delete committed offsets for a set of partitions in a streams group. This will + succeed at the partition level only if the group is not actively subscribed + to the corresponding topic. + + Note: this method effectively does the same as the corresponding consumer group method Admin.deleteConsumerGroupOffsets(java.lang.String, java.util.Set<org.apache.kafka.common.TopicPartition>, org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsOptions) does.
      +
      +
      Specified by:
      +
      deleteStreamsGroupOffsets in interface Admin
      +
      options - The options to use when deleting offsets in a streams group.
      +
      Returns:
      +
      The DeleteStreamsGroupOffsetsResult.
      +
      +
      +
    • +
    • +
      +

electLeaders

public ElectLeadersResult electLeaders(ElectionType electionType, Set<TopicPartition> partitions, ElectLeadersOptions options)

Description copied from interface: Admin
Elect a replica as leader for the given partitions, or for all partitions if the argument to partitions is null.

This operation is not transactional, so it may succeed for some partitions while failing for others.

It may take several seconds after this method returns success for all the brokers in the cluster to become aware that the partitions have new leaders. During this time, Admin.describeTopics(Collection) may not return information about the partitions' new leaders.

This operation is supported by brokers with version 2.2.0 or later if preferred election is used; otherwise the brokers must be 2.4.0 or higher.

The following exceptions can be anticipated when calling get() on the future obtained from the returned ElectLeadersResult:

Specified by:
electLeaders in interface Admin
Parameters:
electionType - The type of election to conduct.
partitions - The topics and partitions for which to conduct elections.
options - The options to use when electing the leaders.
Returns:
The ElectLeadersResult.

      alterPartitionReassignments

      + +
      Description copied from interface: Admin
      +
      Change the reassignments for one or more partitions. + Providing an empty Optional (e.g via Optional.empty()) will revert the reassignment for the associated partition. + +

      The following exceptions can be anticipated when calling get() on the futures obtained from + the returned AlterPartitionReassignmentsResult:

      +
      +
      +
      Specified by:
      +
      alterPartitionReassignments in interface Admin
      +
      Parameters:
      +
      reassignments - The reassignments to add, modify, or remove. See NewPartitionReassignment.
      +
      options - The options to use.
      +
      Returns:
      +
      The result.
      +
      +
      +
    • +
    • +
      +

      listPartitionReassignments

      +
      public ListPartitionReassignmentsResult listPartitionReassignments(Optional<Set<TopicPartition>> partitions, + ListPartitionReassignmentsOptions options)
      +
      +
      Specified by:
      +
      listPartitionReassignments in interface Admin
      +
      Parameters:
      +
      partitions - the partitions we want to get reassignment for, or an empty optional if we want to get the reassignments for all partitions in the cluster
      +
      options - The options to use.
      +
      Returns:
      +
      The result.
      +
      +
      +
    • +
    • +
      +

      removeMembersFromConsumerGroup

      +
      public RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(String groupId, + RemoveMembersFromConsumerGroupOptions options)
      +
      Description copied from interface: Admin
      +
      Remove members from the consumer group by given member identities. +

      + For possible error codes, refer to LeaveGroupResponse.

      +
      +
      Specified by:
      +
      removeMembersFromConsumerGroup in interface Admin
      +
      Parameters:
      +
      groupId - The ID of the group to remove member from.
      +
      options - The options to carry removing members' information.
      +
      Returns:
      +
      The MembershipChangeResult.
      +
      +
      +
    • +
    • +
      +

alterConsumerGroupOffsets

public AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId, Map<TopicPartition,OffsetAndMetadata> offsets, AlterConsumerGroupOffsetsOptions options)

Description copied from interface: Admin
Alters offsets for the specified group. In order to succeed, the group must be empty.

This operation is not transactional, so it may succeed for some partitions while failing for others.

Specified by:
alterConsumerGroupOffsets in interface Admin
Parameters:
groupId - The group for which to alter offsets.
offsets - A map of offsets by partition with associated metadata. Partitions not specified in the map are ignored.
options - The options to use when altering the offsets.
Returns:
The AlterOffsetsResult.
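As an illustration (not part of the Javadoc), a minimal sketch of rewinding an empty group to the start of one partition; the group id "payments", the topic "orders", and the admin instance are assumptions for the example.

```java
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class AlterGroupOffsetsExample {
    // Rewinds the (empty, hypothetical) "payments" group to offset 0 on partition 0 of "orders".
    static void rewind(Admin admin) throws Exception {
        Map<TopicPartition, OffsetAndMetadata> offsets =
                Map.of(new TopicPartition("orders", 0), new OffsetAndMetadata(0L));
        admin.alterConsumerGroupOffsets("payments", offsets).all().get();
    }
}
```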

      alterStreamsGroupOffsets

      +
      public AlterStreamsGroupOffsetsResult alterStreamsGroupOffsets(String groupId, + Map<TopicPartition,OffsetAndMetadata> offsets, + AlterStreamsGroupOffsetsOptions options)
      +
      Description copied from interface: Admin
      +

      Alters offsets for the specified group. In order to succeed, the group must be empty. + +

      This operation is not transactional so it may succeed for some partitions while fail for others. + + Note: this method effectively does the same as the corresponding consumer group method Admin.alterConsumerGroupOffsets(java.lang.String, java.util.Map<org.apache.kafka.common.TopicPartition, org.apache.kafka.clients.consumer.OffsetAndMetadata>) does.

      +
      +
      Specified by:
      +
      alterStreamsGroupOffsets in interface Admin
      +
      Parameters:
      +
      groupId - The group for which to alter offsets.
      +
      offsets - A map of offsets by partition with associated metadata. Partitions not specified in the map are ignored.
      +
      options - The options to use when altering the offsets.
      +
      Returns:
      +
      The AlterOffsetsResult.
      +
      +
      +
    • +
    • +
      +

listOffsets

public ListOffsetsResult listOffsets(Map<TopicPartition,OffsetSpec> topicPartitionOffsets, ListOffsetsOptions options)

Description copied from interface: Admin
List offsets for the specified partitions. This operation enables finding the beginning offset, the end offset, or the offset matching a timestamp for each partition.

Specified by:
listOffsets in interface Admin
Parameters:
topicPartitionOffsets - The mapping from partition to the OffsetSpec to look up.
options - The options to use when retrieving the offsets
Returns:
The ListOffsetsResult.
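A minimal sketch (not from the Javadoc) of looking up the current end offset of one partition; the topic "orders", partition 0, and the admin instance are assumptions for the example.

```java
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class ListOffsetsExample {
    // Returns the current end offset of partition 0 of the (hypothetical) "orders" topic.
    static long endOffset(Admin admin) throws Exception {
        TopicPartition partition = new TopicPartition("orders", 0);
        Map<TopicPartition, ListOffsetsResultInfo> result =
                admin.listOffsets(Map.of(partition, OffsetSpec.latest())).all().get();
        return result.get(partition).offset();
    }
}
```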

      describeClientQuotas

      +
      public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, + DescribeClientQuotasOptions options)
      +
      Description copied from interface: Admin
      +
      Describes all entities matching the provided filter that have at least one client quota configuration + value defined. +

      + The following exceptions can be anticipated when calling get() on the future from the + returned DescribeClientQuotasResult: +

      +

      + This operation is supported by brokers with version 2.6.0 or higher.

      +
      +
      Specified by:
      +
      describeClientQuotas in interface Admin
      +
      Parameters:
      +
      filter - the filter to apply to match entities
      +
      options - the options to use
      +
      Returns:
      +
      the DescribeClientQuotasResult containing the result
      +
      +
      +
    • +
    • +
      +

alterClientQuotas

public AlterClientQuotasResult alterClientQuotas(Collection<ClientQuotaAlteration> entries, AlterClientQuotasOptions options)

Description copied from interface: Admin
Alters client quota configurations with the specified alterations.

Alterations for a single entity are atomic, but atomicity across entities is not guaranteed. The resulting per-entity error code should be evaluated to resolve the success or failure of all updates.

The following exceptions can be anticipated when calling get() on the futures obtained from the returned AlterClientQuotasResult:

• ClusterAuthorizationException - If the authenticated user didn't have alter access to the cluster.
• InvalidRequestException - If the request details are invalid, e.g., a configuration key was specified more than once for an entity.
• TimeoutException - If the request timed out before the alterations could finish. It cannot be guaranteed whether the update succeeded or not.

This operation is supported by brokers with version 2.6.0 or higher.

Specified by:
alterClientQuotas in interface Admin
Parameters:
entries - the alterations to perform
Returns:
the AlterClientQuotasResult containing the result

      describeUserScramCredentials

      +
      public DescribeUserScramCredentialsResult describeUserScramCredentials(List<String> users, + DescribeUserScramCredentialsOptions options)
      +
      Description copied from interface: Admin
      +
      Describe SASL/SCRAM credentials. +

      + The following exceptions can be anticipated when calling get() on the futures from the + returned DescribeUserScramCredentialsResult: +

      +

      + This operation is supported by brokers with version 2.7.0 or higher.

      +
      +
      Specified by:
      +
      describeUserScramCredentials in interface Admin
      +
      Parameters:
      +
      users - the users for which credentials are to be described; all users' credentials are described if null + or empty.
      +
      options - The options to use when describing the credentials
      +
      Returns:
      +
      The DescribeUserScramCredentialsResult.
      +
      +
      +
    • +
    • +
      +

      alterUserScramCredentials

      +
      public AlterUserScramCredentialsResult alterUserScramCredentials(List<UserScramCredentialAlteration> alterations, + AlterUserScramCredentialsOptions options)
      +
      Description copied from interface: Admin
      +
      Alter SASL/SCRAM credentials. + +

The following exceptions can be anticipated when calling get() on any of the futures from the returned AlterUserScramCredentialsResult:

      +

      + This operation is supported by brokers with version 2.7.0 or higher.

      +
      +
      Specified by:
      +
      alterUserScramCredentials in interface Admin
      +
      Parameters:
      +
      alterations - the alterations to be applied
      +
      options - The options to use when altering the credentials
      +
      Returns:
      +
      The AlterUserScramCredentialsResult.
      +
      +
      +
    • +
    • +
      +

      describeFeatures

      +
      public DescribeFeaturesResult describeFeatures(DescribeFeaturesOptions options)
      +
      Description copied from interface: Admin
      +
      Describes finalized as well as supported features. The request is issued to any random + broker. +

      + The following exceptions can be anticipated when calling get() on the future from the + returned DescribeFeaturesResult: +

        +
      • TimeoutException + If the request timed out before the describe operation could finish.
      • +
      +

      +
      +
      Specified by:
      +
      describeFeatures in interface Admin
      +
      Parameters:
      +
      options - the options to use
      +
      Returns:
      +
      the DescribeFeaturesResult containing the result
      +
      +
      +
    • +
    • +
      +

      updateFeatures

      +
      public UpdateFeaturesResult updateFeatures(Map<String,FeatureUpdate> featureUpdates, + UpdateFeaturesOptions options)
      +
      Description copied from interface: Admin
      +
      Applies specified updates to finalized features. This operation is not transactional so some + updates may succeed while the rest may fail. +

      + The API takes in a map of finalized feature names to FeatureUpdate that needs to be + applied. Each entry in the map specifies the finalized feature to be added or updated or + deleted, along with the new max feature version level value. This request is issued only to + the controller since the API is only served by the controller. The return value contains an + error code for each supplied FeatureUpdate, and the code indicates if the update + succeeded or failed in the controller. +

      +

      + The following exceptions can be anticipated when calling get() on the futures + obtained from the returned UpdateFeaturesResult: +

      +

      + This operation is supported by brokers with version 2.7.0 or higher.

      +
      +
      Specified by:
      +
      updateFeatures in interface Admin
      +
      Parameters:
      +
      featureUpdates - the map of finalized feature name to FeatureUpdate
      +
      options - the options to use
      +
      Returns:
      +
      the UpdateFeaturesResult containing the result
      +
      +
      +
    • +
    • +
      +

      describeMetadataQuorum

      +
      public DescribeMetadataQuorumResult describeMetadataQuorum(DescribeMetadataQuorumOptions options)
      +
      Description copied from interface: Admin
      +
      Describes the state of the metadata quorum. +

      + The following exceptions can be anticipated when calling get() on the futures obtained from + the returned DescribeMetadataQuorumResult: +

      +
      +
      Specified by:
      +
      describeMetadataQuorum in interface Admin
      +
      Parameters:
      +
      options - The DescribeMetadataQuorumOptions to use when describing the quorum.
      +
      Returns:
      +
      the DescribeMetadataQuorumResult containing the result
      +
      +
      +
    • +
    • +
      +

      unregisterBroker

      +
      public UnregisterBrokerResult unregisterBroker(int brokerId, + UnregisterBrokerOptions options)
      +
      Description copied from interface: Admin
      +
      Unregister a broker. +

      + This operation does not have any effect on partition assignments. + + The following exceptions can be anticipated when calling get() on the future from the + returned UnregisterBrokerResult: +

      +

      +
      +
      Specified by:
      +
      unregisterBroker in interface Admin
      +
      Parameters:
      +
      brokerId - the broker id to unregister.
      +
      options - the options to use.
      +
      Returns:
      +
      the UnregisterBrokerResult containing the result
      +
      +
      +
    • +
    • +
      +

      describeProducers

      +
      public DescribeProducersResult describeProducers(Collection<TopicPartition> partitions, + DescribeProducersOptions options)
      +
      Description copied from interface: Admin
      +
      Describe active producer state on a set of topic partitions. Unless a specific broker + is requested through DescribeProducersOptions.brokerId(int), this will + query the partition leader to find the producer state.
      +
      +
      Specified by:
      +
      describeProducers in interface Admin
      +
      Parameters:
      +
      partitions - The set of partitions to query
      +
      options - Options to control the method behavior
      +
      Returns:
      +
      The result
      +
      +
      +
    • +
    • +
      +

      describeTransactions

      +
      public DescribeTransactionsResult describeTransactions(Collection<String> transactionalIds, + DescribeTransactionsOptions options)
      +
      Description copied from interface: Admin
      +
      Describe the state of a set of transactional IDs from the respective transaction coordinators, + which are dynamically discovered.
      +
      +
      Specified by:
      +
      describeTransactions in interface Admin
      +
      Parameters:
      +
      transactionalIds - The set of transactional IDs to query
      +
      options - Options to control the method behavior
      +
      Returns:
      +
      The result
      +
      +
      +
    • +
    • +
      +

      abortTransaction

      +
      public AbortTransactionResult abortTransaction(AbortTransactionSpec spec, + AbortTransactionOptions options)
      +
      Description copied from interface: Admin
      +
      Forcefully abort a transaction which is open on a topic partition. This will + send a `WriteTxnMarkers` request to the partition leader in order to abort the + transaction. This requires administrative privileges.
      +
      +
      Specified by:
      +
      abortTransaction in interface Admin
      +
      Parameters:
      +
      spec - The transaction specification including topic partition and producer details
      +
      options - Options to control the method behavior (including filters)
      +
      Returns:
      +
      The result
      +
      +
      +
    • +
    • +
      +

      forceTerminateTransaction

      +
      public TerminateTransactionResult forceTerminateTransaction(String transactionalId, + TerminateTransactionOptions options)
      +
      Description copied from interface: Admin
      +
      Force terminate a transaction for the given transactional ID. + This operation aborts any ongoing transaction associated with the transactional ID. + It's similar to fenceProducers but only targets a single transactional ID to handle + long-running transactions when 2PC is enabled.
      +
      +
      Specified by:
      +
      forceTerminateTransaction in interface Admin
      +
      Parameters:
      +
      transactionalId - The ID of the transaction to terminate.
      +
      options - The options to use when terminating the transaction.
      +
      Returns:
      +
      The TerminateTransactionResult.
      +
      +
      +
    • +
    • +
      +

listTransactions

public ListTransactionsResult listTransactions(ListTransactionsOptions options)

Description copied from interface: Admin
List active transactions in the cluster. This will query all potential transaction coordinators in the cluster and collect the state of all transactions. Users should typically attempt to reduce the size of the result set using ListTransactionsOptions.filterProducerIds(Collection), ListTransactionsOptions.filterStates(Collection), or ListTransactionsOptions.filterOnDuration(long).

Specified by:
listTransactions in interface Admin
Parameters:
options - Options to control the method behavior (including filters)
Returns:
The result
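As an illustration (not part of the Javadoc), a minimal sketch that lists transactions with the default (unfiltered) options; the admin instance is an assumption for the example, and in practice you would add the filters the Javadoc recommends to keep the result set small.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListTransactionsOptions;
import org.apache.kafka.clients.admin.TransactionListing;

public class ListTransactionsExample {
    // Prints every transactional id known to the cluster's transaction coordinators.
    static void printTransactions(Admin admin) throws Exception {
        for (TransactionListing listing :
                admin.listTransactions(new ListTransactionsOptions()).all().get()) {
            System.out.println(listing.transactionalId() + " (" + listing.state() + ")");
        }
    }
}
```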

      fenceProducers

      +
      public FenceProducersResult fenceProducers(Collection<String> transactionalIds, + FenceProducersOptions options)
      +
      Description copied from interface: Admin
      +
      Fence out all active producers that use any of the provided transactional IDs.
      +
      +
      Specified by:
      +
      fenceProducers in interface Admin
      +
      Parameters:
      +
      transactionalIds - The IDs of the producers to fence.
      +
      options - The options to use when fencing the producers.
      +
      Returns:
      +
      The FenceProducersResult.
      +
      +
      +
    • +
    • +
      +

      listConfigResources

      +
      public ListConfigResourcesResult listConfigResources(Set<ConfigResource.Type> configResourceTypes, + ListConfigResourcesOptions options)
      +
      Description copied from interface: Admin
      +
List the configuration resources available in the cluster that match the given config resource types. If no config resource types are specified, all configuration resources will be listed.
      +
      +
      Specified by:
      +
      listConfigResources in interface Admin
      +
      Parameters:
      +
      configResourceTypes - The set of configuration resource types to list.
      +
      options - The options to use when listing the configuration resources.
      +
      Returns:
      +
The ListConfigResourcesResult.
      +
      +
      +
    • +
    • +
      +

      listClientMetricsResources

      +
      public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMetricsResourcesOptions options)
      +
      Description copied from interface: Admin
      +
      List the client metrics configuration resources available in the cluster.
      +
      +
      Specified by:
      +
      listClientMetricsResources in interface Admin
      +
      Parameters:
      +
      options - The options to use when listing the client metrics resources.
      +
      Returns:
      +
      The ListClientMetricsResourcesResult.
      +
      +
      +
    • +
    • +
      +

      clientInstanceId

      +
      public Uuid clientInstanceId(Duration timeout)
      +
      Description copied from interface: Admin
      +
      Determines the client's unique client instance ID used for telemetry. This ID is unique to + this specific client instance and will not change after it is initially generated. + The ID is useful for correlating client operations with telemetry sent to the broker and + to its eventual monitoring destinations. +

      + If telemetry is enabled, this will first require a connection to the cluster to generate + the unique client instance ID. This method waits up to timeout for the admin + client to complete the request. +

      + Client telemetry is controlled by the AdminClientConfig.ENABLE_METRICS_PUSH_CONFIG + configuration option.

      +
      +
      Specified by:
      +
      clientInstanceId in interface Admin
      +
      Parameters:
      +
      timeout - The maximum time to wait for admin client to determine its client instance ID. + The value must be non-negative. Specifying a timeout of zero means do not + wait for the initial request to complete if it hasn't already.
      +
      Returns:
      +
      The client's assigned instance id used for metrics collection.
      +
      +
      +
    • +
    • +
      +

      addRaftVoter

      +
      public AddRaftVoterResult addRaftVoter(int voterId, + Uuid voterDirectoryId, + Set<RaftVoterEndpoint> endpoints, + AddRaftVoterOptions options)
      +
      Description copied from interface: Admin
      +
      Add a new voter node to the KRaft metadata quorum.
      +
      +
      Specified by:
      +
      addRaftVoter in interface Admin
      +
      Parameters:
      +
      voterId - The node ID of the voter.
      +
      voterDirectoryId - The directory ID of the voter.
      +
      endpoints - The endpoints that the new voter has.
      +
      options - The options to use when adding the new voter node.
      +
      +
      +
    • +
    • +
      +

      removeRaftVoter

      +
      public RemoveRaftVoterResult removeRaftVoter(int voterId, + Uuid voterDirectoryId, + RemoveRaftVoterOptions options)
      +
      Description copied from interface: Admin
      +
      Remove a voter node from the KRaft metadata quorum.
      +
      +
      Specified by:
      +
      removeRaftVoter in interface Admin
      +
      Parameters:
      +
      voterId - The node ID of the voter.
      +
      voterDirectoryId - The directory ID of the voter.
      +
      options - The options to use when removing the voter node.
      +
      +
      +
    • +
    • +
      +

      describeShareGroups

      +
      public DescribeShareGroupsResult describeShareGroups(Collection<String> groupIds, + DescribeShareGroupsOptions options)
      +
      Description copied from interface: Admin
      +
      Describe some share groups in the cluster.
      +
      +
      Specified by:
      +
      describeShareGroups in interface Admin
      +
      Parameters:
      +
      groupIds - The IDs of the groups to describe.
      +
      options - The options to use when describing the groups.
      +
      Returns:
      +
      The DescribeShareGroupsResult.
      +
      +
      +
    • +
    • +
      +

      alterShareGroupOffsets

      +
      public AlterShareGroupOffsetsResult alterShareGroupOffsets(String groupId, + Map<TopicPartition,Long> offsets, + AlterShareGroupOffsetsOptions options)
      +
      Description copied from interface: Admin
      +
      Alters offsets for the specified group. In order to succeed, the group must be empty. + +

      This operation is not transactional, so it may succeed for some partitions while fail for others.

      +
      +
      Specified by:
      +
      alterShareGroupOffsets in interface Admin
      +
      Parameters:
      +
      groupId - The group for which to alter offsets.
      +
      offsets - A map of offsets by partition. Partitions not specified in the map are ignored.
      +
      options - The options to use when altering the offsets.
      +
      Returns:
      +
      The AlterShareGroupOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      listShareGroupOffsets

      +
      public ListShareGroupOffsetsResult listShareGroupOffsets(Map<String,ListShareGroupOffsetsSpec> groupSpecs, + ListShareGroupOffsetsOptions options)
      +
      Description copied from interface: Admin
      +
      List the share group offsets available in the cluster for the specified share groups.
      +
      +
      Specified by:
      +
      listShareGroupOffsets in interface Admin
      +
      Parameters:
      +
      groupSpecs - Map of share group ids to a spec that specifies the topic partitions of the group to list offsets for.
      +
      options - The options to use when listing the share group offsets.
      +
      Returns:
      +
      The ListShareGroupOffsetsResult
      +
      +
      +
    • +
    • +
      +

      deleteShareGroupOffsets

      +
      public DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, + Set<String> topics, + DeleteShareGroupOffsetsOptions options)
      +
      Description copied from interface: Admin
      +
      Delete offsets for a set of topics in a share group.
      +
      +
      Specified by:
      +
      deleteShareGroupOffsets in interface Admin
      +
      Parameters:
      +
      groupId - The group for which to delete offsets.
      +
      topics - The topics for which to delete offsets.
      +
      options - The options to use when deleting offsets in a share group.
      +
      Returns:
      +
      The DeleteShareGroupOffsetsResult.
      +
      +
      +
    • +
    • +
      +

      deleteShareGroups

      +
      public DeleteShareGroupsResult deleteShareGroups(Collection<String> groupIds, + DeleteShareGroupsOptions options)
      +
      Description copied from interface: Admin
      +
      Delete share groups from the cluster.
      +
      +
      Specified by:
      +
      deleteShareGroups in interface Admin
      +
      Parameters:
      +
      groupIds - Collection of share group ids which are to be deleted.
      +
      options - The options to use when deleting a share group.
      +
      Returns:
      +
      The DeleteShareGroupsResult.
      +
      +
      +
    • +
    • +
      +

      describeStreamsGroups

      +
      public DescribeStreamsGroupsResult describeStreamsGroups(Collection<String> groupIds, + DescribeStreamsGroupsOptions options)
      +
      Description copied from interface: Admin
      +
      Describe streams groups in the cluster.
      +
      +
      Specified by:
      +
      describeStreamsGroups in interface Admin
      +
      Parameters:
      +
      groupIds - The IDs of the groups to describe.
      +
      options - The options to use when describing the groups.
      +
      Returns:
      +
      The DescribeStreamsGroupsResult.
      +
      +
      +
    • +
    • +
      +

listGroups

public ListGroupsResult listGroups(ListGroupsOptions options)

Description copied from interface: Admin
List the groups available in the cluster.

Specified by:
listGroups in interface Admin
Parameters:
options - The options to use when listing the groups.
Returns:
The ListGroupsResult.
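A minimal sketch (not from the Javadoc) of iterating over the listed groups; the admin instance is an assumption for the example, and it assumes the ListGroupsResult exposes an all() future of GroupListing values in the same style as the other listing results in this API.

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.GroupListing;
import org.apache.kafka.clients.admin.ListGroupsOptions;

public class ListGroupsExample {
    // Prints every group in the cluster together with its type and state, when available.
    static void printGroups(Admin admin) throws Exception {
        for (GroupListing group : admin.listGroups(new ListGroupsOptions()).all().get()) {
            System.out.println(group.groupId() + " type=" + group.type() + " state=" + group.groupState());
        }
    }
}
```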

      describeClassicGroups

      +
      public DescribeClassicGroupsResult describeClassicGroups(Collection<String> groupIds, + DescribeClassicGroupsOptions options)
      +
      Description copied from interface: Admin
      +
      Describe some classic groups in the cluster.
      +
      +
      Specified by:
      +
      describeClassicGroups in interface Admin
      +
      Parameters:
      +
      groupIds - The IDs of the groups to describe.
      +
      options - The options to use when describing the groups.
      +
      Returns:
      +
      The DescribeClassicGroupsResult.
      +
      +
      +
    • +
    • +
      +

      registerMetricForSubscription

      +
      public void registerMetricForSubscription(KafkaMetric metric)
      +
      Description copied from interface: Admin
      +
      Add the provided application metric for subscription. + This metric will be added to this client's metrics + that are available for subscription and sent as + telemetry data to the broker. + The provided metric must map to an OTLP metric data point + type in the OpenTelemetry v1 metrics protobuf message types. + Specifically, the metric should be one of the following: +
        +
      • + `Sum`: Monotonic total count meter (Counter). Suitable for metrics like total number of X, e.g., total bytes sent. +
      • +
      • + `Gauge`: Non-monotonic current value meter (UpDownCounter). Suitable for metrics like current value of Y, e.g., current queue count. +
      • +
      + Metrics not matching these types are silently ignored. + Executing this method for a previously registered metric is a benign operation and results in updating that metrics entry.
      +
      +
      Specified by:
      +
      registerMetricForSubscription in interface Admin
      +
      Parameters:
      +
      metric - The application metric to register
      +
      +
      +
    • +
    • +
      +

      unregisterMetricFromSubscription

      +
      public void unregisterMetricFromSubscription(KafkaMetric metric)
      +
      Description copied from interface: Admin
      +
      Remove the provided application metric for subscription. + This metric is removed from this client's metrics + and will not be available for subscription any longer. + Executing this method with a metric that has not been registered is a + benign operation and does not result in any action taken (no-op).
      +
      +
      Specified by:
      +
      unregisterMetricFromSubscription in interface Admin
      +
      Parameters:
      +
      metric - The application metric to remove
      +
      +
      +
    • +
    • +
      +

      metrics

      +
      public Map<MetricName,? extends Metric> metrics()
      +
      Description copied from interface: Admin
      +
      Get the metrics kept by the adminClient
      +
      +
      Specified by:
      +
      metrics in interface Admin
      +
      +
      +
    • +
diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/GroupListing.html b/static/41/javadoc/org/apache/kafka/clients/admin/GroupListing.html
new file mode 100644
index 000000000..53e5bfd08
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/clients/admin/GroupListing.html
@@ -0,0 +1,291 @@

GroupListing (kafka 4.1.0 API)

Class GroupListing

java.lang.Object
  org.apache.kafka.clients.admin.GroupListing

@Evolving
public class GroupListing
extends Object

A listing of a group in the cluster.
    Constructor Details

    +
      +
    • +
      +

GroupListing

public GroupListing(String groupId, Optional<GroupType> type, String protocol, Optional<GroupState> groupState)

Create an instance with the specified parameters.

Parameters:
groupId - Group Id
type - Group type
protocol - Protocol
groupState - Group state
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      groupId

      +
      public String groupId()
      +
      The group Id.
      +
      +
      Returns:
      +
      Group Id
      +
      +
      +
    • +
    • +
      +

      type

      +
      public Optional<GroupType> type()
      +
      The type of the group. +

      + If the broker returns a group type which is not recognised, as might + happen when talking to a broker with a later version, the type will be + Optional.of(GroupType.UNKNOWN). If the broker is earlier than version 2.6.0, + the group type will not be available, and the type will be Optional.empty().

      +
      +
      Returns:
      +
      An Optional containing the type, if available
      +
      +
      +
    • +
    • +
      +

      protocol

      +
      public String protocol()
      +
      The protocol of the group.
      +
      +
      Returns:
      +
      The protocol
      +
      +
      +
    • +
    • +
      +

      groupState

      +
      public Optional<GroupState> groupState()
      +
      The group state. +

      + If the broker returns a group state which is not recognised, as might + happen when talking to a broker with a later version, the state will be + Optional.of(GroupState.UNKNOWN).

      +
      +
      Returns:
      +
      An Optional containing the state, if available.
      +
      +
      +
    • +
    • +
      +

      isSimpleConsumerGroup

      +
      public boolean isSimpleConsumerGroup()
      +
      If the group is a simple consumer group or not.
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/KafkaAdminClient.html b/static/41/javadoc/org/apache/kafka/clients/admin/KafkaAdminClient.html
new file mode 100644
index 000000000..d0721ab2a
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/clients/admin/KafkaAdminClient.html
@@ -0,0 +1,1996 @@

KafkaAdminClient (kafka 4.1.0 API)

Class KafkaAdminClient

java.lang.Object
  org.apache.kafka.clients.admin.AdminClient
    org.apache.kafka.clients.admin.KafkaAdminClient

All Implemented Interfaces:
AutoCloseable, Admin

public class KafkaAdminClient
extends AdminClient

The default implementation of Admin. An instance of this class is created by invoking one of the create() methods in AdminClient. Users should not refer to this class directly.

This class is thread-safe.
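As an illustration (not part of the Javadoc), a minimal sketch of obtaining an instance through the Admin interface rather than referring to KafkaAdminClient directly; the bootstrap address localhost:9092 is a placeholder assumption.

```java
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class AdminCreateExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder bootstrap address for this sketch.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Admin.create(...) returns the default implementation; code should only use the Admin interface.
        try (Admin admin = Admin.create(props)) {
            System.out.println(admin.describeCluster().clusterId().get());
        }
    }
}
```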
    Method Details

    +
      +
    • +
      +

close

public void close(Duration timeout)

Description copied from interface: Admin
Close the Admin client and release all associated resources.

The close operation has a grace period during which current operations will be allowed to complete, specified by the given duration. New operations will not be accepted during the grace period. Once the grace period is over, all operations that have not yet been completed will be aborted with a TimeoutException.

Parameters:
timeout - The time to use for the wait time.

createTopics

public CreateTopicsResult createTopics(Collection<NewTopic> newTopics, CreateTopicsOptions options)

Description copied from interface: Admin
Create a batch of new topics.

This operation is not transactional, so it may succeed for some topics while failing for others.

It may take several seconds after CreateTopicsResult returns success for all the brokers to become aware that the topics have been created. During this time, Admin.listTopics() and Admin.describeTopics(Collection) may not return information about the new topics.

This operation is supported by brokers with version 0.10.1.0 or higher. The validateOnly option is supported from version 0.10.2.0.

Parameters:
newTopics - The new topics to create.
options - The options to use when creating the new topics.
Returns:
The CreateTopicsResult.
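A minimal sketch (not from the Javadoc) of creating one topic; the topic name "orders", the partition count, the replication factor, and the admin instance are assumptions for the example.

```java
import java.util.List;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.NewTopic;

public class CreateTopicsExample {
    // Creates a (hypothetical) "orders" topic with 3 partitions and a replication factor of 1.
    static void createOrdersTopic(Admin admin) throws Exception {
        NewTopic orders = new NewTopic("orders", 3, (short) 1);
        CreateTopicsResult result = admin.createTopics(List.of(orders));
        result.all().get();
    }
}
```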

deleteTopics

public DeleteTopicsResult deleteTopics(TopicCollection topics, DeleteTopicsOptions options)

Description copied from interface: Admin
Delete a batch of topics.

This operation is not transactional, so it may succeed for some topics while failing for others.

It may take several seconds after the DeleteTopicsResult returns success for all the brokers to become aware that the topics are gone. During this time, Admin.listTopics() and Admin.describeTopics(Collection) may continue to return information about the deleted topics.

If delete.topic.enable is set to false on the brokers, an exception will be returned to the client indicating that topic deletion is disabled.

When using topic IDs, this operation is supported by brokers with inter-broker protocol 2.8 or higher. When using topic names, this operation is supported by brokers with version 0.10.1.0 or higher.

Parameters:
topics - The topics to delete.
options - The options to use when deleting the topics.
Returns:
The DeleteTopicsResult.

listTopics
public ListTopicsResult listTopics(ListTopicsOptions options)
List the topics available in the cluster.
Parameters: options - The options to use when listing the topics.
Returns: The ListTopicsResult.

describeTopics
public DescribeTopicsResult describeTopics(TopicCollection topics, DescribeTopicsOptions options)
Describe some topics in the cluster. When using topic IDs, this operation is supported by brokers with version 3.1.0 or higher.
Parameters: topics - The topics to describe. options - The options to use when describing the topics.
Returns: The DescribeTopicsResult.
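A minimal sketch of combining the two calls above: list the topic names, then describe them. It assumes an already configured Admin instance (as in the earlier sketch) and uses the name-based convenience overload of describeTopics.

```java
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.TopicDescription;

public class ListAndDescribeTopicsExample {
    // Prints each topic name with its partition count.
    static void printTopics(Admin admin) throws ExecutionException, InterruptedException {
        Set<String> names = admin.listTopics().names().get();
        Map<String, TopicDescription> descriptions =
                admin.describeTopics(names).allTopicNames().get();
        descriptions.forEach((name, desc) ->
                System.out.println(name + " has " + desc.partitions().size() + " partitions"));
    }
}
```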

describeCluster
public DescribeClusterResult describeCluster(DescribeClusterOptions options)
Get information about the nodes in the cluster. To obtain broker cluster information, you must configure AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG. To obtain controller cluster information, you must configure AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG.
Parameters: options - The options to use when getting information about the cluster.
Returns: The DescribeClusterResult.
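A short sketch of reading cluster metadata via describeCluster(); the admin variable is assumed to be an already created Admin client, and the no-argument convenience overload is used.

```java
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.DescribeClusterResult;

public class DescribeClusterExample {
    static void printCluster(Admin admin) throws ExecutionException, InterruptedException {
        DescribeClusterResult cluster = admin.describeCluster();
        System.out.println("Cluster id:   " + cluster.clusterId().get());
        System.out.println("Controller:   " + cluster.controller().get());
        System.out.println("Broker count: " + cluster.nodes().get().size());
    }
}
```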

describeAcls
public DescribeAclsResult describeAcls(AclBindingFilter filter, DescribeAclsOptions options)
Lists access control lists (ACLs) according to the supplied filter. Note: it may take some time for changes made by createAcls or deleteAcls to be reflected in the output of describeAcls. This operation is supported by brokers with version 0.11.0.0 or higher.
Parameters: filter - The filter to use. options - The options to use when listing the ACLs.
Returns: The DescribeAclsResult.

createAcls
public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options)
Creates access control lists (ACLs) which are bound to specific resources. This operation is not transactional, so it may succeed for some ACLs while failing for others. If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
Parameters: acls - The ACLs to create. options - The options to use when creating the ACLs.
Returns: The CreateAclsResult.

deleteAcls
public DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options)
Deletes access control lists (ACLs) according to the supplied filters. This operation is not transactional, so it may succeed for some ACLs while failing for others. This operation is supported by brokers with version 0.11.0.0 or higher.
Parameters: filters - The filters to use. options - The options to use when deleting the ACLs.
Returns: The DeleteAclsResult.
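The sketch below grants a hypothetical principal read access to one topic and then lists the matching ACL bindings. The principal, host, and topic names are illustrative only, and the cluster is assumed to have an authorizer enabled.

```java
import java.util.List;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;

public class AclExample {
    static void grantAndList(Admin admin) throws ExecutionException, InterruptedException {
        AclBinding binding = new AclBinding(
                new ResourcePattern(ResourceType.TOPIC, "demo-topic", PatternType.LITERAL),
                new AccessControlEntry("User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW));

        admin.createAcls(List.of(binding)).all().get();

        // ANY matches every binding; narrow the filter in real usage.
        admin.describeAcls(AclBindingFilter.ANY).values().get()
             .forEach(System.out::println);
    }
}
```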

describeConfigs
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, DescribeConfigsOptions options)
Get the configuration for the specified resources. The returned configuration includes default values, and the isDefault() method can be used to distinguish them from user-supplied values. The value of config entries where isSensitive() is true is always null so that sensitive information is not disclosed. Config entries where isReadOnly() is true cannot be updated. The behavior for nonexistent resources differs depending on the resource type; see the Admin interface documentation for details. Note that you cannot describe broker configs or broker logger using AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, and you cannot describe controller configs or controller logger using AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG. This operation is supported by brokers with version 0.11.0.0 or higher.
Parameters: configResources - See relevant type ConfigResource.Type. options - The options to use when describing configs.
Returns: The DescribeConfigsResult.

incrementalAlterConfigs
public AlterConfigsResult incrementalAlterConfigs(Map<ConfigResource,Collection<AlterConfigOp>> configs, AlterConfigsOptions options)
Incrementally update the configuration for the specified resources. Updates are not transactional, so they may succeed for some resources while failing for others; the configs for a particular resource are updated atomically. Several exceptions can be anticipated when calling get() on the futures obtained from the returned AlterConfigsResult; see the Admin interface documentation for the full list. This operation is supported by brokers with version 2.3.0 or higher.
Parameters: configs - The resources with their configs. options - The options to use when altering configs.
Returns: The AlterConfigsResult.
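To make the two config calls concrete, here is a sketch that reads a topic's configuration and then sets retention.ms with an incremental, per-key update. The topic name and retention value are placeholders.

```java
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class TopicConfigExample {
    static void tweakRetention(Admin admin) throws ExecutionException, InterruptedException {
        ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "demo-topic");

        // Read the current configuration (defaults included).
        Config current = admin.describeConfigs(List.of(topic)).all().get().get(topic);
        System.out.println("retention.ms = " + current.get("retention.ms").value());

        // Set a 7-day retention with an incremental (per-key) update.
        AlterConfigOp setRetention =
                new AlterConfigOp(new ConfigEntry("retention.ms", "604800000"), AlterConfigOp.OpType.SET);
        admin.incrementalAlterConfigs(Map.of(topic, List.of(setRetention))).all().get();
    }
}
```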

alterReplicaLogDirs
public AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica,String> replicaAssignment, AlterReplicaLogDirsOptions options)
Change the log directory for the specified replicas. If the replica does not exist on the broker, the result shows REPLICA_NOT_AVAILABLE for the given replica and the replica will be created in the given log directory on the broker when it is created later. If the replica already exists on the broker, the replica will be moved to the given log directory if it is not already there. For detailed results, inspect the returned AlterReplicaLogDirsResult instance. This operation is not transactional, so it may succeed for some replicas while failing for others. This operation is supported by brokers with version 1.1.0 or higher.
Parameters: replicaAssignment - The replicas with their log directory absolute path. options - The options to use when changing replica dir.
Returns: The AlterReplicaLogDirsResult.

describeLogDirs
public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options)
Query the information of all log directories on the given set of brokers. This operation is supported by brokers with version 1.0.0 or higher.
Parameters: brokers - A list of brokers. options - The options to use when querying log dir info.
Returns: The DescribeLogDirsResult.

describeReplicaLogDirs
public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options)
Query the replica log directory information for the specified replicas. This operation is supported by brokers with version 1.0.0 or higher.
Parameters: replicas - The replicas to query. options - The options to use when querying replica log dir info.
Returns: The DescribeReplicaLogDirsResult.

createPartitions
public CreatePartitionsResult createPartitions(Map<String,NewPartitions> newPartitions, CreatePartitionsOptions options)
Increase the number of partitions of the topics given as the keys of newPartitions according to the corresponding values. If partitions are increased for a topic that has a key, the partition logic or ordering of the messages will be affected. This operation is not transactional, so it may succeed for some topics while failing for others. It may take several seconds after this method returns success for all the brokers to become aware that the partitions have been created; during this time, Admin.describeTopics(Collection) may not return information about the new partitions. This operation is supported by brokers with version 1.0.0 or higher. Several exceptions can be anticipated when calling get() on the futures obtained from the values() method of the returned CreatePartitionsResult; see the Admin interface documentation for the full list.
Parameters: newPartitions - The topics which should have new partitions created, and corresponding parameters for the created partitions. options - The options to use when creating the new partitions.
Returns: The CreatePartitionsResult.
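A minimal sketch of growing a topic to six partitions with the call above; the topic name and target count are placeholders.

```java
import java.util.Map;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.NewPartitions;

public class CreatePartitionsExample {
    static void growTopic(Admin admin) throws ExecutionException, InterruptedException {
        // Partition counts can only grow; existing records are not rebalanced.
        admin.createPartitions(Map.of("demo-topic", NewPartitions.increaseTo(6)))
             .all().get();
    }
}
```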

deleteRecords
public DeleteRecordsResult deleteRecords(Map<TopicPartition,RecordsToDelete> recordsToDelete, DeleteRecordsOptions options)
Delete records whose offset is smaller than the given offset of the corresponding partition. This operation is supported by brokers with version 0.11.0.0 or higher.
Parameters: recordsToDelete - The topic partitions and related offsets from which records deletion starts. options - The options to use when deleting records.
Returns: The DeleteRecordsResult.
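As an illustration, the following sketch makes everything below offset 1000 in partition 0 of a topic eligible for deletion; the topic and offset are placeholder values.

```java
import java.util.Map;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.TopicPartition;

public class DeleteRecordsExample {
    static void truncate(Admin admin) throws ExecutionException, InterruptedException {
        TopicPartition tp = new TopicPartition("demo-topic", 0);
        // Records with offsets below 1000 become eligible for deletion.
        admin.deleteRecords(Map.of(tp, RecordsToDelete.beforeOffset(1000L)))
             .all().get();
    }
}
```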

createDelegationToken
public CreateDelegationTokenResult createDelegationToken(CreateDelegationTokenOptions options)
Create a Delegation Token. This operation is supported by brokers with version 1.1.0 or higher. Several exceptions can be anticipated when calling get() on the futures obtained from the delegationToken() method of the returned CreateDelegationTokenResult; see the Admin interface documentation for the full list.
Parameters: options - The options to use when creating delegation token.
Returns: The CreateDelegationTokenResult.

renewDelegationToken
public RenewDelegationTokenResult renewDelegationToken(byte[] hmac, RenewDelegationTokenOptions options)
Renew a Delegation Token. This operation is supported by brokers with version 1.1.0 or higher. Several exceptions can be anticipated when calling get() on the futures obtained from the expiryTimestamp() method of the returned RenewDelegationTokenResult; see the Admin interface documentation for the full list.
Parameters: hmac - HMAC of the Delegation token. options - The options to use when renewing delegation token.
Returns: The RenewDelegationTokenResult.

expireDelegationToken
public ExpireDelegationTokenResult expireDelegationToken(byte[] hmac, ExpireDelegationTokenOptions options)
Expire a Delegation Token. This operation is supported by brokers with version 1.1.0 or higher. Several exceptions can be anticipated when calling get() on the futures obtained from the expiryTimestamp() method of the returned ExpireDelegationTokenResult; see the Admin interface documentation for the full list.
Parameters: hmac - HMAC of the Delegation token. options - The options to use when expiring delegation token.
Returns: The ExpireDelegationTokenResult.

describeDelegationToken
public DescribeDelegationTokenResult describeDelegationToken(DescribeDelegationTokenOptions options)
Describe the Delegation Tokens. This operation is supported by brokers with version 1.1.0 or higher. Several exceptions can be anticipated when calling get() on the futures obtained from the delegationTokens() method of the returned DescribeDelegationTokenResult; see the Admin interface documentation for the full list.
Parameters: options - The options to use when describing delegation tokens.
Returns: The DescribeDelegationTokenResult.

listGroups
public ListGroupsResult listGroups(ListGroupsOptions options)
List the groups available in the cluster.
Parameters: options - The options to use when listing the groups.
Returns: The ListGroupsResult.

describeConsumerGroups
public DescribeConsumerGroupsResult describeConsumerGroups(Collection<String> groupIds, DescribeConsumerGroupsOptions options)
Describe some consumer groups in the cluster.
Parameters: groupIds - The IDs of the groups to describe. options - The options to use when describing the groups.
Returns: The DescribeConsumerGroupsResult.

listConsumerGroups
@Deprecated(since="4.1", forRemoval=true)
public ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options)
Deprecated, for removal: this API element is subject to removal in a future version.
List the consumer groups available in the cluster.
Parameters: options - The options to use when listing the consumer groups.
Returns: The ListConsumerGroupsResult.
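A sketch tying the group-listing and group-describing calls together; it assumes the no-argument listGroups() convenience overload from the Admin interface, and the group name "demo-group" is a placeholder.

```java
import java.util.List;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.admin.GroupListing;

public class GroupsExample {
    static void printGroups(Admin admin) throws ExecutionException, InterruptedException {
        // List every group the brokers know about (consumer, share, streams, classic).
        for (GroupListing listing : admin.listGroups().all().get()) {
            System.out.println(listing.groupId());
        }

        // Describe a specific consumer group in more detail.
        ConsumerGroupDescription description = admin
                .describeConsumerGroups(List.of("demo-group"))
                .describedGroups().get("demo-group").get();
        System.out.println("members: " + description.members().size());
    }
}
```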

listConsumerGroupOffsets
public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String,ListConsumerGroupOffsetsSpec> groupSpecs, ListConsumerGroupOffsetsOptions options)
List the consumer group offsets available in the cluster for the specified consumer groups.
Parameters: groupSpecs - Map of consumer group ids to a spec that specifies the topic partitions of the group to list offsets for. options - The options to use when listing the consumer group offsets.
Returns: The ListConsumerGroupOffsetsResult.

listStreamsGroupOffsets
public ListStreamsGroupOffsetsResult listStreamsGroupOffsets(Map<String,ListStreamsGroupOffsetsSpec> groupSpecs, ListStreamsGroupOffsetsOptions options)
List the streams group offsets available in the cluster for the specified streams groups. Note: this method effectively does the same as the corresponding consumer group method Admin.listConsumerGroupOffsets(java.lang.String, org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions) does.
Parameters: groupSpecs - Map of streams group ids to a spec that specifies the topic partitions of the group to list offsets for. options - The options to use when listing the streams group offsets.
Returns: The ListStreamsGroupOffsetsResult.
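A sketch of reading a group's committed offsets with the consumer-offset listing above, using the single-group convenience overload from the Admin interface; "demo-group" is a placeholder.

```java
import java.util.Map;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class GroupOffsetsExample {
    static void printCommittedOffsets(Admin admin) throws ExecutionException, InterruptedException {
        Map<TopicPartition, OffsetAndMetadata> offsets = admin
                .listConsumerGroupOffsets("demo-group")
                .partitionsToOffsetAndMetadata().get();
        offsets.forEach((tp, oam) ->
                System.out.println(tp + " -> " + (oam == null ? "no committed offset" : oam.offset())));
    }
}
```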

deleteConsumerGroups
public DeleteConsumerGroupsResult deleteConsumerGroups(Collection<String> groupIds, DeleteConsumerGroupsOptions options)
Delete consumer groups from the cluster.
Parameters: groupIds - The IDs of the groups to delete. options - The options to use when deleting a consumer group.
Returns: The DeleteConsumerGroupsResult.

deleteStreamsGroups
public DeleteStreamsGroupsResult deleteStreamsGroups(Collection<String> groupIds, DeleteStreamsGroupsOptions options)
Delete streams groups from the cluster. Note: this method effectively does the same as the corresponding consumer group method Admin.deleteConsumerGroups(java.util.Collection<java.lang.String>, org.apache.kafka.clients.admin.DeleteConsumerGroupsOptions) does.
Parameters: groupIds - The IDs of the groups to delete. options - The options to use when deleting a streams group.
Returns: The DeleteStreamsGroupsResult.

deleteConsumerGroupOffsets
public DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, Set<TopicPartition> partitions, DeleteConsumerGroupOffsetsOptions options)
Delete committed offsets for a set of partitions in a consumer group. This will succeed at the partition level only if the group is not actively subscribed to the corresponding topic.
Parameters: groupId - The group for which to delete offsets. partitions - The partitions for which to delete committed offsets. options - The options to use when deleting offsets in a consumer group.
Returns: The DeleteConsumerGroupOffsetsResult.

deleteStreamsGroupOffsets
public DeleteStreamsGroupOffsetsResult deleteStreamsGroupOffsets(String groupId, Set<TopicPartition> partitions, DeleteStreamsGroupOffsetsOptions options)
Delete committed offsets for a set of partitions in a streams group. This will succeed at the partition level only if the group is not actively subscribed to the corresponding topic. Note: this method effectively does the same as the corresponding consumer group method Admin.deleteConsumerGroupOffsets(java.lang.String, java.util.Set<org.apache.kafka.common.TopicPartition>, org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsOptions) does.
Parameters: groupId - The group for which to delete offsets. partitions - The partitions for which to delete committed offsets. options - The options to use when deleting offsets in a streams group.
Returns: The DeleteStreamsGroupOffsetsResult.

describeShareGroups
public DescribeShareGroupsResult describeShareGroups(Collection<String> groupIds, DescribeShareGroupsOptions options)
Describe some share groups in the cluster.
Parameters: groupIds - The IDs of the groups to describe. options - The options to use when describing the groups.
Returns: The DescribeShareGroupsResult.

alterShareGroupOffsets
public AlterShareGroupOffsetsResult alterShareGroupOffsets(String groupId, Map<TopicPartition,Long> offsets, AlterShareGroupOffsetsOptions options)
Alters offsets for the specified group. In order to succeed, the group must be empty. This operation is not transactional, so it may succeed for some partitions while failing for others.
Parameters: groupId - The group for which to alter offsets. offsets - A map of offsets by partition. Partitions not specified in the map are ignored. options - The options to use when altering the offsets.
Returns: The AlterShareGroupOffsetsResult.

listShareGroupOffsets
public ListShareGroupOffsetsResult listShareGroupOffsets(Map<String,ListShareGroupOffsetsSpec> groupSpecs, ListShareGroupOffsetsOptions options)
List the share group offsets available in the cluster for the specified share groups.
Parameters: groupSpecs - Map of share group ids to a spec that specifies the topic partitions of the group to list offsets for. options - The options to use when listing the share group offsets.
Returns: The ListShareGroupOffsetsResult.

deleteShareGroupOffsets
public DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, Set<String> topics, DeleteShareGroupOffsetsOptions options)
Delete offsets for a set of topics in a share group.
Parameters: groupId - The group for which to delete offsets. topics - The topics for which to delete offsets. options - The options to use when deleting offsets in a share group.
Returns: The DeleteShareGroupOffsetsResult.

describeStreamsGroups
public DescribeStreamsGroupsResult describeStreamsGroups(Collection<String> groupIds, DescribeStreamsGroupsOptions options)
Describe streams groups in the cluster.
Parameters: groupIds - The IDs of the groups to describe. options - The options to use when describing the groups.
Returns: The DescribeStreamsGroupsResult.

describeClassicGroups
public DescribeClassicGroupsResult describeClassicGroups(Collection<String> groupIds, DescribeClassicGroupsOptions options)
Describe some classic groups in the cluster.
Parameters: groupIds - The IDs of the groups to describe. options - The options to use when describing the groups.
Returns: The DescribeClassicGroupsResult.

deleteShareGroups
public DeleteShareGroupsResult deleteShareGroups(Collection<String> groupIds, DeleteShareGroupsOptions options)
Delete share groups from the cluster.
Parameters: groupIds - Collection of share group ids which are to be deleted. options - The options to use when deleting a share group.
Returns: The DeleteShareGroupsResult.

metrics
public Map<MetricName,? extends Metric> metrics()
Get the metrics kept by the adminClient.

electLeaders
public ElectLeadersResult electLeaders(ElectionType electionType, Set<TopicPartition> topicPartitions, ElectLeadersOptions options)
Elect a replica as leader for the given partitions, or for all partitions if the argument to partitions is null. This operation is not transactional, so it may succeed for some partitions while failing for others. It may take several seconds after this method returns success for all the brokers in the cluster to become aware that the partitions have new leaders; during this time, Admin.describeTopics(Collection) may not return information about the partitions' new leaders. This operation is supported by brokers with version 2.2.0 or later if preferred election is used; otherwise the brokers must be 2.4.0 or higher. Several exceptions can be anticipated when calling get() on the future obtained from the returned ElectLeadersResult; see the Admin interface documentation for the full list.
Parameters: electionType - The type of election to conduct. topicPartitions - The topics and partitions for which to conduct elections. options - The options to use when electing the leaders.
Returns: The ElectLeadersResult.
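A sketch of triggering a preferred leader election for a single partition with the call above; the topic and partition are placeholders.

```java
import java.util.Set;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.TopicPartition;

public class ElectLeadersExample {
    static void electPreferredLeader(Admin admin) throws ExecutionException, InterruptedException {
        Set<TopicPartition> partitions = Set.of(new TopicPartition("demo-topic", 0));
        // Passing null instead of a set would run the election for all partitions.
        admin.electLeaders(ElectionType.PREFERRED, partitions).partitions().get();
    }
}
```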

alterPartitionReassignments
public AlterPartitionReassignmentsResult alterPartitionReassignments(Map<TopicPartition,Optional<NewPartitionReassignment>> reassignments, AlterPartitionReassignmentsOptions options)
Change the reassignments for one or more partitions. Providing an empty Optional (e.g. via Optional.empty()) will revert the reassignment for the associated partition. Several exceptions can be anticipated when calling get() on the futures obtained from the returned AlterPartitionReassignmentsResult; see the Admin interface documentation for the full list.
Parameters: reassignments - The reassignments to add, modify, or remove. See NewPartitionReassignment. options - The options to use.
Returns: The result.

listPartitionReassignments
public ListPartitionReassignmentsResult listPartitionReassignments(Optional<Set<TopicPartition>> partitions, ListPartitionReassignmentsOptions options)
List the ongoing partition reassignments for the given partitions, or for all partitions if an empty Optional is supplied.
Parameters: partitions - the partitions we want to get reassignments for, or an empty optional if we want to get the reassignments for all partitions in the cluster. options - The options to use.
Returns: The result.

registerMetricForSubscription
public void registerMetricForSubscription(KafkaMetric metric)
Add the provided application metric for subscription. This metric will be added to this client's metrics that are available for subscription and sent as telemetry data to the broker. The provided metric must map to an OTLP metric data point type in the OpenTelemetry v1 metrics protobuf message types. Specifically, the metric should be one of the following:
- `Sum`: Monotonic total count meter (Counter). Suitable for metrics like total number of X, e.g., total bytes sent.
- `Gauge`: Non-monotonic current value meter (UpDownCounter). Suitable for metrics like current value of Y, e.g., current queue count.
Metrics not matching these types are silently ignored. Executing this method for a previously registered metric is a benign operation and results in updating that metric's entry.
Parameters: metric - The application metric to register.

unregisterMetricFromSubscription
public void unregisterMetricFromSubscription(KafkaMetric metric)
Remove the provided application metric from subscription. This metric is removed from this client's metrics and will not be available for subscription any longer. Executing this method with a metric that has not been registered is a benign operation and does not result in any action taken (no-op).
Parameters: metric - The application metric to remove.

removeMembersFromConsumerGroup
public RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(String groupId, RemoveMembersFromConsumerGroupOptions options)
Remove members from the consumer group by given member identities. For possible error codes, refer to LeaveGroupResponse.
Parameters: groupId - The ID of the group to remove member from. options - The options to carry removing members' information.
Returns: The MembershipChangeResult.

alterConsumerGroupOffsets
public AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId, Map<TopicPartition,OffsetAndMetadata> offsets, AlterConsumerGroupOffsetsOptions options)
Alters offsets for the specified group. In order to succeed, the group must be empty. This operation is not transactional, so it may succeed for some partitions while failing for others.
Parameters: groupId - The group for which to alter offsets. offsets - A map of offsets by partition with associated metadata. Partitions not specified in the map are ignored. options - The options to use when altering the offsets.
Returns: The AlterOffsetsResult.
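To illustrate resetting a group's position with the call above, here is a sketch that rewinds one partition of an empty group back to offset 0; group, topic, and offset are placeholders.

```java
import java.util.Map;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class ResetOffsetsExample {
    static void rewindToStart(Admin admin) throws ExecutionException, InterruptedException {
        TopicPartition tp = new TopicPartition("demo-topic", 0);
        // The group must have no active members for this to succeed.
        admin.alterConsumerGroupOffsets("demo-group", Map.of(tp, new OffsetAndMetadata(0L)))
             .all().get();
    }
}
```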

alterStreamsGroupOffsets
public AlterStreamsGroupOffsetsResult alterStreamsGroupOffsets(String groupId, Map<TopicPartition,OffsetAndMetadata> offsets, AlterStreamsGroupOffsetsOptions options)
Alters offsets for the specified group. In order to succeed, the group must be empty. This operation is not transactional, so it may succeed for some partitions while failing for others. Note: this method effectively does the same as the corresponding consumer group method Admin.alterConsumerGroupOffsets(java.lang.String, java.util.Map<org.apache.kafka.common.TopicPartition, org.apache.kafka.clients.consumer.OffsetAndMetadata>) does.
Parameters: groupId - The group for which to alter offsets. offsets - A map of offsets by partition with associated metadata. Partitions not specified in the map are ignored. options - The options to use when altering the offsets.
Returns: The AlterOffsetsResult.

listOffsets
public ListOffsetsResult listOffsets(Map<TopicPartition,OffsetSpec> topicPartitionOffsets, ListOffsetsOptions options)
List offsets for the specified partitions. This operation enables finding the beginning offset, the end offset, as well as the offset matching a timestamp in partitions.
Parameters: topicPartitionOffsets - The mapping from partition to the OffsetSpec to look up. options - The options to use when retrieving the offsets.
Returns: The ListOffsetsResult.
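A sketch that looks up the earliest and latest offsets of one partition using the call above; the topic is a placeholder.

```java
import java.util.Map;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class ListOffsetsExample {
    static void printBounds(Admin admin) throws ExecutionException, InterruptedException {
        TopicPartition tp = new TopicPartition("demo-topic", 0);

        ListOffsetsResultInfo earliest =
                admin.listOffsets(Map.of(tp, OffsetSpec.earliest())).partitionResult(tp).get();
        ListOffsetsResultInfo latest =
                admin.listOffsets(Map.of(tp, OffsetSpec.latest())).partitionResult(tp).get();

        System.out.println(tp + " spans offsets " + earliest.offset() + " .. " + latest.offset());
    }
}
```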

describeClientQuotas
public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, DescribeClientQuotasOptions options)
Describes all entities matching the provided filter that have at least one client quota configuration value defined. Several exceptions can be anticipated when calling get() on the future from the returned DescribeClientQuotasResult; see the Admin interface documentation for the full list. This operation is supported by brokers with version 2.6.0 or higher.
Parameters: filter - the filter to apply to match entities. options - the options to use.
Returns: the DescribeClientQuotasResult containing the result.

alterClientQuotas
public AlterClientQuotasResult alterClientQuotas(Collection<ClientQuotaAlteration> entries, AlterClientQuotasOptions options)
Alters client quota configurations with the specified alterations. Alterations for a single entity are atomic, but atomicity across entities is not guaranteed. The resulting per-entity error code should be evaluated to resolve the success or failure of all updates.
The following exceptions can be anticipated when calling get() on the futures obtained from the returned AlterClientQuotasResult:
- ClusterAuthorizationException: if the authenticated user didn't have alter access to the cluster.
- InvalidRequestException: if the request details are invalid, e.g., a configuration key was specified more than once for an entity.
- TimeoutException: if the request timed out before the alterations could finish. It cannot be guaranteed whether the update succeeded or not.
This operation is supported by brokers with version 2.6.0 or higher.
Parameters: entries - the alterations to perform.
Returns: the AlterClientQuotasResult containing the result.

describeUserScramCredentials
public DescribeUserScramCredentialsResult describeUserScramCredentials(List<String> users, DescribeUserScramCredentialsOptions options)
Describe SASL/SCRAM credentials. Several exceptions can be anticipated when calling get() on the futures from the returned DescribeUserScramCredentialsResult; see the Admin interface documentation for the full list. This operation is supported by brokers with version 2.7.0 or higher.
Parameters: users - the users for which credentials are to be described; all users' credentials are described if null or empty. options - The options to use when describing the credentials.
Returns: The DescribeUserScramCredentialsResult.

alterUserScramCredentials
public AlterUserScramCredentialsResult alterUserScramCredentials(List<UserScramCredentialAlteration> alterations, AlterUserScramCredentialsOptions options)
Alter SASL/SCRAM credentials. Several exceptions can be anticipated when calling get() on any of the futures from the returned AlterUserScramCredentialsResult; see the Admin interface documentation for the full list. This operation is supported by brokers with version 2.7.0 or higher.
Parameters: alterations - the alterations to be applied. options - The options to use when altering the credentials.
Returns: The AlterUserScramCredentialsResult.

describeFeatures
public DescribeFeaturesResult describeFeatures(DescribeFeaturesOptions options)
Describes finalized as well as supported features. The request is issued to any random broker.
The following exceptions can be anticipated when calling get() on the future from the returned DescribeFeaturesResult:
- TimeoutException: if the request timed out before the describe operation could finish.
Parameters: options - the options to use.
Returns: the DescribeFeaturesResult containing the result.

updateFeatures
public UpdateFeaturesResult updateFeatures(Map<String,FeatureUpdate> featureUpdates, UpdateFeaturesOptions options)
Applies specified updates to finalized features. This operation is not transactional, so some updates may succeed while the rest may fail. The API takes in a map of finalized feature names to FeatureUpdate that needs to be applied. Each entry in the map specifies the finalized feature to be added, updated or deleted, along with the new max feature version level value. This request is issued only to the controller since the API is only served by the controller. The return value contains an error code for each supplied FeatureUpdate, and the code indicates if the update succeeded or failed in the controller. Several exceptions can be anticipated when calling get() on the futures obtained from the returned UpdateFeaturesResult; see the Admin interface documentation for the full list. This operation is supported by brokers with version 2.7.0 or higher.
Parameters: featureUpdates - the map of finalized feature name to FeatureUpdate. options - the options to use.
Returns: the UpdateFeaturesResult containing the result.

describeMetadataQuorum
public DescribeMetadataQuorumResult describeMetadataQuorum(DescribeMetadataQuorumOptions options)
Describes the state of the metadata quorum. Several exceptions can be anticipated when calling get() on the futures obtained from the returned DescribeMetadataQuorumResult; see the Admin interface documentation for the full list.
Parameters: options - The DescribeMetadataQuorumOptions to use when describing the quorum.
Returns: the DescribeMetadataQuorumResult containing the result.

unregisterBroker
public UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options)
Unregister a broker. This operation does not have any effect on partition assignments. Several exceptions can be anticipated when calling get() on the future from the returned UnregisterBrokerResult; see the Admin interface documentation for the full list.
Parameters: brokerId - the broker id to unregister. options - the options to use.
Returns: the UnregisterBrokerResult containing the result.

describeProducers
public DescribeProducersResult describeProducers(Collection<TopicPartition> topicPartitions, DescribeProducersOptions options)
Describe active producer state on a set of topic partitions. Unless a specific broker is requested through DescribeProducersOptions.brokerId(int), this will query the partition leader to find the producer state.
Parameters: topicPartitions - The set of partitions to query. options - Options to control the method behavior.
Returns: The result.

describeTransactions
public DescribeTransactionsResult describeTransactions(Collection<String> transactionalIds, DescribeTransactionsOptions options)
Describe the state of a set of transactional IDs from the respective transaction coordinators, which are dynamically discovered.
Parameters: transactionalIds - The set of transactional IDs to query. options - Options to control the method behavior.
Returns: The result.

abortTransaction
public AbortTransactionResult abortTransaction(AbortTransactionSpec spec, AbortTransactionOptions options)
Forcefully abort a transaction which is open on a topic partition. This will send a `WriteTxnMarkers` request to the partition leader in order to abort the transaction. This requires administrative privileges.
Parameters: spec - The transaction specification including topic partition and producer details. options - Options to control the method behavior (including filters).
Returns: The result.

forceTerminateTransaction
public TerminateTransactionResult forceTerminateTransaction(String transactionalId, TerminateTransactionOptions options)
Forcefully terminates an ongoing transaction for a given transactional ID. This API is intended for well-formed but long-running transactions that are known to the transaction coordinator. It is primarily designed for supporting 2PC (two-phase commit) workflows, where a coordinator may need to unilaterally terminate a participant transaction that hasn't completed.
Parameters: transactionalId - The transactional ID whose active transaction should be forcefully terminated. options - The options to use when terminating the transaction.
Returns: a TerminateTransactionResult that can be used to await the operation result.

listTransactions
public ListTransactionsResult listTransactions(ListTransactionsOptions options)
List active transactions in the cluster. This will query all potential transaction coordinators in the cluster and collect the state of all transactions. Users should typically attempt to reduce the size of the result set using ListTransactionsOptions.filterProducerIds(Collection), ListTransactionsOptions.filterStates(Collection) or ListTransactionsOptions.filterOnDuration(long).
Parameters: options - Options to control the method behavior (including filters).
Returns: The result.

fenceProducers
public FenceProducersResult fenceProducers(Collection<String> transactionalIds, FenceProducersOptions options)
Fence out all active producers that use any of the provided transactional IDs.
Parameters: transactionalIds - The IDs of the producers to fence. options - The options to use when fencing the producers.
Returns: The FenceProducersResult.
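A sketch of listing transactions with the unfiltered convenience overload from the Admin interface; on a busy cluster you would narrow the result with the filter options mentioned above.

```java
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.TransactionListing;

public class ListTransactionsExample {
    static void printTransactions(Admin admin) throws ExecutionException, InterruptedException {
        // Queries every transaction coordinator; consider the filter options on
        // ListTransactionsOptions to keep the result set small.
        for (TransactionListing listing : admin.listTransactions().all().get()) {
            System.out.println(listing.transactionalId() + " (state: " + listing.state() + ")");
        }
    }
}
```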

listConfigResources
public ListConfigResourcesResult listConfigResources(Set<ConfigResource.Type> configResourceTypes, ListConfigResourcesOptions options)
List the configuration resources available in the cluster which match the given config resource types. If no config resource types are specified, all configuration resources will be listed.
Parameters: configResourceTypes - The set of configuration resource types to list. options - The options to use when listing the configuration resources.
Returns: The ListConfigResourcesResult.

listClientMetricsResources
public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMetricsResourcesOptions options)
List the client metrics configuration resources available in the cluster.
Parameters: options - The options to use when listing the client metrics resources.
Returns: The ListClientMetricsResourcesResult.

addRaftVoter
public AddRaftVoterResult addRaftVoter(int voterId, Uuid voterDirectoryId, Set<RaftVoterEndpoint> endpoints, AddRaftVoterOptions options)
Add a new voter node to the KRaft metadata quorum.
Parameters: voterId - The node ID of the voter. voterDirectoryId - The directory ID of the voter. endpoints - The endpoints that the new voter has. options - The options to use when adding the new voter node.

removeRaftVoter
public RemoveRaftVoterResult removeRaftVoter(int voterId, Uuid voterDirectoryId, RemoveRaftVoterOptions options)
Remove a voter node from the KRaft metadata quorum.
Parameters: voterId - The node ID of the voter. voterDirectoryId - The directory ID of the voter. options - The options to use when removing the voter node.

clientInstanceId
public Uuid clientInstanceId(Duration timeout)
Determines the client's unique client instance ID used for telemetry. This ID is unique to this specific client instance and will not change after it is initially generated. The ID is useful for correlating client operations with telemetry sent to the broker and to its eventual monitoring destinations. If telemetry is enabled, this will first require a connection to the cluster to generate the unique client instance ID. This method waits up to timeout for the admin client to complete the request. Client telemetry is controlled by the AdminClientConfig.ENABLE_METRICS_PUSH_CONFIG configuration option.
Parameters: timeout - The maximum time to wait for the admin client to determine its client instance ID. The value must be non-negative. Specifying a timeout of zero means do not wait for the initial request to complete if it hasn't already.
Returns: The client's assigned instance id used for metrics collection.
diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListClientMetricsResourcesOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListClientMetricsResourcesOptions.html new file mode 100644 index 000000000..c4f211647 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListClientMetricsResourcesOptions.html @@ -0,0 +1,140 @@

ListClientMetricsResourcesOptions (kafka 4.1.0 API)
Class ListClientMetricsResourcesOptions

java.lang.Object
  org.apache.kafka.clients.admin.AbstractOptions<ListClientMetricsResourcesOptions>
    org.apache.kafka.clients.admin.ListClientMetricsResourcesOptions

@Deprecated(since="4.1")
public class ListClientMetricsResourcesOptions
extends AbstractOptions<ListClientMetricsResourcesOptions>

Deprecated. Since 4.1. Use ListConfigResourcesOptions instead.

Constructor Details

ListClientMetricsResourcesOptions
public ListClientMetricsResourcesOptions()
Deprecated.

diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListClientMetricsResourcesResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListClientMetricsResourcesResult.html new file mode 100644 index 000000000..f4e45e442 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListClientMetricsResourcesResult.html @@ -0,0 +1,147 @@

ListClientMetricsResourcesResult (kafka 4.1.0 API)
Class ListClientMetricsResourcesResult

java.lang.Object
  org.apache.kafka.clients.admin.ListClientMetricsResourcesResult

@Deprecated(since="4.1")
public class ListClientMetricsResourcesResult
extends Object

Deprecated. Since 4.1. Use ListConfigResourcesResult instead.

The result of the Admin.listClientMetricsResources() call.

Method Details

all
Deprecated.
Returns a future that yields either an exception, or the full set of client metrics listings. In the event of a failure, the future yields nothing but the first exception which occurred.

diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListConfigResourcesOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListConfigResourcesOptions.html new file mode 100644 index 000000000..e7f2991ad --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListConfigResourcesOptions.html @@ -0,0 +1,133 @@

ListConfigResourcesOptions (kafka 4.1.0 API)
Class ListConfigResourcesOptions

java.lang.Object
  org.apache.kafka.clients.admin.AbstractOptions<ListConfigResourcesOptions>
    org.apache.kafka.clients.admin.ListConfigResourcesOptions

public class ListConfigResourcesOptions
extends AbstractOptions<ListConfigResourcesOptions>

Constructor Details

ListConfigResourcesOptions
public ListConfigResourcesOptions()

diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListConfigResourcesResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListConfigResourcesResult.html new file mode 100644 index 000000000..623b2e3ec --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListConfigResourcesResult.html @@ -0,0 +1,139 @@

ListConfigResourcesResult (kafka 4.1.0 API)
Class ListConfigResourcesResult

java.lang.Object
  org.apache.kafka.clients.admin.ListConfigResourcesResult

public class ListConfigResourcesResult
extends Object

The result of the Admin.listConfigResources() call.

Method Details

all
Returns a future that yields either an exception, or the full set of config resources. In the event of a failure, the future yields nothing but the first exception which occurred.

diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsOptions.html new file mode 100644 index 000000000..34063465c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsOptions.html @@ -0,0 +1,177 @@

ListConsumerGroupOffsetsOptions (kafka 4.1.0 API)
Class ListConsumerGroupOffsetsOptions

java.lang.Object
  org.apache.kafka.clients.admin.AbstractOptions<ListConsumerGroupOffsetsOptions>
    org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions

public class ListConsumerGroupOffsetsOptions
extends AbstractOptions<ListConsumerGroupOffsetsOptions>

Constructor Details

ListConsumerGroupOffsetsOptions
public ListConsumerGroupOffsetsOptions()

Method Details

requireStable
public ListConsumerGroupOffsetsOptions requireStable(boolean requireStable)
Sets an optional requireStable flag.

requireStable
public boolean requireStable()

diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsResult.html new file mode 100644 index 000000000..917f12052 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsResult.html @@ -0,0 +1,167 @@

ListConsumerGroupOffsetsResult (kafka 4.1.0 API)
Class ListConsumerGroupOffsetsResult

java.lang.Object
  org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult

public class ListConsumerGroupOffsetsResult
extends Object

Method Details

partitionsToOffsetAndMetadata
public KafkaFuture<Map<TopicPartition,OffsetAndMetadata>> partitionsToOffsetAndMetadata()
Return a future which yields a map of topic partitions to OffsetAndMetadata objects. If the group does not have a committed offset for a partition, the corresponding value in the returned map will be null.

partitionsToOffsetAndMetadata
public KafkaFuture<Map<TopicPartition,OffsetAndMetadata>> partitionsToOffsetAndMetadata(String groupId)
Return a future which yields a map of topic partitions to OffsetAndMetadata objects for the specified group. If the group doesn't have a committed offset for a specific partition, the corresponding value in the returned map will be null.

all
Return a future which yields all Map<String, Map<TopicPartition, OffsetAndMetadata>> objects, if requests for all the groups succeed.

diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsSpec.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsSpec.html new file mode 100644 index 000000000..3295f94d3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsSpec.html @@ -0,0 +1,221 @@

ListConsumerGroupOffsetsSpec (kafka 4.1.0 API)
Class ListConsumerGroupOffsetsSpec

java.lang.Object
  org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec

public class ListConsumerGroupOffsetsSpec
extends Object

Specification of consumer group offsets to list using Admin.listConsumerGroupOffsets(java.util.Map).

Constructor Details

ListConsumerGroupOffsetsSpec
public ListConsumerGroupOffsetsSpec()

Method Details

topicPartitions
public ListConsumerGroupOffsetsSpec topicPartitions(Collection<TopicPartition> topicPartitions)
Set the topic partitions whose offsets are to be listed for a consumer group. null includes all topic partitions.
Parameters: topicPartitions - List of topic partitions to include.
Returns: This ListConsumerGroupOffsetsSpec.

topicPartitions
public Collection<TopicPartition> topicPartitions()
Returns the topic partitions whose offsets are to be listed for a consumer group. null indicates that offsets of all partitions of the group are to be listed.

equals
public boolean equals(Object o)
Overrides: equals in class Object

hashCode
public int hashCode()
Overrides: hashCode in class Object

toString
public String toString()
Overrides: toString in class Object
    +
    +
  • +
+
+ +
+
+
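As a hedged sketch of how ListConsumerGroupOffsetsSpec combines with Admin.listConsumerGroupOffsets(java.util.Map); the group id and topic partition are illustrative, `admin` is an already-created Admin client, and the usual org.apache.kafka.clients.admin / org.apache.kafka.common imports are assumed:

    ListConsumerGroupOffsetsSpec spec = new ListConsumerGroupOffsetsSpec()
        .topicPartitions(List.of(new TopicPartition("orders", 0))); // null would mean all partitions
    ListConsumerGroupOffsetsResult result =
        admin.listConsumerGroupOffsets(Map.of("my-group", spec));
    Map<TopicPartition, OffsetAndMetadata> offsets =
        result.partitionsToOffsetAndMetadata("my-group").get();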
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupsOptions.html new file mode 100644 index 000000000..f24b1ffed --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupsOptions.html @@ -0,0 +1,259 @@ + + + + +ListConsumerGroupsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListConsumerGroupsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<ListConsumerGroupsOptions> +
org.apache.kafka.clients.admin.ListConsumerGroupsOptions
+
+
+
+
+
@Deprecated(since="4.1") +public class ListConsumerGroupsOptions +extends AbstractOptions<ListConsumerGroupsOptions>
+
Deprecated. +
Since 4.1. Use Admin.listGroups(ListGroupsOptions) instead.
+
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ListConsumerGroupsOptions

      +
      public ListConsumerGroupsOptions()
      +
      Deprecated.
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      inGroupStates

      +
      public ListConsumerGroupsOptions inGroupStates(Set<GroupState> groupStates)
      +
      Deprecated.
      +
      If groupStates is set, only groups in these states will be returned by listGroups(). + Otherwise, all groups are returned. + This operation is supported by brokers with version 2.6.0 or later.
      +
      +
    • +
    • +
      +

      inStates

      + +
      Deprecated. +
      Since 4.0. Use inGroupStates(Set) instead.
      +
      +
      If states is set, only groups in these states will be returned by listConsumerGroups(). + Otherwise, all groups are returned. + This operation is supported by brokers with version 2.6.0 or later.
      +
      +
    • +
    • +
      +

      withTypes

      +
      public ListConsumerGroupsOptions withTypes(Set<GroupType> types)
      +
      Deprecated.
      +
      If types is set, only groups of these types will be returned by listConsumerGroups(). + Otherwise, all groups are returned.
      +
      +
    • +
    • +
      +

      groupStates

      +
      public Set<GroupState> groupStates()
      +
      Deprecated.
      +
      Returns the list of group states that are requested or empty if no states have been specified.
      +
      +
    • +
    • +
      +

      states

      +
      @Deprecated +public Set<ConsumerGroupState> states()
      +
      Deprecated. +
      Since 4.0. Use inGroupStates(Set) instead.
      +
      +
      Returns the list of States that are requested or empty if no states have been specified.
      +
      +
    • +
    • +
      +

      types

      +
      public Set<GroupType> types()
      +
      Deprecated.
      +
      Returns the list of group types that are requested or empty if no types have been specified.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupsResult.html new file mode 100644 index 000000000..736818dd9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListConsumerGroupsResult.html @@ -0,0 +1,186 @@ + + + + +ListConsumerGroupsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListConsumerGroupsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ListConsumerGroupsResult
+
+
+
+
@Deprecated(since="4.1") +public class ListConsumerGroupsResult +extends Object
+
Deprecated. +
Since 4.1. Use Admin.listGroups(ListGroupsOptions) instead.
+
+
The result of the Admin.listConsumerGroups() call.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      all

      + +
      Deprecated.
      +
      Returns a future that yields either an exception, or the full set of consumer group + listings. + + In the event of a failure, the future yields nothing but the first exception which + occurred.
      +
      +
    • +
    • +
      +

      valid

      + +
      Deprecated.
      +
      Returns a future which yields just the valid listings. + + This future never fails with an error, no matter what happens. Errors are completely + ignored. If nothing can be fetched, an empty collection is yielded. + If there is an error, but some results can be returned, this future will yield + those partial results. When using this future, it is a good idea to also check + the errors future so that errors can be displayed and handled.
      +
      +
    • +
    • +
      +

      errors

      +
      public KafkaFuture<Collection<Throwable>> errors()
      +
      Deprecated.
      +
      Returns a future which yields just the errors which occurred. + + If this future yields a non-empty collection, it is very likely that elements are + missing from the valid() set. + + This future itself never fails with an error. In the event of an error, this future + will successfully yield a collection containing at least one exception.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListGroupsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListGroupsOptions.html new file mode 100644 index 000000000..3e22bd615 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListGroupsOptions.html @@ -0,0 +1,273 @@ + + + + +ListGroupsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListGroupsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<ListGroupsOptions> +
org.apache.kafka.clients.admin.ListGroupsOptions
+
+
+
+
+
@Evolving +public class ListGroupsOptions +extends AbstractOptions<ListGroupsOptions>
+
Options for Admin.listGroups(). +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ListGroupsOptions

      +
      public ListGroupsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      forConsumerGroups

      +
      public static ListGroupsOptions forConsumerGroups()
      +
      Only consumer groups will be returned by listGroups(). + This operation sets filters on group type and protocol type which select consumer groups.
      +
      +
    • +
    • +
      +

      forShareGroups

      +
      public static ListGroupsOptions forShareGroups()
      +
Only share groups will be returned by listGroups(). + This operation sets a filter on group type which selects share groups.
      +
      +
    • +
    • +
      +

      forStreamsGroups

      +
      public static ListGroupsOptions forStreamsGroups()
      +
Only streams groups will be returned by listGroups(). + This operation sets a filter on group type which selects streams groups.
      +
      +
    • +
    • +
      +

      inGroupStates

      +
      public ListGroupsOptions inGroupStates(Set<GroupState> groupStates)
      +
      If groupStates is set, only groups in these states will be returned by listGroups(). + Otherwise, all groups are returned. + This operation is supported by brokers with version 2.6.0 or later.
      +
      +
    • +
    • +
      +

      withProtocolTypes

      +
      public ListGroupsOptions withProtocolTypes(Set<String> protocolTypes)
      +
      If protocol types is set, only groups of these protocol types will be returned by listGroups(). + Otherwise, all groups are returned.
      +
      +
    • +
    • +
      +

      withTypes

      +
      public ListGroupsOptions withTypes(Set<GroupType> types)
      +
      If types is set, only groups of these types will be returned by listGroups(). + Otherwise, all groups are returned.
      +
      +
    • +
    • +
      +

      groupStates

      +
      public Set<GroupState> groupStates()
      +
      Returns the list of group states that are requested or empty if no states have been specified.
      +
      +
    • +
    • +
      +

      protocolTypes

      +
      public Set<String> protocolTypes()
      +
      Returns the list of protocol types that are requested or empty if no protocol types have been specified.
      +
      +
    • +
    • +
      +

      types

      +
      public Set<GroupType> types()
      +
      Returns the list of group types that are requested or empty if no types have been specified.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
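A short, hedged example of composing ListGroupsOptions for Admin.listGroups(); the chosen states are illustrative, `admin` is an existing Admin client, and GroupState is assumed to be org.apache.kafka.common.GroupState:

    ListGroupsOptions options = ListGroupsOptions.forConsumerGroups()
        .inGroupStates(Set.of(GroupState.STABLE, GroupState.EMPTY));
    ListGroupsResult groups = admin.listGroups(options);
    groups.all().get().forEach(System.out::println);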
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListGroupsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListGroupsResult.html new file mode 100644 index 000000000..fed1e7963 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListGroupsResult.html @@ -0,0 +1,177 @@ + + + + +ListGroupsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListGroupsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ListGroupsResult
+
+
+
+
@Evolving +public class ListGroupsResult +extends Object
+
The result of the Admin.listGroups() call. +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      all

      + +
      Returns a future that yields either an exception, or the full set of group listings. +

      + In the event of a failure, the future yields nothing but the first exception which + occurred.

      +
      +
    • +
    • +
      +

      valid

      + +
      Returns a future which yields just the valid listings. +

      + This future never fails with an error, no matter what happens. Errors are completely + ignored. If nothing can be fetched, an empty collection is yielded. + If there is an error, but some results can be returned, this future will yield + those partial results. When using this future, it is a good idea to also check + the errors future so that errors can be displayed and handled.

      +
      +
    • +
    • +
      +

      errors

      +
      public KafkaFuture<Collection<Throwable>> errors()
      +
      Returns a future which yields just the errors which occurred. +

      + If this future yields a non-empty collection, it is very likely that elements are + missing from the valid() set. +

      + This future itself never fails with an error. In the event of an error, this future + will successfully yield a collection containing at least one exception.

      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
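Given the all()/valid()/errors() contract described above, a tolerant caller can be sketched roughly as follows (assuming `admin` and a GroupListing element type, which is not part of this page):

    ListGroupsResult result = admin.listGroups();
    Collection<GroupListing> listings = result.valid().get();   // never fails; may be partial
    Collection<Throwable> errors = result.errors().get();       // never fails; may be empty
    errors.forEach(e -> System.err.println("listGroups error: " + e));
    listings.forEach(l -> System.out.println(l.groupId()));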
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListOffsetsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListOffsetsOptions.html new file mode 100644 index 000000000..da6592ccf --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListOffsetsOptions.html @@ -0,0 +1,172 @@ + + + + +ListOffsetsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListOffsetsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<ListOffsetsOptions> +
org.apache.kafka.clients.admin.ListOffsetsOptions
+
+
+
+
+
public class ListOffsetsOptions +extends AbstractOptions<ListOffsetsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ListOffsetsOptions

      +
      public ListOffsetsOptions()
      +
      +
    • +
    • +
      +

      ListOffsetsOptions

      +
      public ListOffsetsOptions(IsolationLevel isolationLevel)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListOffsetsResult.ListOffsetsResultInfo.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListOffsetsResult.ListOffsetsResultInfo.html new file mode 100644 index 000000000..4cfe58126 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListOffsetsResult.ListOffsetsResultInfo.html @@ -0,0 +1,197 @@ + + + + +ListOffsetsResult.ListOffsetsResultInfo (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListOffsetsResult.ListOffsetsResultInfo

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo
+
+
+
+
Enclosing class:
+
ListOffsetsResult
+
+
+
public static class ListOffsetsResult.ListOffsetsResultInfo +extends Object
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ListOffsetsResultInfo

      +
      public ListOffsetsResultInfo(long offset, + long timestamp, + Optional<Integer> leaderEpoch)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      offset

      +
      public long offset()
      +
      +
    • +
    • +
      +

      timestamp

      +
      public long timestamp()
      +
      +
    • +
    • +
      +

      leaderEpoch

      +
      public Optional<Integer> leaderEpoch()
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListOffsetsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListOffsetsResult.html new file mode 100644 index 000000000..c59a0acf4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListOffsetsResult.html @@ -0,0 +1,191 @@ + + + + +ListOffsetsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListOffsetsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ListOffsetsResult
+
+
+
+
public class ListOffsetsResult +extends Object
+
The result of the Admin.listOffsets(Map) call.
+
+
+ +
+
+ +
+ +
+
+
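One plausible way to combine ListOffsetsOptions, OffsetSpec, and ListOffsetsResult; the topic name, the all() accessor on the result, and the IsolationLevel import are assumptions not shown on this page:

    TopicPartition tp = new TopicPartition("orders", 0);
    ListOffsetsResult result = admin.listOffsets(
        Map.of(tp, OffsetSpec.latest()),
        new ListOffsetsOptions(IsolationLevel.READ_COMMITTED));
    ListOffsetsResult.ListOffsetsResultInfo info = result.all().get().get(tp);
    System.out.println("offset=" + info.offset() + " timestamp=" + info.timestamp());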
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListPartitionReassignmentsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListPartitionReassignmentsOptions.html new file mode 100644 index 000000000..66556a7fd --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListPartitionReassignmentsOptions.html @@ -0,0 +1,133 @@ + + + + +ListPartitionReassignmentsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListPartitionReassignmentsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<ListPartitionReassignmentsOptions> +
org.apache.kafka.clients.admin.ListPartitionReassignmentsOptions
+
+
+
+
+
public class ListPartitionReassignmentsOptions +extends AbstractOptions<ListPartitionReassignmentsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ListPartitionReassignmentsOptions

      +
      public ListPartitionReassignmentsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListPartitionReassignmentsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListPartitionReassignmentsResult.html new file mode 100644 index 000000000..61723ec66 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListPartitionReassignmentsResult.html @@ -0,0 +1,137 @@ + + + + +ListPartitionReassignmentsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListPartitionReassignmentsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ListPartitionReassignmentsResult
+
+
+
+
public class ListPartitionReassignmentsResult +extends Object
+
The result of Admin.listPartitionReassignments(ListPartitionReassignmentsOptions). + + The API of this class is evolving. See AdminClient for details.
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListShareGroupOffsetsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListShareGroupOffsetsOptions.html new file mode 100644 index 000000000..545074584 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListShareGroupOffsetsOptions.html @@ -0,0 +1,136 @@ + + + + +ListShareGroupOffsetsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListShareGroupOffsetsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<ListShareGroupOffsetsOptions> +
org.apache.kafka.clients.admin.ListShareGroupOffsetsOptions
+
+
+
+
+
@Evolving +public class ListShareGroupOffsetsOptions +extends AbstractOptions<ListShareGroupOffsetsOptions>
+
Options for Admin.listShareGroupOffsets(Map, ListShareGroupOffsetsOptions). +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ListShareGroupOffsetsOptions

      +
      public ListShareGroupOffsetsOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListShareGroupOffsetsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListShareGroupOffsetsResult.html new file mode 100644 index 000000000..0a5044328 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListShareGroupOffsetsResult.html @@ -0,0 +1,155 @@ + + + + +ListShareGroupOffsetsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListShareGroupOffsetsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ListShareGroupOffsetsResult
+
+
+
+
@Evolving +public class ListShareGroupOffsetsResult +extends Object
+
The result of the Admin.listShareGroupOffsets(Map, ListShareGroupOffsetsOptions) call. +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      all

      + +
Return a future which completes when the requests for all the groups succeed.
      +
      +
      Returns:
      +
      Future which yields all Map<String, Map<TopicPartition, Long>> objects, if requests for all the groups succeed.
      +
      +
      +
    • +
    • +
      +

      partitionsToOffsetAndMetadata

      +
      public KafkaFuture<Map<TopicPartition,OffsetAndMetadata>> partitionsToOffsetAndMetadata(String groupId)
      +
      Return a future which yields a map of topic partitions to offsets for the specified group. If the group doesn't + have a committed offset for a specific partition, the corresponding value in the returned map will be null.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListShareGroupOffsetsSpec.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListShareGroupOffsetsSpec.html new file mode 100644 index 000000000..bb2843b7f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListShareGroupOffsetsSpec.html @@ -0,0 +1,221 @@ + + + + +ListShareGroupOffsetsSpec (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListShareGroupOffsetsSpec

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ListShareGroupOffsetsSpec
+
+
+
+
@Evolving +public class ListShareGroupOffsetsSpec +extends Object
+
Specification of share group offsets to list using Admin.listShareGroupOffsets(Map, ListShareGroupOffsetsOptions). +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ListShareGroupOffsetsSpec

      +
      public ListShareGroupOffsetsSpec()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      topicPartitions

      +
      public ListShareGroupOffsetsSpec topicPartitions(Collection<TopicPartition> topicPartitions)
      +
      Set the topic partitions whose offsets are to be listed for a share group.
      +
      +
      Parameters:
      +
      topicPartitions - List of topic partitions to include
      +
      +
      +
    • +
    • +
      +

      topicPartitions

      +
      public Collection<TopicPartition> topicPartitions()
      +
      Returns the topic partitions whose offsets are to be listed for a share group. + null indicates that offsets of all partitions of the group are to be listed.
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
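Since this API is still evolving, the following is only a sketch of how the share-group spec, options, and result documented above might be wired together (group and topic names are illustrative, `admin` is assumed):

    ListShareGroupOffsetsSpec spec = new ListShareGroupOffsetsSpec()
        .topicPartitions(List.of(new TopicPartition("orders", 0)));
    ListShareGroupOffsetsResult result = admin.listShareGroupOffsets(
        Map.of("my-share-group", spec), new ListShareGroupOffsetsOptions());
    Map<TopicPartition, OffsetAndMetadata> offsets =
        result.partitionsToOffsetAndMetadata("my-share-group").get();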
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsOptions.html new file mode 100644 index 000000000..67b1850ac --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsOptions.html @@ -0,0 +1,179 @@ + + + + +ListStreamsGroupOffsetsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListStreamsGroupOffsetsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<ListStreamsGroupOffsetsOptions> +
org.apache.kafka.clients.admin.ListStreamsGroupOffsetsOptions
+
+
+
+
+
@Evolving +public class ListStreamsGroupOffsetsOptions +extends AbstractOptions<ListStreamsGroupOffsetsOptions>
+
Options for Admin.listStreamsGroupOffsets(java.util.Map, ListStreamsGroupOffsetsOptions). +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ListStreamsGroupOffsetsOptions

      +
      public ListStreamsGroupOffsetsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      requireStable

      +
      public ListStreamsGroupOffsetsOptions requireStable(boolean requireStable)
      +
      Sets an optional requireStable flag.
      +
      +
    • +
    • +
      +

      requireStable

      +
      public boolean requireStable()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsResult.html new file mode 100644 index 000000000..0d608f98b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsResult.html @@ -0,0 +1,151 @@ + + + + +ListStreamsGroupOffsetsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListStreamsGroupOffsetsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ListStreamsGroupOffsetsResult
+
+
+
+
@Evolving +public class ListStreamsGroupOffsetsResult +extends Object
+
The result of the Admin.listStreamsGroupOffsets(Map, ListStreamsGroupOffsetsOptions) call. +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      all

      + +
      Return a future which yields all Map<String, Map<TopicPartition, OffsetAndMetadata>> objects, if requests for all the groups succeed.
      +
      +
    • +
    • +
      +

      partitionsToOffsetAndMetadata

      +
      public KafkaFuture<Map<TopicPartition,OffsetAndMetadata>> partitionsToOffsetAndMetadata(String groupId)
      +
      Return a future which yields a map of topic partitions to offsets for the specified group. If the group doesn't + have a committed offset for a specific partition, the corresponding value in the returned map will be null.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.html new file mode 100644 index 000000000..f0d71d6db --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.html @@ -0,0 +1,131 @@ + + + + +ListStreamsGroupOffsetsSpec (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListStreamsGroupOffsetsSpec

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ListStreamsGroupOffsetsSpec
+
+
+
+
@Evolving +public class ListStreamsGroupOffsetsSpec +extends Object
+
Specification of streams group offsets to list using Admin.listStreamsGroupOffsets(Map, ListStreamsGroupOffsetsOptions). +

+ The API of this class is evolving, see Admin for details.

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ListStreamsGroupOffsetsSpec

      +
      public ListStreamsGroupOffsetsSpec()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListTopicsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListTopicsOptions.html new file mode 100644 index 000000000..e70348b5e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListTopicsOptions.html @@ -0,0 +1,243 @@ + + + + +ListTopicsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListTopicsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<ListTopicsOptions> +
org.apache.kafka.clients.admin.ListTopicsOptions
+
+
+
+
+
public class ListTopicsOptions +extends AbstractOptions<ListTopicsOptions>
+
Options for Admin.listTopics().
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ListTopicsOptions

      +
      public ListTopicsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      timeoutMs

      +
      public ListTopicsOptions timeoutMs(Integer timeoutMs)
      +
      Set the timeout in milliseconds for this operation or null if the default api timeout for the + AdminClient should be used.
      +
      +
      Overrides:
      +
      timeoutMs in class AbstractOptions<ListTopicsOptions>
      +
      +
      +
    • +
    • +
      +

      listInternal

      +
      public ListTopicsOptions listInternal(boolean listInternal)
      +
      Set whether we should list internal topics.
      +
      +
      Parameters:
      +
      listInternal - Whether we should list internal topics. null means to use + the default.
      +
      Returns:
      +
      This ListTopicsOptions object.
      +
      +
      +
    • +
    • +
      +

      shouldListInternal

      +
      public boolean shouldListInternal()
      +
      Return true if we should list internal topics.
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListTopicsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListTopicsResult.html new file mode 100644 index 000000000..7094ec245 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListTopicsResult.html @@ -0,0 +1,159 @@ + + + + +ListTopicsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListTopicsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ListTopicsResult
+
+
+
+
public class ListTopicsResult +extends Object
+
The result of the Admin.listTopics() call.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      namesToListings

      +
      public KafkaFuture<Map<String,TopicListing>> namesToListings()
      +
      Return a future which yields a map of topic names to TopicListing objects.
      +
      +
    • +
    • +
      +

      listings

      +
      public KafkaFuture<Collection<TopicListing>> listings()
      +
      Return a future which yields a collection of TopicListing objects.
      +
      +
    • +
    • +
      +

      names

      +
      public KafkaFuture<Set<String>> names()
      +
      Return a future which yields a collection of topic names.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
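A brief usage sketch for ListTopicsOptions and ListTopicsResult, assuming an existing Admin client named `admin`:

    ListTopicsOptions options = new ListTopicsOptions().listInternal(true);
    ListTopicsResult topics = admin.listTopics(options);
    Set<String> names = topics.names().get();
    names.forEach(System.out::println);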
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListTransactionsOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListTransactionsOptions.html new file mode 100644 index 000000000..1a2f4e09a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListTransactionsOptions.html @@ -0,0 +1,341 @@ + + + + +ListTransactionsOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListTransactionsOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<ListTransactionsOptions> +
org.apache.kafka.clients.admin.ListTransactionsOptions
+
+
+
+
+
public class ListTransactionsOptions +extends AbstractOptions<ListTransactionsOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ListTransactionsOptions

      +
      public ListTransactionsOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      filterStates

      +
      public ListTransactionsOptions filterStates(Collection<TransactionState> states)
      +
      Filter only the transactions that are in a specific set of states. If no filter + is specified or if the passed set of states is empty, then transactions in all + states will be returned.
      +
      +
      Parameters:
      +
      states - the set of states to filter by
      +
      Returns:
      +
      this object
      +
      +
      +
    • +
    • +
      +

      filterProducerIds

      +
      public ListTransactionsOptions filterProducerIds(Collection<Long> producerIdFilters)
      +
      Filter only the transactions from producers in a specific set of producerIds. + If no filter is specified or if the passed collection of producerIds is empty, + then the transactions of all producerIds will be returned.
      +
      +
      Parameters:
      +
      producerIdFilters - the set of producerIds to filter by
      +
      Returns:
      +
      this object
      +
      +
      +
    • +
    • +
      +

      filterOnDuration

      +
      public ListTransactionsOptions filterOnDuration(long durationMs)
      +
Filter only the transactions that are running longer than the specified duration. + If no filter is specified or if the passed duration in milliseconds is less than 0, + then all transactions will be returned.
      +
      +
      Parameters:
      +
      durationMs - the duration in milliseconds to filter by
      +
      Returns:
      +
      this object
      +
      +
      +
    • +
    • +
      +

      filterOnTransactionalIdPattern

      +
      public ListTransactionsOptions filterOnTransactionalIdPattern(String pattern)
      +
Filter only the transactions that match the given transactional ID pattern. + If the filter is null or if the passed string is empty, + then all the transactions will be returned.
      +
      +
      Parameters:
      +
      pattern - the transactional ID regular expression pattern to filter by
      +
      Returns:
      +
      this object
      +
      +
      +
    • +
    • +
      +

      filteredStates

      +
      public Set<TransactionState> filteredStates()
      +
      Returns the set of states to be filtered or empty if no states have been specified.
      +
      +
      Returns:
      +
      the current set of filtered states (empty means that no states are filtered and + all transactions will be returned)
      +
      +
      +
    • +
    • +
      +

      filteredProducerIds

      +
      public Set<Long> filteredProducerIds()
      +
      Returns the set of producerIds that are being filtered or empty if none have been specified.
      +
      +
      Returns:
      +
      the current set of filtered states (empty means that no producerIds are filtered and + all transactions will be returned)
      +
      +
      +
    • +
    • +
      +

      filteredDuration

      +
      public long filteredDuration()
      +
      Returns the duration ms value being filtered.
      +
      +
      Returns:
      +
      the current duration filter value in ms (negative value means transactions are not filtered by duration)
      +
      +
      +
    • +
    • +
      +

      filteredTransactionalIdPattern

      +
      public String filteredTransactionalIdPattern()
      +
Returns the transactional ID pattern being filtered.
      +
      +
      Returns:
      +
      the current transactional ID pattern filter (empty means no transactional IDs are filtered and all + transactions will be returned)
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ListTransactionsResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/ListTransactionsResult.html new file mode 100644 index 000000000..2a0948c63 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ListTransactionsResult.html @@ -0,0 +1,185 @@ + + + + +ListTransactionsResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ListTransactionsResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ListTransactionsResult
+
+
+
+
public class ListTransactionsResult +extends Object
+
The result of the Admin.listTransactions() call. +

+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      all

      + +
      Get all transaction listings. If any of the underlying requests fail, then the future + returned from this method will also fail with the first encountered error.
      +
      +
      Returns:
      +
      A future containing the collection of transaction listings. The future completes + when all transaction listings are available and fails after any non-retriable error.
      +
      +
      +
    • +
    • +
      +

      byBrokerId

      + +
      Get a future which returns a map containing the underlying listing future for each broker + in the cluster. This is useful, for example, if a partial listing of transactions is + sufficient, or if you want more granular error details.
      +
      +
      Returns:
      +
      A future containing a map of futures by broker which complete individually when + their respective transaction listings are available. The top-level future returned + from this method may fail if the admin client is unable to lookup the available + brokers in the cluster.
      +
      +
      +
    • +
    • +
      +

      allByBrokerId

      + +
      Get all transaction listings in a map which is keyed by the ID of respective broker + that is currently managing them. If any of the underlying requests fail, then the future + returned from this method will also fail with the first encountered error.
      +
      +
      Returns:
      +
      A future containing a map from the broker ID to the transactions hosted by that + broker respectively. This future completes when all transaction listings are + available and fails after any non-retriable error.
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
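A hedged sketch that chains the option filters above into a single listTransactions() call; the TransactionState constant and the TransactionListing accessor are assumptions, as neither class appears on this page:

    ListTransactionsOptions options = new ListTransactionsOptions()
        .filterStates(List.of(TransactionState.ONGOING))
        .filterOnDuration(60_000L); // only transactions open for more than a minute
    ListTransactionsResult result = admin.listTransactions(options);
    Collection<TransactionListing> listings = result.all().get();
    listings.forEach(l -> System.out.println(l.transactionalId()));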
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/LogDirDescription.html b/static/41/javadoc/org/apache/kafka/clients/admin/LogDirDescription.html new file mode 100644 index 000000000..13075620b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/LogDirDescription.html @@ -0,0 +1,235 @@ + + + + +LogDirDescription (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class LogDirDescription

+
+
java.lang.Object +
org.apache.kafka.clients.admin.LogDirDescription
+
+
+
+
public class LogDirDescription +extends Object
+
A description of a log directory on a particular broker.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    + +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      error

      +
      public ApiException error()
      +
      Returns `ApiException` if the log directory is offline or an error occurred, otherwise returns null. +
        +
      • KafkaStorageException - The log directory is offline. +
      • UnknownServerException - The server experienced an unexpected error when processing the request. +
      +
      +
    • +
    • +
      +

      replicaInfos

      +
      public Map<TopicPartition,ReplicaInfo> replicaInfos()
      +
      A map from topic partition to replica information for that partition + in this log directory.
      +
      +
    • +
    • +
      +

      totalBytes

      +
      public OptionalLong totalBytes()
      +
      The total size of the volume this log directory is on or empty if the broker did not return a value. + For volumes larger than Long.MAX_VALUE, Long.MAX_VALUE is returned.
      +
      +
    • +
    • +
      +

      usableBytes

      +
      public OptionalLong usableBytes()
      +
      The usable size on the volume this log directory is on or empty if the broker did not return a value. + For usable sizes larger than Long.MAX_VALUE, Long.MAX_VALUE is returned.
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
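LogDirDescription instances are typically obtained through Admin.describeLogDirs(); the broker ids and the DescribeLogDirsResult.allDescriptions() accessor used below are assumptions, since that result class is not part of this page:

    DescribeLogDirsResult dirs = admin.describeLogDirs(List.of(0, 1, 2));
    Map<Integer, Map<String, LogDirDescription>> byBroker = dirs.allDescriptions().get();
    byBroker.forEach((broker, dirMap) -> dirMap.forEach((path, desc) -> {
        if (desc.error() != null) {
            System.err.println("broker " + broker + " dir " + path + " error: " + desc.error());
        } else {
            System.out.println("broker " + broker + " dir " + path
                + " usable=" + desc.usableBytes() + " partitions=" + desc.replicaInfos().size());
        }
    }));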
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/MemberAssignment.html b/static/41/javadoc/org/apache/kafka/clients/admin/MemberAssignment.html new file mode 100644 index 000000000..1880c91e9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/MemberAssignment.html @@ -0,0 +1,208 @@ + + + + +MemberAssignment (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class MemberAssignment

+
+
java.lang.Object +
org.apache.kafka.clients.admin.MemberAssignment
+
+
+
+
public class MemberAssignment +extends Object
+
A description of the assignments of a specific group member.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      MemberAssignment

      +
      public MemberAssignment(Set<TopicPartition> topicPartitions)
      +
      Creates an instance with the specified parameters.
      +
      +
      Parameters:
      +
      topicPartitions - List of topic partitions
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      topicPartitions

      +
      public Set<TopicPartition> topicPartitions()
      +
      The topic partitions assigned to a group member.
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/MemberDescription.html b/static/41/javadoc/org/apache/kafka/clients/admin/MemberDescription.html new file mode 100644 index 000000000..84ae4d4c6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/MemberDescription.html @@ -0,0 +1,377 @@ + + + + +MemberDescription (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class MemberDescription

+
+
java.lang.Object +
org.apache.kafka.clients.admin.MemberDescription
+
+
+
+
public class MemberDescription +extends Object
+
A detailed description of a single group member in the cluster.
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/MemberToRemove.html b/static/41/javadoc/org/apache/kafka/clients/admin/MemberToRemove.html new file mode 100644 index 000000000..2efdbab7f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/MemberToRemove.html @@ -0,0 +1,185 @@ + + + + +MemberToRemove (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class MemberToRemove

+
+
java.lang.Object +
org.apache.kafka.clients.admin.MemberToRemove
+
+
+
+
public class MemberToRemove +extends Object
+
A struct containing information about the member to be removed.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      MemberToRemove

      +
      public MemberToRemove(String groupInstanceId)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      groupInstanceId

      +
      public String groupInstanceId()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
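MemberToRemove identifies a static member by its group.instance.id; a rough sketch of passing it to the removal call follows (RemoveMembersFromConsumerGroupOptions and the removeMembersFromConsumerGroup signature are assumptions, as they are not shown on this page):

    RemoveMembersFromConsumerGroupOptions options =
        new RemoveMembersFromConsumerGroupOptions(List.of(new MemberToRemove("instance-1")));
    admin.removeMembersFromConsumerGroup("my-group", options).all().get();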
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/NewPartitionReassignment.html b/static/41/javadoc/org/apache/kafka/clients/admin/NewPartitionReassignment.html new file mode 100644 index 000000000..3a06db42e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/NewPartitionReassignment.html @@ -0,0 +1,163 @@ + + + + +NewPartitionReassignment (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class NewPartitionReassignment

+
+
java.lang.Object +
org.apache.kafka.clients.admin.NewPartitionReassignment
+
+
+
+
public class NewPartitionReassignment +extends Object
+
A new partition reassignment, which can be applied via Admin.alterPartitionReassignments(Map, AlterPartitionReassignmentsOptions).
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    + +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      targetReplicas

      +
      public List<Integer> targetReplicas()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
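A minimal sketch of applying NewPartitionReassignment via Admin.alterPartitionReassignments; the target broker ids and the list-based constructor are assumptions, since the constructor details are collapsed above:

    TopicPartition tp = new TopicPartition("orders", 0);
    Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments =
        Map.of(tp, Optional.of(new NewPartitionReassignment(List.of(1, 2, 3))));
    admin.alterPartitionReassignments(reassignments).all().get();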
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/NewPartitions.html b/static/41/javadoc/org/apache/kafka/clients/admin/NewPartitions.html new file mode 100644 index 000000000..df744fa8b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/NewPartitions.html @@ -0,0 +1,215 @@ + + + + +NewPartitions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class NewPartitions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.NewPartitions
+
+
+
+
public class NewPartitions +extends Object
+
Describes new partitions for a particular topic in a call to Admin.createPartitions(Map).
+
+
+
    + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    + + +
    +
    The replica assignments for the new partitions, or null if the assignment will be done by the controller.
    +
    + +
    increaseTo(int totalCount)
    +
    +
    Increase the partition count for a topic to the given totalCount.
    +
    + +
    increaseTo(int totalCount, + List<List<Integer>> newAssignments)
    +
    +
    Increase the partition count for a topic to the given totalCount + assigning the new partitions according to the given newAssignments.
    +
    + + +
     
    +
    int
    + +
    +
    The total number of partitions after the operation succeeds.
    +
    +
    +
    +
    +
    +

    Methods inherited from class java.lang.Object

    +equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      increaseTo

      +
      public static NewPartitions increaseTo(int totalCount)
      +
      Increase the partition count for a topic to the given totalCount. + The assignment of new replicas to brokers will be decided by the broker.
      +
      +
      Parameters:
      +
      totalCount - The total number of partitions after the operation succeeds.
      +
      +
      +
    • +
    • +
      +

      increaseTo

      +
      public static NewPartitions increaseTo(int totalCount, + List<List<Integer>> newAssignments)
      +

      Increase the partition count for a topic to the given totalCount + assigning the new partitions according to the given newAssignments. + The length of the given newAssignments should equal totalCount - oldCount, since + the assignment of existing partitions are not changed. + Each inner list of newAssignments should have a length equal to + the topic's replication factor. + The first broker id in each inner list is the "preferred replica".

      + +

      For example, suppose a topic currently has a replication factor of 2, and + has 3 partitions. The number of partitions can be increased to 6 using a + NewPartition constructed like this:

      + +
      
      + NewPartitions.increaseTo(6, asList(asList(1, 2),
      +                                    asList(2, 3),
      +                                    asList(3, 1)))
      + 
      +

      In this example partition 3's preferred leader will be broker 1, partition 4's preferred leader will be + broker 2 and partition 5's preferred leader will be broker 3.

      +
      +
      Parameters:
      +
      totalCount - The total number of partitions after the operation succeeds.
      +
      newAssignments - The replica assignments for the new partitions.
      +
      +
      +
    • +
    • +
      +

      totalCount

      +
      public int totalCount()
      +
      The total number of partitions after the operation succeeds.
      +
      +
    • +
    • +
      +

      assignments

      +
      public List<List<Integer>> assignments()
      +
      The replica assignments for the new partitions, or null if the assignment will be done by the controller.
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
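Following the increaseTo(6, ...) example above, the simpler controller-assigned form can be sketched as follows (topic name illustrative, `admin` assumed):

    // Increase "orders" to 6 partitions and let the controller place the new replicas.
    admin.createPartitions(Map.of("orders", NewPartitions.increaseTo(6))).all().get();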
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/NewTopic.html b/static/41/javadoc/org/apache/kafka/clients/admin/NewTopic.html new file mode 100644 index 000000000..bd7ec2e16 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/NewTopic.html @@ -0,0 +1,314 @@ + + + + +NewTopic (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class NewTopic

+
+
java.lang.Object +
org.apache.kafka.clients.admin.NewTopic
+
+
+
+
public class NewTopic +extends Object
+
A new topic to be created via Admin.createTopics(Collection).
+
+
+
    + +
  • +
    +

    Constructor Summary

    +
    Constructors
    +
    +
    Constructor
    +
    Description
    +
    NewTopic(String name, + int numPartitions, + short replicationFactor)
    +
    +
    A new topic with the specified replication factor and number of partitions.
    +
    +
    NewTopic(String name, + Map<Integer,List<Integer>> replicasAssignments)
    +
    +
    A new topic with the specified replica assignment configuration.
    +
    +
    NewTopic(String name, + Optional<Integer> numPartitions, + Optional<Short> replicationFactor)
    +
    +
    A new topic that optionally defaults numPartitions and replicationFactor to + the broker configurations for num.partitions and default.replication.factor + respectively.
    +
    +
    +
    +
  • + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    + + +
    +
The configuration for the new topic or null if no configs were ever specified.
    +
    + + +
    +
    Set the configuration to use on the new topic.
    +
    +
    boolean
    + +
     
    +
    int
    + +
     
    + + +
    +
    The name of the topic to be created.
    +
    +
    int
    + +
    +
    The number of partitions for the new topic or -1 if a replica assignment has been specified.
    +
    + + +
    +
    A map from partition id to replica ids (i.e.
    +
    +
    short
    + +
    +
    The replication factor for the new topic or -1 if a replica assignment has been specified.
    +
    + + +
     
    +
    +
    +
    +
    +

    Methods inherited from class java.lang.Object

    +getClass, notify, notifyAll, wait, wait, wait
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      NewTopic

      +
      public NewTopic(String name, + int numPartitions, + short replicationFactor)
      +
      A new topic with the specified replication factor and number of partitions.
      +
      +
    • +
    • +
      +

      NewTopic

      +
      public NewTopic(String name, + Optional<Integer> numPartitions, + Optional<Short> replicationFactor)
      +
      A new topic that optionally defaults numPartitions and replicationFactor to + the broker configurations for num.partitions and default.replication.factor + respectively.
      +
      +
    • +
    • +
      +

      NewTopic

      +
      public NewTopic(String name, + Map<Integer,List<Integer>> replicasAssignments)
      +
      A new topic with the specified replica assignment configuration.
      +
      +
      Parameters:
      +
      name - the topic name.
      +
      replicasAssignments - a map from partition id to replica ids (i.e. broker ids). Although not enforced, it is + generally a good idea for all partitions to have the same number of replicas. + The first replica will be treated as the preferred leader.
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      name

      +
      public String name()
      +
      The name of the topic to be created.
      +
      +
    • +
    • +
      +

      numPartitions

      +
      public int numPartitions()
      +
      The number of partitions for the new topic or -1 if a replica assignment has been specified.
      +
      +
    • +
    • +
      +

      replicationFactor

      +
      public short replicationFactor()
      +
      The replication factor for the new topic or -1 if a replica assignment has been specified.
      +
      +
    • +
    • +
      +

      replicasAssignments

      +
      public Map<Integer,List<Integer>> replicasAssignments()
      +
      A map from partition id to replica ids (i.e. broker ids) or null if the number of partitions and replication + factor have been specified instead.
      +
      +
    • +
    • +
      +

      configs

      +
      public NewTopic configs(Map<String,String> configs)
      +
      Set the configuration to use on the new topic.
      +
      +
      Parameters:
      +
      configs - The configuration map.
      +
      Returns:
      +
      This NewTopic object.
      +
      +
      +
    • +
    • +
      +

      configs

      +
      public Map<String,String> configs()
      +
The configuration for the new topic or null if no configs were ever specified.
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
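A short sketch of creating the topic described by NewTopic through Admin.createTopics(Collection); the topic name, partition count, and config value are illustrative:

    NewTopic topic = new NewTopic("orders", 3, (short) 2)
        .configs(Map.of("retention.ms", "604800000")); // 7 days
    admin.createTopics(List.of(topic)).all().get();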
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.EarliestLocalSpec.html b/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.EarliestLocalSpec.html new file mode 100644 index 000000000..baa3591fe --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.EarliestLocalSpec.html @@ -0,0 +1,145 @@ + + + + +OffsetSpec.EarliestLocalSpec (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class OffsetSpec.EarliestLocalSpec

+
+
java.lang.Object +
org.apache.kafka.clients.admin.OffsetSpec +
org.apache.kafka.clients.admin.OffsetSpec.EarliestLocalSpec
+
+
+
+
+
Enclosing class:
+
OffsetSpec
+
+
+
public static class OffsetSpec.EarliestLocalSpec +extends OffsetSpec
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      EarliestLocalSpec

      +
      public EarliestLocalSpec()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.EarliestSpec.html b/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.EarliestSpec.html new file mode 100644 index 000000000..847ce7475 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.EarliestSpec.html @@ -0,0 +1,145 @@ + + + + +OffsetSpec.EarliestSpec (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class OffsetSpec.EarliestSpec

+
+
java.lang.Object +
org.apache.kafka.clients.admin.OffsetSpec +
org.apache.kafka.clients.admin.OffsetSpec.EarliestSpec
+
+
+
+
+
Enclosing class:
+
OffsetSpec
+
+
+
public static class OffsetSpec.EarliestSpec +extends OffsetSpec
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      EarliestSpec

      +
      public EarliestSpec()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.LatestSpec.html b/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.LatestSpec.html new file mode 100644 index 000000000..1283e0fa9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.LatestSpec.html @@ -0,0 +1,145 @@ + + + + +OffsetSpec.LatestSpec (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class OffsetSpec.LatestSpec

+
+
java.lang.Object +
org.apache.kafka.clients.admin.OffsetSpec +
org.apache.kafka.clients.admin.OffsetSpec.LatestSpec
+
+
+
+
+
Enclosing class:
+
OffsetSpec
+
+
+
public static class OffsetSpec.LatestSpec +extends OffsetSpec
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      LatestSpec

      +
      public LatestSpec()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.LatestTieredSpec.html b/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.LatestTieredSpec.html new file mode 100644 index 000000000..13ad74cd2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.LatestTieredSpec.html @@ -0,0 +1,145 @@ + + + + +OffsetSpec.LatestTieredSpec (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class OffsetSpec.LatestTieredSpec

+
+
java.lang.Object +
org.apache.kafka.clients.admin.OffsetSpec +
org.apache.kafka.clients.admin.OffsetSpec.LatestTieredSpec
+
+
+
+
+
Enclosing class:
+
OffsetSpec
+
+
+
public static class OffsetSpec.LatestTieredSpec +extends OffsetSpec
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      LatestTieredSpec

      +
      public LatestTieredSpec()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.MaxTimestampSpec.html b/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.MaxTimestampSpec.html new file mode 100644 index 000000000..3861499ec --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.MaxTimestampSpec.html @@ -0,0 +1,145 @@ + + + + +OffsetSpec.MaxTimestampSpec (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class OffsetSpec.MaxTimestampSpec

+
+
java.lang.Object +
org.apache.kafka.clients.admin.OffsetSpec +
org.apache.kafka.clients.admin.OffsetSpec.MaxTimestampSpec
+
+
+
+
+
Enclosing class:
+
OffsetSpec
+
+
+
public static class OffsetSpec.MaxTimestampSpec +extends OffsetSpec
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      MaxTimestampSpec

      +
      public MaxTimestampSpec()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.TimestampSpec.html b/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.TimestampSpec.html new file mode 100644 index 000000000..39676eca8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.TimestampSpec.html @@ -0,0 +1,114 @@ + + + + +OffsetSpec.TimestampSpec (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class OffsetSpec.TimestampSpec

+
+
java.lang.Object +
org.apache.kafka.clients.admin.OffsetSpec +
org.apache.kafka.clients.admin.OffsetSpec.TimestampSpec
+
+
+
+
+
Enclosing class:
+
OffsetSpec
+
+
+
public static class OffsetSpec.TimestampSpec +extends OffsetSpec
+
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.html b/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.html new file mode 100644 index 000000000..aacd4b0dd --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/OffsetSpec.html @@ -0,0 +1,272 @@ + + + + +OffsetSpec (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class OffsetSpec

+
+
java.lang.Object +
org.apache.kafka.clients.admin.OffsetSpec
+
+
+
+
Direct Known Subclasses:
+
OffsetSpec.EarliestLocalSpec, OffsetSpec.EarliestSpec, OffsetSpec.LatestSpec, OffsetSpec.LatestTieredSpec, OffsetSpec.MaxTimestampSpec, OffsetSpec.TimestampSpec
+
+
+
public class OffsetSpec +extends Object
+
This class allows specifying the desired offsets when using KafkaAdminClient.listOffsets(Map, ListOffsetsOptions)
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      OffsetSpec

      +
      public OffsetSpec()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      latest

      +
      public static OffsetSpec latest()
      +
      Used to retrieve the latest offset of a partition
      +
      +
    • +
    • +
      +

      earliest

      +
      public static OffsetSpec earliest()
      +
      Used to retrieve the earliest offset of a partition
      +
      +
    • +
    • +
      +

      forTimestamp

      +
      public static OffsetSpec forTimestamp(long timestamp)
      +
      Used to retrieve the earliest offset whose timestamp is greater than + or equal to the given timestamp in the corresponding partition
      +
      +
      Parameters:
      +
      timestamp - in milliseconds
      +
      +
      +
    • +
    • +
      +

      maxTimestamp

      +
      public static OffsetSpec maxTimestamp()
      +
      Used to retrieve the offset with the largest timestamp of a partition. + Because message timestamps can be specified client-side, this may not match + the log end offset returned by LatestSpec
      +
      +
    • +
    • +
      +

      earliestLocal

      +
      public static OffsetSpec earliestLocal()
      +
      Used to retrieve the local log start offset. + Local log start offset is the offset of a log above which reads + are guaranteed to be served from the disk of the leader broker. +
      + Note: When tiered storage is not enabled, this behaves the same as retrieving the earliest offset.
      +
      +
    • +
    • +
      +

      latestTiered

      +
      public static OffsetSpec latestTiered()
      +
      Used to retrieve the highest offset of data stored in remote storage. +
      + Note: When tiered storage is not enabled, an unknown offset is returned.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
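The specs above are normally passed to Admin#listOffsets. The following is an illustrative sketch, not part of the generated Javadoc; the bootstrap address, topic name, and partition are placeholder assumptions.

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class ListOffsetsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("my-topic", 0); // placeholder partition
            // Query the earliest and latest offsets of the same partition with two OffsetSpec values.
            long start = admin.listOffsets(Map.of(tp, OffsetSpec.earliest()))
                              .partitionResult(tp).get().offset();
            long end = admin.listOffsets(Map.of(tp, OffsetSpec.latest()))
                            .partitionResult(tp).get().offset();
            System.out.printf("%s currently spans offsets [%d, %d)%n", tp, start, end);
        }
    }
}
```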
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/PartitionReassignment.html b/static/41/javadoc/org/apache/kafka/clients/admin/PartitionReassignment.html new file mode 100644 index 000000000..29df3bdeb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/PartitionReassignment.html @@ -0,0 +1,205 @@ + + + + +PartitionReassignment (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class PartitionReassignment

+
+
java.lang.Object +
org.apache.kafka.clients.admin.PartitionReassignment
+
+
+
+
public class PartitionReassignment +extends Object
+
A partition reassignment, which has been listed via Admin.listPartitionReassignments().
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    + +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      replicas

      +
      public List<Integer> replicas()
      +
      The brokers which this partition currently resides on.
      +
      +
    • +
    • +
      +

      addingReplicas

      +
      public List<Integer> addingReplicas()
      +
      The brokers that we are adding this partition to as part of a reassignment. + A subset of replicas.
      +
      +
    • +
    • +
      +

      removingReplicas

      +
      public List<Integer> removingReplicas()
      +
      The brokers that we are removing this partition from as part of a reassignment. + A subset of replicas.
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
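As a rough illustration (not taken from the Javadoc above), in-flight reassignments can be inspected via Admin#listPartitionReassignments. `admin` is assumed to be an already-created Admin client, and imports from org.apache.kafka.clients.admin and org.apache.kafka.common are assumed.

```java
// Sketch: print any partition reassignments currently in progress.
static void printReassignments(Admin admin) throws Exception {
    Map<TopicPartition, PartitionReassignment> inFlight =
            admin.listPartitionReassignments().reassignments().get();
    inFlight.forEach((tp, r) -> System.out.printf(
            "%s: replicas=%s adding=%s removing=%s%n",
            tp, r.replicas(), r.addingReplicas(), r.removingReplicas()));
}
```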
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ProducerState.html b/static/41/javadoc/org/apache/kafka/clients/admin/ProducerState.html new file mode 100644 index 000000000..000ea40df --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ProducerState.html @@ -0,0 +1,252 @@ + + + + +ProducerState (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ProducerState

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ProducerState
+
+
+
+
public class ProducerState +extends Object
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ProducerState

      +
      public ProducerState(long producerId, + int producerEpoch, + int lastSequence, + long lastTimestamp, + OptionalInt coordinatorEpoch, + OptionalLong currentTransactionStartOffset)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      producerId

      +
      public long producerId()
      +
      +
    • +
    • +
      +

      producerEpoch

      +
      public int producerEpoch()
      +
      +
    • +
    • +
      +

      lastSequence

      +
      public int lastSequence()
      +
      +
    • +
    • +
      +

      lastTimestamp

      +
      public long lastTimestamp()
      +
      +
    • +
    • +
      +

      currentTransactionStartOffset

      +
      public OptionalLong currentTransactionStartOffset()
      +
      +
    • +
    • +
      +

      coordinatorEpoch

      +
      public OptionalInt coordinatorEpoch()
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
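ProducerState instances are returned by Admin#describeProducers. A hedged sketch follows; `admin` and the partition are assumed to exist, imports from org.apache.kafka.clients.admin and org.apache.kafka.common are assumed, and the result accessors should be checked against DescribeProducersResult.

```java
// Sketch: list the active producers (and any open transaction) for one partition.
static void printProducers(Admin admin, TopicPartition tp) throws Exception {
    List<ProducerState> producers = admin.describeProducers(List.of(tp))
            .partitionResult(tp).get().activeProducers();
    for (ProducerState p : producers) {
        System.out.printf("producerId=%d epoch=%d lastSeq=%d txnStart=%s%n",
                p.producerId(), p.producerEpoch(), p.lastSequence(),
                p.currentTransactionStartOffset());
    }
}
```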
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/QuorumInfo.Node.html b/static/41/javadoc/org/apache/kafka/clients/admin/QuorumInfo.Node.html new file mode 100644 index 000000000..8b38d88c9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/QuorumInfo.Node.html @@ -0,0 +1,183 @@ + + + + +QuorumInfo.Node (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class QuorumInfo.Node

+
+
java.lang.Object +
org.apache.kafka.clients.admin.QuorumInfo.Node
+
+
+
+
Enclosing class:
+
QuorumInfo
+
+
+
public static class QuorumInfo.Node +extends Object
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      nodeId

      +
      public int nodeId()
      +
      +
    • +
    • +
      +

      endpoints

      +
      public List<RaftVoterEndpoint> endpoints()
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/QuorumInfo.ReplicaState.html b/static/41/javadoc/org/apache/kafka/clients/admin/QuorumInfo.ReplicaState.html new file mode 100644 index 000000000..0e0c29211 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/QuorumInfo.ReplicaState.html @@ -0,0 +1,245 @@ + + + + +QuorumInfo.ReplicaState (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class QuorumInfo.ReplicaState

+
+
java.lang.Object +
org.apache.kafka.clients.admin.QuorumInfo.ReplicaState
+
+
+
+
Enclosing class:
+
QuorumInfo
+
+
+
public static class QuorumInfo.ReplicaState +extends Object
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      replicaId

      +
      public int replicaId()
      +
      Return the ID for this replica.
      +
      +
      Returns:
      +
      The ID for this replica
      +
      +
      +
    • +
    • +
      +

      replicaDirectoryId

      +
      public Uuid replicaDirectoryId()
      +
      Return the directory id of the replica if configured, or Uuid.ZERO_UUID if not.
      +
      +
    • +
    • +
      +

      logEndOffset

      +
      public long logEndOffset()
      +
      Return the logEndOffset known by the leader for this replica.
      +
      +
      Returns:
      +
      The logEndOffset for this replica
      +
      +
      +
    • +
    • +
      +

      lastFetchTimestamp

      +
      public OptionalLong lastFetchTimestamp()
      +
      Return the last millisecond timestamp at which the leader received a + fetch from this replica.
      +
      +
      Returns:
      +
      The value of the lastFetchTime if known, empty otherwise
      +
      +
      +
    • +
    • +
      +

      lastCaughtUpTimestamp

      +
      public OptionalLong lastCaughtUpTimestamp()
      +
      Return the last millisecond timestamp at which this replica was known to be + caught up with the leader.
      +
      +
      Returns:
      +
      The value of the lastCaughtUpTime if known, empty otherwise
      +
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/QuorumInfo.html b/static/41/javadoc/org/apache/kafka/clients/admin/QuorumInfo.html new file mode 100644 index 000000000..1418b48bc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/QuorumInfo.html @@ -0,0 +1,238 @@ + + + + +QuorumInfo (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class QuorumInfo

+
+
java.lang.Object +
org.apache.kafka.clients.admin.QuorumInfo
+
+
+
+
public class QuorumInfo +extends Object
+
This class is used to describe the state of the quorum received in DescribeQuorumResponse.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      leaderId

      +
      public int leaderId()
      +
      +
    • +
    • +
      +

      leaderEpoch

      +
      public long leaderEpoch()
      +
      +
    • +
    • +
      +

      highWatermark

      +
      public long highWatermark()
      +
      +
    • +
    • +
      +

      voters

      +
      public List<QuorumInfo.ReplicaState> voters()
      +
      +
    • +
    • +
      +

      observers

      +
      public List<QuorumInfo.ReplicaState> observers()
      +
      +
    • +
    • +
      +

      nodes

      +
      public Map<Integer,QuorumInfo.Node> nodes()
      +
      +
      Returns:
      +
      The voter nodes in the Raft cluster, or an empty map if KIP-853 is not enabled.
      +
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
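QuorumInfo is obtained from Admin#describeMetadataQuorum. A small sketch, assuming an existing Admin client and the usual org.apache.kafka.clients.admin imports:

```java
// Sketch: summarize the state of the KRaft metadata quorum.
static void printQuorum(Admin admin) throws Exception {
    QuorumInfo quorum = admin.describeMetadataQuorum().quorumInfo().get();
    System.out.printf("leader=%d epoch=%d highWatermark=%d%n",
            quorum.leaderId(), quorum.leaderEpoch(), quorum.highWatermark());
    quorum.voters().forEach(v -> System.out.printf(
            "voter %d logEndOffset=%d lastFetch=%s%n",
            v.replicaId(), v.logEndOffset(), v.lastFetchTimestamp()));
}
```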
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/RaftVoterEndpoint.html b/static/41/javadoc/org/apache/kafka/clients/admin/RaftVoterEndpoint.html new file mode 100644 index 000000000..3236e1ec4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/RaftVoterEndpoint.html @@ -0,0 +1,251 @@ + + + + +RaftVoterEndpoint (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class RaftVoterEndpoint

+
+
java.lang.Object +
org.apache.kafka.clients.admin.RaftVoterEndpoint
+
+
+
+
@Stable +public class RaftVoterEndpoint +extends Object
+
An endpoint for a raft quorum voter.
+
+
+
    + +
  • +
    +

    Constructor Summary

    +
    Constructors
    +
    +
    Constructor
    +
    Description
    +
    RaftVoterEndpoint(String listener, + String host, + int port)
    +
    +
    Create an endpoint for a metadata quorum voter.
    +
    +
    +
    +
  • + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    +
    boolean
    + +
     
    +
    int
    + +
     
    + + +
     
    + + +
    +
    The listener name for this endpoint.
    +
    + + +
    +
    Deprecated, for removal: This API element is subject to removal in a future version. +
    Since 4.1.
    +
    +
    +
    int
    + +
     
    + + +
     
    +
    +
    +
    +
    +

    Methods inherited from class java.lang.Object

    +getClass, notify, notifyAll, wait, wait, wait
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      RaftVoterEndpoint

      +
      public RaftVoterEndpoint(String listener, + String host, + int port)
      +
      Create an endpoint for a metadata quorum voter.
      +
      +
      Parameters:
      +
      listener - The human-readable name for this endpoint. For example, CONTROLLER.
      +
      host - The DNS hostname for this endpoint.
      +
      port - The network port for this endpoint.
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      listener

      +
      public String listener()
      +
      The listener name for this endpoint.
      +
      +
    • +
    • +
      +

      name

      +
      @Deprecated(since="4.1", + forRemoval=true) +public String name()
      +
      Deprecated, for removal: This API element is subject to removal in a future version. +
      Since 4.1. Use listener() instead. This function will be removed in 5.0.
      +
      +
      +
    • +
    • +
      +

      host

      +
      public String host()
      +
      +
    • +
    • +
      +

      port

      +
      public int port()
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
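For illustration only: constructing an endpoint for a KRaft controller voter. The listener name, host, and port are placeholders; such endpoints are passed to Admin#addRaftVoter when adding a voter to the metadata quorum (check the Admin Javadoc for the exact signature).

```java
// Sketch: describe where a controller voter can be reached (placeholder values).
RaftVoterEndpoint endpoint =
        new RaftVoterEndpoint("CONTROLLER", "controller-1.example.com", 9093);
System.out.printf("%s://%s:%d%n", endpoint.listener(), endpoint.host(), endpoint.port());
```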
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/RecordsToDelete.html b/static/41/javadoc/org/apache/kafka/clients/admin/RecordsToDelete.html new file mode 100644 index 000000000..66b15b4a4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/RecordsToDelete.html @@ -0,0 +1,190 @@ + + + + +RecordsToDelete (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class RecordsToDelete

+
+
java.lang.Object +
org.apache.kafka.clients.admin.RecordsToDelete
+
+
+
+
public class RecordsToDelete +extends Object
+
Describe records to delete in a call to Admin.deleteRecords(Map)
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      beforeOffset

      +
      public static RecordsToDelete beforeOffset(long offset)
      +
      Delete all the records before the given offset
      +
      +
      Parameters:
      +
      offset - the offset before which all records will be deleted
      +
      +
      +
    • +
    • +
      +

      beforeOffset

      +
      public long beforeOffset()
      +
      The offset before which all records will be deleted
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
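As an illustrative sketch (not from the Javadoc above), records below a chosen offset can be deleted with Admin#deleteRecords; the offset is a placeholder, `admin` is an existing Admin client, and the usual admin/common imports are assumed.

```java
// Sketch: delete everything before offset 1000 in one partition and print the new low watermark.
static void truncatePartition(Admin admin, TopicPartition tp) throws Exception {
    DeleteRecordsResult result =
            admin.deleteRecords(Map.of(tp, RecordsToDelete.beforeOffset(1000L)));
    long lowWatermark = result.lowWatermarks().get(tp).get().lowWatermark();
    System.out.printf("%s low watermark is now %d%n", tp, lowWatermark);
}
```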
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupOptions.html new file mode 100644 index 000000000..4f128b0a4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupOptions.html @@ -0,0 +1,203 @@ + + + + +RemoveMembersFromConsumerGroupOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class RemoveMembersFromConsumerGroupOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<RemoveMembersFromConsumerGroupOptions> +
org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions
+
+
+
+
+
public class RemoveMembersFromConsumerGroupOptions +extends AbstractOptions<RemoveMembersFromConsumerGroupOptions>
+
Options for Admin.removeMembersFromConsumerGroup(String, RemoveMembersFromConsumerGroupOptions). + It carries the members to be removed from the consumer group.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      RemoveMembersFromConsumerGroupOptions

      +
      public RemoveMembersFromConsumerGroupOptions(Collection<MemberToRemove> members)
      +
      +
    • +
    • +
      +

      RemoveMembersFromConsumerGroupOptions

      +
      public RemoveMembersFromConsumerGroupOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      reason

      +
      public void reason(String reason)
      +
      Sets an optional reason.
      +
      +
    • +
    • +
      +

      members

      +
      public Set<MemberToRemove> members()
      +
      +
    • +
    • +
      +

      reason

      +
      public String reason()
      +
      +
    • +
    • +
      +

      removeAll

      +
      public boolean removeAll()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResult.html new file mode 100644 index 000000000..eced96dba --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResult.html @@ -0,0 +1,151 @@ + + + + +RemoveMembersFromConsumerGroupResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class RemoveMembersFromConsumerGroupResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupResult
+
+
+
+
public class RemoveMembersFromConsumerGroupResult +extends Object
+
The result of the Admin.removeMembersFromConsumerGroup(String, RemoveMembersFromConsumerGroupOptions) call. + + The API of this class is evolving, see Admin for details.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Returns a future which succeeds only if the request was entirely successful, i.e. there was + neither a top-level nor a member-level error. + Otherwise, the future fails with the first member-level error.
      +
      +
    • +
    • +
      +

      memberResult

      +
      public KafkaFuture<Void> memberResult(MemberToRemove member)
      +
      Returns the selected member future.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
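A hedged usage sketch combining the options and result classes above; the group id and group.instance.id are placeholders, `admin` is an existing Admin client, and imports from org.apache.kafka.clients.admin are assumed.

```java
// Sketch: remove one static member from a consumer group, recording an optional reason.
static void evictMember(Admin admin) throws Exception {
    RemoveMembersFromConsumerGroupOptions options =
            new RemoveMembersFromConsumerGroupOptions(List.of(new MemberToRemove("instance-1")));
    options.reason("rebalancing maintenance"); // optional, shows up in broker logs
    admin.removeMembersFromConsumerGroup("my-group", options).all().get();
}
```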
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/RemoveRaftVoterOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/RemoveRaftVoterOptions.html new file mode 100644 index 000000000..60a6bddcc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/RemoveRaftVoterOptions.html @@ -0,0 +1,174 @@ + + + + +RemoveRaftVoterOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class RemoveRaftVoterOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<RemoveRaftVoterOptions> +
org.apache.kafka.clients.admin.RemoveRaftVoterOptions
+
+
+
+
+
@Stable +public class RemoveRaftVoterOptions +extends AbstractOptions<RemoveRaftVoterOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      RemoveRaftVoterOptions

      +
      public RemoveRaftVoterOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/RemoveRaftVoterResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/RemoveRaftVoterResult.html new file mode 100644 index 000000000..6191646fb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/RemoveRaftVoterResult.html @@ -0,0 +1,138 @@ + + + + +RemoveRaftVoterResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class RemoveRaftVoterResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.RemoveRaftVoterResult
+
+
+
+
@Stable +public class RemoveRaftVoterResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Returns a future that completes when the voter has been removed.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/RenewDelegationTokenOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/RenewDelegationTokenOptions.html new file mode 100644 index 000000000..1f868542e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/RenewDelegationTokenOptions.html @@ -0,0 +1,173 @@ + + + + +RenewDelegationTokenOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class RenewDelegationTokenOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<RenewDelegationTokenOptions> +
org.apache.kafka.clients.admin.RenewDelegationTokenOptions
+
+
+
+
+
public class RenewDelegationTokenOptions +extends AbstractOptions<RenewDelegationTokenOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      RenewDelegationTokenOptions

      +
      public RenewDelegationTokenOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      renewTimePeriodMs

      +
      public RenewDelegationTokenOptions renewTimePeriodMs(long renewTimePeriodMs)
      +
      +
    • +
    • +
      +

      renewTimePeriodMs

      +
      public long renewTimePeriodMs()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/RenewDelegationTokenResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/RenewDelegationTokenResult.html new file mode 100644 index 000000000..4ba0a4889 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/RenewDelegationTokenResult.html @@ -0,0 +1,135 @@ + + + + +RenewDelegationTokenResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class RenewDelegationTokenResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.RenewDelegationTokenResult
+
+
+
+
public class RenewDelegationTokenResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      expiryTimestamp

      +
      public KafkaFuture<Long> expiryTimestamp()
      +
      Returns a future which yields the expiry timestamp
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
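Sketch only: renewing a delegation token previously obtained from Admin#createDelegationToken. The renew period is a placeholder, and `token` is assumed to be an org.apache.kafka.common.security.token.delegation.DelegationToken.

```java
// Sketch: extend a delegation token by one hour and print its new expiry time.
static void renew(Admin admin, DelegationToken token) throws Exception {
    long newExpiry = admin.renewDelegationToken(
            token.hmac(),
            new RenewDelegationTokenOptions().renewTimePeriodMs(60 * 60 * 1000L))
        .expiryTimestamp().get();
    System.out.println("Token now expires at " + newExpiry);
}
```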
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ReplicaInfo.html b/static/41/javadoc/org/apache/kafka/clients/admin/ReplicaInfo.html new file mode 100644 index 000000000..dbeab6b4e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ReplicaInfo.html @@ -0,0 +1,216 @@ + + + + +ReplicaInfo (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ReplicaInfo

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ReplicaInfo
+
+
+
+
public class ReplicaInfo +extends Object
+
A description of a replica on a particular broker.
+
+
+
    + +
  • +
    +

    Constructor Summary

    +
    Constructors
    +
    +
    Constructor
    +
    Description
    +
    ReplicaInfo(long size, + long offsetLag, + boolean isFuture)
    +
     
    +
    +
    +
  • + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    +
    boolean
    + +
    +
    Whether this replica has been created by an AlterReplicaLogDirsRequest + but has not yet replaced the current replica on the broker.
    +
    +
    long
    + +
    +
    The lag of the log's LEO with respect to the partition's + high watermark (if it is the current log for the partition) + or the current replica's LEO (if it is the future log + for the partition).
    +
    +
    long
    + +
    +
    The total size of the log segments in this replica in bytes.
    +
    + + +
     
    +
    +
    +
    +
    +

    Methods inherited from class java.lang.Object

    +equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ReplicaInfo

      +
      public ReplicaInfo(long size, + long offsetLag, + boolean isFuture)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      size

      +
      public long size()
      +
      The total size of the log segments in this replica in bytes.
      +
      +
    • +
    • +
      +

      offsetLag

      +
      public long offsetLag()
      +
      The lag of the log's LEO with respect to the partition's + high watermark (if it is the current log for the partition) + or the current replica's LEO (if it is the future log + for the partition).
      +
      +
    • +
    • +
      +

      isFuture

      +
      public boolean isFuture()
      +
      Whether this replica has been created by an AlterReplicaLogDirsRequest + but has not yet replaced the current replica on the broker.
      +
      +
      Returns:
      +
      true if this log is created by AlterReplicaLogDirsRequest and will replace the current log + of the replica at some time in the future.
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
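ReplicaInfo values are nested inside the result of Admin#describeLogDirs. A rough sketch, assuming an existing Admin client, that broker id 0 is part of the cluster, and the usual admin/common imports:

```java
// Sketch: report on-disk size and lag for every replica hosted by broker 0.
static void printLogDirs(Admin admin) throws Exception {
    Map<Integer, Map<String, LogDirDescription>> dirs =
            admin.describeLogDirs(List.of(0)).allDescriptions().get();
    dirs.get(0).forEach((path, desc) ->
            desc.replicaInfos().forEach((tp, info) -> System.out.printf(
                    "%s in %s: %d bytes, lag=%d, future=%b%n",
                    tp, path, info.size(), info.offsetLag(), info.isFuture())));
}
```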
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ScramCredentialInfo.html b/static/41/javadoc/org/apache/kafka/clients/admin/ScramCredentialInfo.html new file mode 100644 index 000000000..3f9f5e3b3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ScramCredentialInfo.html @@ -0,0 +1,230 @@ + + + + +ScramCredentialInfo (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ScramCredentialInfo

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ScramCredentialInfo
+
+
+
+
public class ScramCredentialInfo +extends Object
+
Mechanism and iterations for a SASL/SCRAM credential associated with a user.
+
+
See Also:
+
+ +
+
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ScramCredentialInfo

      +
      public ScramCredentialInfo(ScramMechanism mechanism, + int iterations)
      +
      +
      Parameters:
      +
      mechanism - the required mechanism
      +
      iterations - the number of iterations used when creating the credential
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      mechanism

      +
      public ScramMechanism mechanism()
      +
      +
      Returns:
      +
      the mechanism
      +
      +
      +
    • +
    • +
      +

      iterations

      +
      public int iterations()
      +
      +
      Returns:
      +
      the number of iterations used when creating the credential
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ScramMechanism.html b/static/41/javadoc/org/apache/kafka/clients/admin/ScramMechanism.html new file mode 100644 index 000000000..4b42db7cc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ScramMechanism.html @@ -0,0 +1,309 @@ + + + + +ScramMechanism (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Enum Class ScramMechanism

+
+
java.lang.Object +
java.lang.Enum<ScramMechanism> +
org.apache.kafka.clients.admin.ScramMechanism
+
+
+
+
+
All Implemented Interfaces:
+
Serializable, Comparable<ScramMechanism>, Constable
+
+
+
public enum ScramMechanism +extends Enum<ScramMechanism>
+
Representation of a SASL/SCRAM Mechanism.
+
+
See Also:
+
+
    +
  • KIP-554: Add Broker-side SCRAM Config API + + This code is duplicated in org.apache.kafka.common.security.scram.internals.ScramMechanism. + The type field in both files must match and must not change. The type field + is used both for passing ScramCredentialUpsertion and for the internal + UserScramCredentialRecord. Do not change the type field.
  • +
+
+
+
+
+ +
+
+
    + +
  • +
    +

    Enum Constant Details

    +
      +
    • +
      +

      UNKNOWN

      +
      public static final ScramMechanism UNKNOWN
      +
      +
    • +
    • +
      +

      SCRAM_SHA_256

      +
      public static final ScramMechanism SCRAM_SHA_256
      +
      +
    • +
    • +
      +

      SCRAM_SHA_512

      +
      public static final ScramMechanism SCRAM_SHA_512
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public static ScramMechanism[] values()
      +
      Returns an array containing the constants of this enum class, in +the order they are declared.
      +
      +
      Returns:
      +
      an array containing the constants of this enum class, in the order they are declared
      +
      +
      +
    • +
    • +
      +

      valueOf

      +
      public static ScramMechanism valueOf(String name)
      +
      Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
      +
      +
      Parameters:
      +
      name - the name of the enum constant to be returned.
      +
      Returns:
      +
      the enum constant with the specified name
      +
      Throws:
      +
      IllegalArgumentException - if this enum class has no constant with the specified name
      +
      NullPointerException - if the argument is null
      +
      +
      +
    • +
    • +
      +

      fromType

      +
      public static ScramMechanism fromType(byte type)
      +
      +
      Parameters:
      +
      type - the type indicator
      +
      Returns:
      +
      the instance corresponding to the given type indicator, otherwise UNKNOWN
      +
      +
      +
    • +
    • +
      +

      fromMechanismName

      +
      public static ScramMechanism fromMechanismName(String mechanismName)
      +
      +
      Parameters:
      +
      mechanismName - the SASL SCRAM mechanism name
      +
      Returns:
      +
      the corresponding SASL SCRAM mechanism enum, otherwise UNKNOWN
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      mechanismName

      +
      public String mechanismName()
      +
      +
      Returns:
      +
      the corresponding SASL SCRAM mechanism name
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      type

      +
      public byte type()
      +
      +
      Returns:
      +
      the type indicator for this SASL SCRAM mechanism
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
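Putting ScramMechanism and ScramCredentialInfo together, credentials are usually upserted via Admin#alterUserScramCredentials. The user name, password, and iteration count below are placeholders; `admin` is an existing Admin client and imports from org.apache.kafka.clients.admin are assumed.

```java
// Sketch: create or update a SCRAM-SHA-256 credential for user "alice".
static void upsertScram(Admin admin) throws Exception {
    ScramCredentialInfo info = new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 8192);
    List<UserScramCredentialAlteration> alterations =
            List.of(new UserScramCredentialUpsertion("alice", info, "alice-secret"));
    admin.alterUserScramCredentials(alterations).all().get();
}
```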
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ShareGroupDescription.html b/static/41/javadoc/org/apache/kafka/clients/admin/ShareGroupDescription.html new file mode 100644 index 000000000..0ee80101a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ShareGroupDescription.html @@ -0,0 +1,304 @@ + + + + +ShareGroupDescription (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ShareGroupDescription

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ShareGroupDescription
+
+
+
+
@Evolving +public class ShareGroupDescription +extends Object
+
A detailed description of a single share group in the cluster.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    + +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      groupId

      +
      public String groupId()
      +
      The id of the share group.
      +
      +
    • +
    • +
      +

      members

      + +
      A list of the members of the share group.
      +
      +
    • +
    • +
      +

      groupState

      +
      public GroupState groupState()
      +
      The group state, or UNKNOWN if the state is too new for us to parse.
      +
      +
    • +
    • +
      +

      coordinator

      +
      public Node coordinator()
      +
      The share group coordinator, or null if the coordinator is not known.
      +
      +
    • +
    • +
      +

      authorizedOperations

      +
      public Set<AclOperation> authorizedOperations()
      +
      authorizedOperations for this group, or null if that information is not known.
      +
      +
    • +
    • +
      +

      groupEpoch

      +
      public int groupEpoch()
      +
      The epoch of the share group.
      +
      +
    • +
    • +
      +

      targetAssignmentEpoch

      +
      public int targetAssignmentEpoch()
      +
      The epoch of the target assignment.
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
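A descriptive sketch of consuming this class; ShareGroupDescription instances are typically obtained via Admin#describeShareGroups, but only the accessors documented on this and the following pages are used here, and imports from org.apache.kafka.clients.admin are assumed.

```java
// Sketch: summarize a share group description obtained from the Admin client.
static void printShareGroup(ShareGroupDescription group) {
    System.out.printf("group=%s state=%s epoch=%d coordinator=%s%n",
            group.groupId(), group.groupState(), group.groupEpoch(), group.coordinator());
    group.members().forEach(m -> System.out.printf(
            "  member %s on %s -> %s%n",
            m.consumerId(), m.host(), m.assignment().topicPartitions()));
}
```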
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ShareMemberAssignment.html b/static/41/javadoc/org/apache/kafka/clients/admin/ShareMemberAssignment.html new file mode 100644 index 000000000..a4c6738f6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ShareMemberAssignment.html @@ -0,0 +1,209 @@ + + + + +ShareMemberAssignment (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ShareMemberAssignment

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ShareMemberAssignment
+
+
+
+
@Evolving +public class ShareMemberAssignment +extends Object
+
A description of the assignments of a specific share group member.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ShareMemberAssignment

      +
      public ShareMemberAssignment(Set<TopicPartition> topicPartitions)
      +
      Creates an instance with the specified parameters.
      +
      +
      Parameters:
      +
      topicPartitions - List of topic partitions
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      topicPartitions

      +
      public Set<TopicPartition> topicPartitions()
      +
      The topic partitions assigned to a group member.
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/ShareMemberDescription.html b/static/41/javadoc/org/apache/kafka/clients/admin/ShareMemberDescription.html new file mode 100644 index 000000000..12169ac2f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/ShareMemberDescription.html @@ -0,0 +1,258 @@ + + + + +ShareMemberDescription (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ShareMemberDescription

+
+
java.lang.Object +
org.apache.kafka.clients.admin.ShareMemberDescription
+
+
+
+
@Evolving +public class ShareMemberDescription +extends Object
+
A detailed description of a single share group member in the cluster.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    + +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      consumerId

      +
      public String consumerId()
      +
      The consumer id of the group member.
      +
      +
    • +
    • +
      +

      clientId

      +
      public String clientId()
      +
      The client id of the group member.
      +
      +
    • +
    • +
      +

      host

      +
      public String host()
      +
      The host where the group member is running.
      +
      +
    • +
    • +
      +

      assignment

      +
      public ShareMemberAssignment assignment()
      +
      The assignment of the group member.
      +
      +
    • +
    • +
      +

      memberEpoch

      +
      public int memberEpoch()
      +
      The epoch of the group member.
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupDescription.html b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupDescription.html new file mode 100644 index 000000000..90ffb0ae8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupDescription.html @@ -0,0 +1,314 @@ + + + + +StreamsGroupDescription (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class StreamsGroupDescription

+
+
java.lang.Object +
org.apache.kafka.clients.admin.StreamsGroupDescription
+
+
+
+
@Evolving +public class StreamsGroupDescription +extends Object
+
A detailed description of a single streams group in the cluster.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    + +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      groupId

      +
      public String groupId()
      +
      The id of the streams group.
      +
      +
    • +
    • +
      +

      groupEpoch

      +
      public int groupEpoch()
      +
      The epoch of the streams group.
      +
      +
    • +
    • +
      +

      targetAssignmentEpoch

      +
      public int targetAssignmentEpoch()
      +
      The epoch of the target assignment.
      +
      +
    • +
    • +
      +

      topologyEpoch

      +
      public int topologyEpoch()
      +
      The epoch of the currently used topology.
      +
      +
    • +
    • +
      +

      members

      + +
      A list of the members of the streams group.
      +
      +
    • +
    • +
      +

      subtopologies

      + +
      A list of the subtopologies in the streams group.
      +
      +
    • +
    • +
      +

      groupState

      +
      public GroupState groupState()
      +
      The state of the streams group, or UNKNOWN if the state is too new for us to parse.
      +
      +
    • +
    • +
      +

      coordinator

      +
      public Node coordinator()
      +
      The group coordinator, or null if the coordinator is not known.
      +
      +
    • +
    • +
      +

      authorizedOperations

      +
      public Set<AclOperation> authorizedOperations()
      +
      authorizedOperations for this group, or null if that information is not known.
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
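Sketch only, using just the accessors documented above; how the description is obtained (presumably Admin#describeStreamsGroups) is an assumption and may evolve along with this Evolving API. Imports from org.apache.kafka.clients.admin are assumed.

```java
// Sketch: print the shape of a streams group description obtained from the Admin client.
static void printStreamsGroup(StreamsGroupDescription group) {
    System.out.printf("group=%s state=%s groupEpoch=%d topologyEpoch=%d%n",
            group.groupId(), group.groupState(), group.groupEpoch(), group.topologyEpoch());
    group.subtopologies().forEach(s -> System.out.println("  subtopology: " + s));
    group.members().forEach(m -> System.out.println("  member: " + m));
}
```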
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberAssignment.TaskIds.html b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberAssignment.TaskIds.html new file mode 100644 index 000000000..1e860cbdc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberAssignment.TaskIds.html @@ -0,0 +1,219 @@ + + + + +StreamsGroupMemberAssignment.TaskIds (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class StreamsGroupMemberAssignment.TaskIds

+
+
java.lang.Object +
org.apache.kafka.clients.admin.StreamsGroupMemberAssignment.TaskIds
+
+
+
+
Enclosing class:
+
StreamsGroupMemberAssignment
+
+
+
public static class StreamsGroupMemberAssignment.TaskIds +extends Object
+
All tasks for one subtopology of a member.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      TaskIds

      +
      public TaskIds(String subtopologyId, + List<Integer> partitions)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      subtopologyId

      +
      public String subtopologyId()
      +
      The subtopology identifier.
      +
      +
    • +
    • +
      +

      partitions

      +
      public List<Integer> partitions()
      +
      The partitions of the subtopology processed by this member.
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberAssignment.html b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberAssignment.html new file mode 100644 index 000000000..0a2972c58 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberAssignment.html @@ -0,0 +1,247 @@ + + + + +StreamsGroupMemberAssignment (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class StreamsGroupMemberAssignment

+
+
java.lang.Object +
org.apache.kafka.clients.admin.StreamsGroupMemberAssignment
+
+
+
+
@Evolving +public class StreamsGroupMemberAssignment +extends Object
+
A description of the assignments of a specific group member.
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberDescription.Endpoint.html b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberDescription.Endpoint.html new file mode 100644 index 000000000..993e717cf --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberDescription.Endpoint.html @@ -0,0 +1,213 @@ + + + + +StreamsGroupMemberDescription.Endpoint (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class StreamsGroupMemberDescription.Endpoint

+
+
java.lang.Object +
org.apache.kafka.clients.admin.StreamsGroupMemberDescription.Endpoint
+
+
+
+
Enclosing class:
+
StreamsGroupMemberDescription
+
+
+
public static class StreamsGroupMemberDescription.Endpoint +extends Object
+
The user-defined endpoint for the member.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      Endpoint

      +
      public Endpoint(String host, + int port)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      host

      +
      public String host()
      +
      +
    • +
    • +
      +

      port

      +
      public int port()
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberDescription.TaskOffset.html b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberDescription.TaskOffset.html new file mode 100644 index 000000000..46652ef7c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberDescription.TaskOffset.html @@ -0,0 +1,233 @@ + + + + +StreamsGroupMemberDescription.TaskOffset (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class StreamsGroupMemberDescription.TaskOffset

+
+
java.lang.Object +
org.apache.kafka.clients.admin.StreamsGroupMemberDescription.TaskOffset
+
+
+
+
Enclosing class:
+
StreamsGroupMemberDescription
+
+
+
public static class StreamsGroupMemberDescription.TaskOffset +extends Object
+
The cumulative offset for one task.
+
+
+
    + +
  • +
    +

    Constructor Summary

    +
    Constructors
    +
    +
    Constructor
    +
    Description
    +
    TaskOffset(String subtopologyId, + int partition, + long offset)
    +
     
    +
    +
    +
  • + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    +
    boolean
    + +
     
    +
    int
    + +
     
    +
    long
    + +
    +
    The cumulative offset (sum of offsets in all input partitions).
    +
    +
    int
    + +
    +
    The partition of the task.
    +
    + + +
    +
    The subtopology identifier.
    +
    + + +
     
    +
    +
    +
    +
    +

    Methods inherited from class java.lang.Object

    +getClass, notify, notifyAll, wait, wait, wait
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      TaskOffset

      +
      public TaskOffset(String subtopologyId, + int partition, + long offset)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      subtopologyId

      +
      public String subtopologyId()
      +
      The subtopology identifier.
      +
      +
    • +
    • +
      +

      partition

      +
      public int partition()
      +
      The partition of the task.
      +
      +
    • +
    • +
      +

      offset

      +
      public long offset()
      +
      The cumulative offset (sum of offsets in all input partitions).
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberDescription.html b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberDescription.html new file mode 100644 index 000000000..1c4520391 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupMemberDescription.html @@ -0,0 +1,420 @@ + + + + +StreamsGroupMemberDescription (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class StreamsGroupMemberDescription

+
+
java.lang.Object +
org.apache.kafka.clients.admin.StreamsGroupMemberDescription
+
+
+
+
@Evolving +public class StreamsGroupMemberDescription +extends Object
+
A detailed description of a single streams group member in the cluster.
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupSubtopologyDescription.TopicInfo.html b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupSubtopologyDescription.TopicInfo.html new file mode 100644 index 000000000..c9b09379b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupSubtopologyDescription.TopicInfo.html @@ -0,0 +1,234 @@ + + + + +StreamsGroupSubtopologyDescription.TopicInfo (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class StreamsGroupSubtopologyDescription.TopicInfo

+
+
java.lang.Object +
org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription.TopicInfo
+
+
+
+
Enclosing class:
+
StreamsGroupSubtopologyDescription
+
+
+
public static class StreamsGroupSubtopologyDescription.TopicInfo +extends Object
+
Information about a topic. These configs reflect what is required by the topology, but may differ from the current state on the + broker.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      TopicInfo

      +
      public TopicInfo(int partitions, + int replicationFactor, + Map<String,String> topicConfigs)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      partitions

      +
      public int partitions()
      +
      The number of partitions in the topic. Can be 0 if no specific number of partitions is enforced.
      +
      +
    • +
    • +
      +

      replicationFactor

      +
      public int replicationFactor()
      +
      The replication factor of the topic. Can be 0 if the default replication factor is used.
      +
      +
    • +
    • +
      +

      topicConfigs

      +
      public Map<String,String> topicConfigs()
      +
      Topic-level configurations as key-value pairs. Default configuration can be omitted.
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupSubtopologyDescription.html b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupSubtopologyDescription.html new file mode 100644 index 000000000..7790367da --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/StreamsGroupSubtopologyDescription.html @@ -0,0 +1,275 @@ + + + + +StreamsGroupSubtopologyDescription (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class StreamsGroupSubtopologyDescription

+
+
java.lang.Object +
org.apache.kafka.clients.admin.StreamsGroupSubtopologyDescription
+
+
+
+
@Evolving +public class StreamsGroupSubtopologyDescription +extends Object
+
A detailed description of a subtopology in a streams group.
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/SupportedVersionRange.html b/static/41/javadoc/org/apache/kafka/clients/admin/SupportedVersionRange.html new file mode 100644 index 000000000..67dcbc2c7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/SupportedVersionRange.html @@ -0,0 +1,221 @@ + + + + +SupportedVersionRange (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class SupportedVersionRange

+
+
java.lang.Object +
org.apache.kafka.clients.admin.SupportedVersionRange
+
+
+
+
public class SupportedVersionRange +extends Object
+
Represents a range of versions that a particular broker supports for some feature.
+
+
+
    + +
  • +
    +

    Constructor Summary

    +
    Constructors
    +
    +
    Constructor
    +
    Description
    +
    SupportedVersionRange(short minVersion, + short maxVersion)
    +
    +
    Raises an exception unless the following conditions are met: + 0 <= minVersion <= maxVersion.
    +
    +
    +
    +
  • + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    +
    boolean
    +
    equals(Object other)
    +
     
    +
    int
    + +
     
    +
    short
    + +
     
    +
    short
    + +
     
    + + +
     
    +
    +
    +
    +
    +

    Methods inherited from class java.lang.Object

    +getClass, notify, notifyAll, wait, wait, wait
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      SupportedVersionRange

      +
      public SupportedVersionRange(short minVersion, + short maxVersion)
      +
      Raises an exception unless the following conditions are met: + 0 <= minVersion <= maxVersion.
      +
      +
      Parameters:
      +
      minVersion - The minimum version value.
      +
      maxVersion - The maximum version value.
      +
      Throws:
      +
      IllegalArgumentException - Raised when the condition described above is not met.
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      minVersion

      +
      public short minVersion()
      +
      +
    • +
    • +
      +

      maxVersion

      +
      public short maxVersion()
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object other)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
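A hedged sketch of where SupportedVersionRange values typically come from: Admin.describeFeatures() returns a FeatureMetadata whose supportedFeatures() map is keyed by feature name. The bootstrap address is a placeholder.

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.FeatureMetadata;
import org.apache.kafka.clients.admin.SupportedVersionRange;

public class DescribeFeaturesExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            FeatureMetadata metadata = admin.describeFeatures().featureMetadata().get();
            for (Map.Entry<String, SupportedVersionRange> entry : metadata.supportedFeatures().entrySet()) {
                SupportedVersionRange range = entry.getValue();
                System.out.printf("%s supports versions %d..%d%n",
                    entry.getKey(), range.minVersion(), range.maxVersion());
            }
        }
    }
}
```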
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/TerminateTransactionOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/TerminateTransactionOptions.html new file mode 100644 index 000000000..23865be34 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/TerminateTransactionOptions.html @@ -0,0 +1,168 @@ + + + + +TerminateTransactionOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class TerminateTransactionOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<TerminateTransactionOptions> +
org.apache.kafka.clients.admin.TerminateTransactionOptions
+
+
+
+
+
public class TerminateTransactionOptions +extends AbstractOptions<TerminateTransactionOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      TerminateTransactionOptions

      +
      public TerminateTransactionOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/TerminateTransactionResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/TerminateTransactionResult.html new file mode 100644 index 000000000..27acb4d94 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/TerminateTransactionResult.html @@ -0,0 +1,135 @@ + + + + +TerminateTransactionResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class TerminateTransactionResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.TerminateTransactionResult
+
+
+
+
public class TerminateTransactionResult +extends Object
+ +
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      result

      +
      public KafkaFuture<Void> result()
      +
      Return a future which indicates whether the transaction was successfully terminated.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/TopicDescription.html b/static/41/javadoc/org/apache/kafka/clients/admin/TopicDescription.html new file mode 100644 index 000000000..973da6b7c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/TopicDescription.html @@ -0,0 +1,316 @@ + + + + +TopicDescription (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class TopicDescription

+
+
java.lang.Object +
org.apache.kafka.clients.admin.TopicDescription
+
+
+
+
public class TopicDescription +extends Object
+
A detailed description of a single topic in the cluster.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      TopicDescription

      +
      public TopicDescription(String name, + boolean internal, + List<TopicPartitionInfo> partitions)
      +
      Create an instance with the specified parameters.
      +
      +
      Parameters:
      +
      name - The topic name
      +
      internal - Whether the topic is internal to Kafka
      +
      partitions - A list of partitions where the index represents the partition id and the element contains + leadership and replica information for that partition.
      +
      +
      +
    • +
    • +
      +

      TopicDescription

      +
      public TopicDescription(String name, + boolean internal, + List<TopicPartitionInfo> partitions, + Set<AclOperation> authorizedOperations)
      +
      Create an instance with the specified parameters.
      +
      +
      Parameters:
      +
      name - The topic name
      +
      internal - Whether the topic is internal to Kafka
      +
      partitions - A list of partitions where the index represents the partition id and the element contains + leadership and replica information for that partition.
      +
      authorizedOperations - authorized operations for this topic, or empty set if this is not known.
      +
      +
      +
    • +
    • +
      +

      TopicDescription

      +
      public TopicDescription(String name, + boolean internal, + List<TopicPartitionInfo> partitions, + Set<AclOperation> authorizedOperations, + Uuid topicId)
      +
      Create an instance with the specified parameters.
      +
      +
      Parameters:
      +
      name - The topic name
      +
      internal - Whether the topic is internal to Kafka
      +
      partitions - A list of partitions where the index represents the partition id and the element contains + leadership and replica information for that partition.
      +
      authorizedOperations - authorized operations for this topic, or empty set if this is not known.
      +
      topicId - the topic id
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      name

      +
      public String name()
      +
      The name of the topic.
      +
      +
    • +
    • +
      +

      isInternal

      +
      public boolean isInternal()
      +
      Whether the topic is internal to Kafka. An example of an internal topic is the offsets and group management topic: + __consumer_offsets.
      +
      +
    • +
    • +
      +

      topicId

      +
      public Uuid topicId()
      +
      +
    • +
    • +
      +

      partitions

      +
      public List<TopicPartitionInfo> partitions()
      +
      A list of partitions where the index represents the partition id and the element contains leadership and replica + information for that partition.
      +
      +
    • +
    • +
      +

      authorizedOperations

      +
      public Set<AclOperation> authorizedOperations()
      +
      authorized operations for this topic, or null if this is not known.
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
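A short sketch of retrieving a TopicDescription via Admin.describeTopics(); the bootstrap address and topic name are placeholders.

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;

public class DescribeTopicsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            Map<String, TopicDescription> descriptions =
                admin.describeTopics(List.of("orders")).allTopicNames().get(); // "orders" is a placeholder
            TopicDescription orders = descriptions.get("orders");
            System.out.println("id=" + orders.topicId()
                + " internal=" + orders.isInternal()
                + " partitions=" + orders.partitions().size());
        }
    }
}
```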
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/TopicListing.html b/static/41/javadoc/org/apache/kafka/clients/admin/TopicListing.html new file mode 100644 index 000000000..53848aaf2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/TopicListing.html @@ -0,0 +1,213 @@ + + + + +TopicListing (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class TopicListing

+
+
java.lang.Object +
org.apache.kafka.clients.admin.TopicListing
+
+
+
+
public class TopicListing +extends Object
+
A listing of a topic in the cluster.
+
+
+
    + +
  • +
    +

    Constructor Summary

    +
    Constructors
    +
    +
    Constructor
    +
    Description
    +
    TopicListing(String name, + Uuid topicId, + boolean internal)
    +
    +
    Create an instance with the specified parameters.
    +
    +
    +
    +
  • + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    +
    boolean
    + +
    +
    Whether the topic is internal to Kafka.
    +
    + + +
    +
    The name of the topic.
    +
    + + +
    +
    The id of the topic.
    +
    + + +
     
    +
    +
    +
    +
    +

    Methods inherited from class java.lang.Object

    +equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      TopicListing

      +
      public TopicListing(String name, + Uuid topicId, + boolean internal)
      +
      Create an instance with the specified parameters.
      +
      +
      Parameters:
      +
      name - The topic name
      +
      topicId - The topic id.
      +
      internal - Whether the topic is internal to Kafka
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      topicId

      +
      public Uuid topicId()
      +
      The id of the topic.
      +
      +
    • +
    • +
      +

      name

      +
      public String name()
      +
      The name of the topic.
      +
      +
    • +
    • +
      +

      isInternal

      +
      public boolean isInternal()
      +
      Whether the topic is internal to Kafka. An example of an internal topic is the offsets and group management topic: + __consumer_offsets.
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
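A sketch of where TopicListing instances come from in normal use: Admin.listTopics() returns one per visible topic, including internal topics when requested. The bootstrap address is a placeholder.

```java
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListTopicsOptions;
import org.apache.kafka.clients.admin.TopicListing;

public class ListTopicsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            ListTopicsOptions includeInternal = new ListTopicsOptions().listInternal(true);
            for (TopicListing listing : admin.listTopics(includeInternal).listings().get()) {
                System.out.printf("%s (id=%s, internal=%b)%n",
                    listing.name(), listing.topicId(), listing.isInternal());
            }
        }
    }
}
```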
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/TransactionDescription.html b/static/41/javadoc/org/apache/kafka/clients/admin/TransactionDescription.html new file mode 100644 index 000000000..6e3444f54 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/TransactionDescription.html @@ -0,0 +1,263 @@ + + + + +TransactionDescription (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class TransactionDescription

+
+
java.lang.Object +
org.apache.kafka.clients.admin.TransactionDescription
+
+
+
+
public class TransactionDescription +extends Object
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      TransactionDescription

      +
      public TransactionDescription(int coordinatorId, + TransactionState state, + long producerId, + int producerEpoch, + long transactionTimeoutMs, + OptionalLong transactionStartTimeMs, + Set<TopicPartition> topicPartitions)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      coordinatorId

      +
      public int coordinatorId()
      +
      +
    • +
    • +
      +

      state

      +
      public TransactionState state()
      +
      +
    • +
    • +
      +

      producerId

      +
      public long producerId()
      +
      +
    • +
    • +
      +

      producerEpoch

      +
      public int producerEpoch()
      +
      +
    • +
    • +
      +

      transactionTimeoutMs

      +
      public long transactionTimeoutMs()
      +
      +
    • +
    • +
      +

      transactionStartTimeMs

      +
      public OptionalLong transactionStartTimeMs()
      +
      +
    • +
    • +
      +

      topicPartitions

      +
      public Set<TopicPartition> topicPartitions()
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
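A hedged sketch, assuming the Admin.describeTransactions API (KIP-664) that returns these objects; the bootstrap address and transactional.id are placeholders.

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TransactionDescription;

public class DescribeTransactionsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            TransactionDescription description =
                admin.describeTransactions(List.of("payments-app"))   // placeholder transactional.id
                     .description("payments-app").get();
            System.out.println("state=" + description.state()
                + " producerId=" + description.producerId()
                + " partitions=" + description.topicPartitions());
        }
    }
}
```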
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/TransactionListing.html b/static/41/javadoc/org/apache/kafka/clients/admin/TransactionListing.html new file mode 100644 index 000000000..aad042a43 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/TransactionListing.html @@ -0,0 +1,219 @@ + + + + +TransactionListing (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class TransactionListing

+
+
java.lang.Object +
org.apache.kafka.clients.admin.TransactionListing
+
+
+
+
public class TransactionListing +extends Object
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      TransactionListing

      +
      public TransactionListing(String transactionalId, + long producerId, + TransactionState transactionState)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      transactionalId

      +
      public String transactionalId()
      +
      +
    • +
    • +
      +

      producerId

      +
      public long producerId()
      +
      +
    • +
    • +
      +

      state

      +
      public TransactionState state()
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
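A sketch of listing transactions and filtering them by TransactionState, assuming the Admin.listTransactions / ListTransactionsOptions API from KIP-664; the bootstrap address is a placeholder.

```java
import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListTransactionsOptions;
import org.apache.kafka.clients.admin.TransactionListing;
import org.apache.kafka.clients.admin.TransactionState;

public class ListTransactionsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            ListTransactionsOptions onlyOngoing =
                new ListTransactionsOptions().filterStates(Set.of(TransactionState.ONGOING));
            for (TransactionListing listing : admin.listTransactions(onlyOngoing).all().get()) {
                System.out.printf("%s producerId=%d state=%s%n",
                    listing.transactionalId(), listing.producerId(), listing.state());
            }
        }
    }
}
```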
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/TransactionState.html b/static/41/javadoc/org/apache/kafka/clients/admin/TransactionState.html new file mode 100644 index 000000000..c045245db --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/TransactionState.html @@ -0,0 +1,287 @@ + + + + +TransactionState (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Enum Class TransactionState

+
+
java.lang.Object +
java.lang.Enum<TransactionState> +
org.apache.kafka.clients.admin.TransactionState
+
+
+
+
+
All Implemented Interfaces:
+
Serializable, Comparable<TransactionState>, Constable
+
+
+
public enum TransactionState +extends Enum<TransactionState>
+
+
+ +
+
+
    + +
  • +
    +

    Enum Constant Details

    + +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public static TransactionState[] values()
      +
      Returns an array containing the constants of this enum class, in +the order they are declared.
      +
      +
      Returns:
      +
      an array containing the constants of this enum class, in the order they are declared
      +
      +
      +
    • +
    • +
      +

      valueOf

      +
      public static TransactionState valueOf(String name)
      +
      Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
      +
      +
      Parameters:
      +
      name - the name of the enum constant to be returned.
      +
      Returns:
      +
      the enum constant with the specified name
      +
      Throws:
      +
      IllegalArgumentException - if this enum class has no constant with the specified name
      +
      NullPointerException - if the argument is null
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Enum<TransactionState>
      +
      +
      +
    • +
    • +
      +

      parse

      +
      public static TransactionState parse(String name)
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/UnregisterBrokerOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/UnregisterBrokerOptions.html new file mode 100644 index 000000000..74e0da484 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/UnregisterBrokerOptions.html @@ -0,0 +1,133 @@ + + + + +UnregisterBrokerOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class UnregisterBrokerOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<UnregisterBrokerOptions> +
org.apache.kafka.clients.admin.UnregisterBrokerOptions
+
+
+
+
+
public class UnregisterBrokerOptions +extends AbstractOptions<UnregisterBrokerOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      UnregisterBrokerOptions

      +
      public UnregisterBrokerOptions()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/UnregisterBrokerResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/UnregisterBrokerResult.html new file mode 100644 index 000000000..7b022fa67 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/UnregisterBrokerResult.html @@ -0,0 +1,137 @@ + + + + +UnregisterBrokerResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class UnregisterBrokerResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.UnregisterBrokerResult
+
+
+
+
public class UnregisterBrokerResult +extends Object
+
The result of the Admin.unregisterBroker(int, UnregisterBrokerOptions) call. + + The API of this class is evolving, see Admin for details.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      all

      +
      public KafkaFuture<Void> all()
      +
      Return a future which succeeds if the operation is successful.
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
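A sketch using the Admin.unregisterBroker(int, UnregisterBrokerOptions) call referenced above, typically run when decommissioning a KRaft broker that has already been shut down; the broker id and bootstrap address are placeholders.

```java
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.UnregisterBrokerOptions;
import org.apache.kafka.clients.admin.UnregisterBrokerResult;

public class UnregisterBrokerExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            UnregisterBrokerResult result =
                admin.unregisterBroker(3, new UnregisterBrokerOptions()); // broker id 3 is a placeholder
            result.all().get(); // completes once the controller has accepted the request
        }
    }
}
```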
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/UpdateFeaturesOptions.html b/static/41/javadoc/org/apache/kafka/clients/admin/UpdateFeaturesOptions.html new file mode 100644 index 000000000..84e6f5760 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/UpdateFeaturesOptions.html @@ -0,0 +1,173 @@ + + + + +UpdateFeaturesOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class UpdateFeaturesOptions

+
+
java.lang.Object +
org.apache.kafka.clients.admin.AbstractOptions<UpdateFeaturesOptions> +
org.apache.kafka.clients.admin.UpdateFeaturesOptions
+
+
+
+
+
public class UpdateFeaturesOptions +extends AbstractOptions<UpdateFeaturesOptions>
+ +
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      UpdateFeaturesOptions

      +
      public UpdateFeaturesOptions()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      validateOnly

      +
      public boolean validateOnly()
      +
      +
    • +
    • +
      +

      validateOnly

      +
      public UpdateFeaturesOptions validateOnly(boolean validateOnly)
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/UpdateFeaturesResult.html b/static/41/javadoc/org/apache/kafka/clients/admin/UpdateFeaturesResult.html new file mode 100644 index 000000000..55e6ca8f3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/UpdateFeaturesResult.html @@ -0,0 +1,146 @@ + + + + +UpdateFeaturesResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class UpdateFeaturesResult

+
+
java.lang.Object +
org.apache.kafka.clients.admin.UpdateFeaturesResult
+
+
+
+
public class UpdateFeaturesResult +extends Object
+
The result of the Admin.updateFeatures(Map, UpdateFeaturesOptions) call. + + The API of this class is evolving, see Admin for details.
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
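A hedged sketch of a validate-only feature update via Admin.updateFeatures(Map, UpdateFeaturesOptions); the FeatureUpdate class and its UpgradeType enum are assumed from the broader Admin API (KIP-584/KIP-778), and the feature name, target level, and bootstrap address are placeholders.

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.FeatureUpdate;
import org.apache.kafka.clients.admin.UpdateFeaturesOptions;

public class UpdateFeaturesExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            // Dry-run the update first: validateOnly(true) asks the controller to
            // check the request without applying it.
            FeatureUpdate update = new FeatureUpdate((short) 1, FeatureUpdate.UpgradeType.UPGRADE);
            admin.updateFeatures(Map.of("example.feature", update),      // placeholder feature name
                                 new UpdateFeaturesOptions().validateOnly(true))
                 .all().get();
        }
    }
}
```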
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/UserScramCredentialAlteration.html b/static/41/javadoc/org/apache/kafka/clients/admin/UserScramCredentialAlteration.html new file mode 100644 index 000000000..70d3e4752 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/UserScramCredentialAlteration.html @@ -0,0 +1,148 @@ + + + + +UserScramCredentialAlteration (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class UserScramCredentialAlteration

+
+
java.lang.Object +
org.apache.kafka.clients.admin.UserScramCredentialAlteration
+
+
+
+
Direct Known Subclasses:
+
UserScramCredentialDeletion, UserScramCredentialUpsertion
+
+
+
public abstract class UserScramCredentialAlteration +extends Object
+
A request to alter a user's SASL/SCRAM credentials.
+
+
See Also:
+
+ +
+
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      user

      +
      public String user()
      +
      +
      Returns:
      +
      the always non-null user
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/UserScramCredentialDeletion.html b/static/41/javadoc/org/apache/kafka/clients/admin/UserScramCredentialDeletion.html new file mode 100644 index 000000000..17671cd00 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/UserScramCredentialDeletion.html @@ -0,0 +1,183 @@ + + + + +UserScramCredentialDeletion (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class UserScramCredentialDeletion

+
+
java.lang.Object +
org.apache.kafka.clients.admin.UserScramCredentialAlteration +
org.apache.kafka.clients.admin.UserScramCredentialDeletion
+
+
+
+
+
public class UserScramCredentialDeletion +extends UserScramCredentialAlteration
+
A request to delete a SASL/SCRAM credential for a user.
+
+
See Also:
+
+ +
+
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      UserScramCredentialDeletion

      +
      public UserScramCredentialDeletion(String user, + ScramMechanism mechanism)
      +
      +
      Parameters:
      +
      user - the mandatory user
      +
      mechanism - the mandatory mechanism
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      mechanism

      +
      public ScramMechanism mechanism()
      +
      +
      Returns:
      +
      the always non-null mechanism
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/UserScramCredentialUpsertion.html b/static/41/javadoc/org/apache/kafka/clients/admin/UserScramCredentialUpsertion.html new file mode 100644 index 000000000..c0f79a025 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/UserScramCredentialUpsertion.html @@ -0,0 +1,260 @@ + + + + +UserScramCredentialUpsertion (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class UserScramCredentialUpsertion

+
+
java.lang.Object +
org.apache.kafka.clients.admin.UserScramCredentialAlteration +
org.apache.kafka.clients.admin.UserScramCredentialUpsertion
+
+
+
+
+
public class UserScramCredentialUpsertion +extends UserScramCredentialAlteration
+
A request to update/insert a SASL/SCRAM credential for a user.
+
+
See Also:
+
+ +
+
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      UserScramCredentialUpsertion

      +
      public UserScramCredentialUpsertion(String user, + ScramCredentialInfo credentialInfo, + String password)
      +
      Constructor that generates a random salt
      +
      +
      Parameters:
      +
      user - the user for which the credential is to be updated/inserted
      +
      credentialInfo - the mechanism and iterations to be used
      +
      password - the password
      +
      +
      +
    • +
    • +
      +

      UserScramCredentialUpsertion

      +
      public UserScramCredentialUpsertion(String user, + ScramCredentialInfo credentialInfo, + byte[] password)
      +
      Constructor that generates a random salt
      +
      +
      Parameters:
      +
      user - the user for which the credential is to be updated/inserted
      +
      credentialInfo - the mechanism and iterations to be used
      +
      password - the password
      +
      +
      +
    • +
    • +
      +

      UserScramCredentialUpsertion

      +
      public UserScramCredentialUpsertion(String user, + ScramCredentialInfo credentialInfo, + byte[] password, + byte[] salt)
      +
      Constructor that accepts an explicit salt
      +
      +
      Parameters:
      +
      user - the user for which the credential is to be updated/inserted
      +
      credentialInfo - the mechanism and iterations to be used
      +
      password - the password
      +
      salt - the salt to be used
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      credentialInfo

      +
      public ScramCredentialInfo credentialInfo()
      +
      +
      Returns:
      +
      the mechanism and iterations
      +
      +
      +
    • +
    • +
      +

      salt

      +
      public byte[] salt()
      +
      +
      Returns:
      +
      the salt
      +
      +
      +
    • +
    • +
      +

      password

      +
      public byte[] password()
      +
      +
      Returns:
      +
      the password
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
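A sketch combining an upsertion and a deletion in a single Admin.alterUserScramCredentials call; the user names, password, and bootstrap address are placeholders.

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ScramCredentialInfo;
import org.apache.kafka.clients.admin.ScramMechanism;
import org.apache.kafka.clients.admin.UserScramCredentialDeletion;
import org.apache.kafka.clients.admin.UserScramCredentialUpsertion;

public class AlterScramCredentialsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            ScramCredentialInfo sha256 =
                new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 8192); // 8192 iterations
            admin.alterUserScramCredentials(List.of(
                    new UserScramCredentialUpsertion("alice", sha256, "alice-secret"), // random salt generated
                    new UserScramCredentialDeletion("bob", ScramMechanism.SCRAM_SHA_512)))
                 .all().get();
        }
    }
}
```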
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/UserScramCredentialsDescription.html b/static/41/javadoc/org/apache/kafka/clients/admin/UserScramCredentialsDescription.html new file mode 100644 index 000000000..5c1364a8f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/UserScramCredentialsDescription.html @@ -0,0 +1,231 @@ + + + + +UserScramCredentialsDescription (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class UserScramCredentialsDescription

+
+
java.lang.Object +
org.apache.kafka.clients.admin.UserScramCredentialsDescription
+
+
+
+
public class UserScramCredentialsDescription +extends Object
+
Representation of all SASL/SCRAM credentials associated with a user that can be retrieved, or an exception indicating + why credentials could not be retrieved.
+
+
See Also:
+
+ +
+
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      UserScramCredentialsDescription

      +
      public UserScramCredentialsDescription(String name, + List<ScramCredentialInfo> credentialInfos)
      +
      +
      Parameters:
      +
      name - the required user name
      +
      credentialInfos - the required SASL/SCRAM credential representations for the user
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    • +
      +

      name

      +
      public String name()
      +
      +
      Returns:
      +
      the user name
      +
      +
      +
    • +
    • +
      +

      credentialInfos

      +
      public List<ScramCredentialInfo> credentialInfos()
      +
      +
      Returns:
      +
      the always non-null/unmodifiable list of SASL/SCRAM credential representations for the user
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
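A sketch of reading these descriptions back, assuming the Admin.describeUserScramCredentials() call and its all() future; the bootstrap address is a placeholder.

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ScramCredentialInfo;
import org.apache.kafka.clients.admin.UserScramCredentialsDescription;

public class DescribeScramCredentialsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            Map<String, UserScramCredentialsDescription> byUser =
                admin.describeUserScramCredentials().all().get();
            byUser.forEach((user, description) -> {
                for (ScramCredentialInfo info : description.credentialInfos()) {
                    System.out.printf("%s: %s with %d iterations%n",
                        user, info.mechanism(), info.iterations());
                }
            });
        }
    }
}
```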
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/package-summary.html b/static/41/javadoc/org/apache/kafka/clients/admin/package-summary.html new file mode 100644 index 000000000..98795e514 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/package-summary.html @@ -0,0 +1,841 @@ + + + + +org.apache.kafka.clients.admin (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+
+

Package org.apache.kafka.clients.admin

+
+
+
package org.apache.kafka.clients.admin
+
+
Provides a Kafka client for performing administrative operations (such as creating topics and configuring brokers) on a Kafka cluster.
+
+
+ +
+
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/admin/package-tree.html b/static/41/javadoc/org/apache/kafka/clients/admin/package-tree.html new file mode 100644 index 000000000..69a834bc9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/admin/package-tree.html @@ -0,0 +1,307 @@ + + + + +org.apache.kafka.clients.admin Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+
+

Hierarchy For Package org.apache.kafka.clients.admin

+Package Hierarchies: + +
+
+

Class Hierarchy

+ +
+
+

Interface Hierarchy

+ +
+
+

Enum Class Hierarchy

+ +
+
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/AcknowledgeType.html b/static/41/javadoc/org/apache/kafka/clients/consumer/AcknowledgeType.html new file mode 100644 index 000000000..0af99ff31 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/AcknowledgeType.html @@ -0,0 +1,288 @@ + + + + +AcknowledgeType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Enum Class AcknowledgeType

+
+
java.lang.Object +
java.lang.Enum<AcknowledgeType> +
org.apache.kafka.clients.consumer.AcknowledgeType
+
+
+
+
+
All Implemented Interfaces:
+
Serializable, Comparable<AcknowledgeType>, Constable
+
+
+
@Evolving +public enum AcknowledgeType +extends Enum<AcknowledgeType>
+
The acknowledge type is used with KafkaShareConsumer.acknowledge(ConsumerRecord, AcknowledgeType) to indicate + whether the record was consumed successfully.
+
+
+ +
+
+
    + +
  • +
    +

    Enum Constant Details

    +
      +
    • +
      +

      ACCEPT

      +
      public static final AcknowledgeType ACCEPT
      +
      The record was consumed successfully.
      +
      +
    • +
    • +
      +

      RELEASE

      +
      public static final AcknowledgeType RELEASE
      +
      The record was not consumed successfully. Release it for another delivery attempt.
      +
      +
    • +
    • +
      +

      REJECT

      +
      public static final AcknowledgeType REJECT
      +
      The record was not consumed successfully. Reject it and do not release it for another delivery attempt.
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Field Details

    +
      +
    • +
      +

      id

      +
      public final byte id
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public static AcknowledgeType[] values()
      +
      Returns an array containing the constants of this enum class, in +the order they are declared.
      +
      +
      Returns:
      +
      an array containing the constants of this enum class, in the order they are declared
      +
      +
      +
    • +
    • +
      +

      valueOf

      +
      public static AcknowledgeType valueOf(String name)
      +
      Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
      +
      +
      Parameters:
      +
      name - the name of the enum constant to be returned.
      +
      Returns:
      +
      the enum constant with the specified name
      +
      Throws:
      +
      IllegalArgumentException - if this enum class has no constant with the specified name
      +
      NullPointerException - if the argument is null
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Enum<AcknowledgeType>
      +
      +
      +
    • +
    • +
      +

      forId

      +
      public static AcknowledgeType forId(byte id)
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
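A hedged sketch of how the three acknowledge types are typically used from a share consumer, assuming the KafkaShareConsumer API introduced by KIP-932 (constructor taking a config map, plus subscribe, poll, acknowledge, and commitSync); the bootstrap address, group id, topic, and process() handler are placeholders.

```java
import java.time.Duration;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.AcknowledgeType;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaShareConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class AcknowledgeTypeExample {
    public static void main(String[] args) {
        Map<String, Object> config = Map.of(
            ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092",     // placeholder
            ConsumerConfig.GROUP_ID_CONFIG, "orders-share-group",          // placeholder share group
            ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class,
            ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        try (KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(config)) {
            consumer.subscribe(List.of("orders"));                          // placeholder topic
            while (true) {
                for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofSeconds(1))) {
                    try {
                        process(record);                                    // hypothetical handler
                        consumer.acknowledge(record, AcknowledgeType.ACCEPT);
                    } catch (RuntimeException e) {
                        consumer.acknowledge(record, AcknowledgeType.RELEASE); // allow redelivery
                    }
                }
                consumer.commitSync(); // commit the acknowledgements recorded above
            }
        }
    }

    private static void process(ConsumerRecord<String, String> record) { /* application logic */ }
}
```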
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/AcknowledgementCommitCallback.html b/static/41/javadoc/org/apache/kafka/clients/consumer/AcknowledgementCommitCallback.html new file mode 100644 index 000000000..7da80aca5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/AcknowledgementCommitCallback.html @@ -0,0 +1,149 @@ + + + + +AcknowledgementCommitCallback (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Interface AcknowledgementCommitCallback

+
+
+
+
@Evolving +public interface AcknowledgementCommitCallback
+
A callback interface that the user can implement to trigger custom actions when an acknowledgement completes. + The callback may be executed in any thread calling ShareConsumer.poll(java.time.Duration).
+
+
+
    + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    +
    void
    +
    onComplete(Map<TopicIdPartition,Set<Long>> offsets, + Exception exception)
    +
    +
    A callback method the user can implement to provide asynchronous handling of acknowledgement completion.
    +
    +
    +
    +
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      onComplete

      +
      void onComplete(Map<TopicIdPartition,Set<Long>> offsets, + Exception exception)
      +
      A callback method the user can implement to provide asynchronous handling of acknowledgement completion. + This method will be called when the acknowledgement request sent to the server has been completed.
      +
      +
      Parameters:
      +
      offsets - A map of the offsets that this callback applies to.
      +
      exception - The exception thrown during processing of the request, or null if the acknowledgement completed successfully. +

      +

      Note that even if the exception is a retriable exception, the acknowledgement could not be completed and the + records need to be fetched again. The callback is called after any retries have been performed.

      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
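A small sketch of an implementation; per KIP-932 it would typically be registered with the share consumer (e.g. via a setAcknowledgementCommitCallback method, assumed here) before polling starts.

```java
import java.util.Map;
import java.util.Set;
import org.apache.kafka.clients.consumer.AcknowledgementCommitCallback;
import org.apache.kafka.common.TopicIdPartition;

// Logs the outcome of each completed acknowledgement batch.
public class LoggingAckCallback implements AcknowledgementCommitCallback {
    @Override
    public void onComplete(Map<TopicIdPartition, Set<Long>> offsets, Exception exception) {
        if (exception != null) {
            // Even retriable failures end up here after retries; the records will be redelivered.
            System.err.println("Acknowledgement failed for " + offsets.keySet() + ": " + exception);
        } else {
            offsets.forEach((partition, acked) ->
                System.out.println("Acknowledged " + acked.size() + " offsets on " + partition));
        }
    }
}
```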
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/CloseOptions.GroupMembershipOperation.html b/static/41/javadoc/org/apache/kafka/clients/consumer/CloseOptions.GroupMembershipOperation.html new file mode 100644 index 000000000..2967b5747 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/CloseOptions.GroupMembershipOperation.html @@ -0,0 +1,241 @@ + + + + +CloseOptions.GroupMembershipOperation (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Enum Class CloseOptions.GroupMembershipOperation

+
+
java.lang.Object +
java.lang.Enum<CloseOptions.GroupMembershipOperation> +
org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation
+
+
+
+
+
All Implemented Interfaces:
+
Serializable, Comparable<CloseOptions.GroupMembershipOperation>, Constable
+
+
+
Enclosing class:
+
CloseOptions
+
+
+
public static enum CloseOptions.GroupMembershipOperation +extends Enum<CloseOptions.GroupMembershipOperation>
+
Enum to specify the group membership operation upon leaving group. + +
    +
  • LEAVE_GROUP: means the consumer will leave the group.
  • +
  • REMAIN_IN_GROUP: means the consumer will remain in the group.
  • +
  • DEFAULT: Applies the default behavior: +
      +
    • For static members: The consumer will remain in the group.
    • +
    • For dynamic members: The consumer will leave the group.
    • +
    +
  • +
+
+
+ +
+
+
    + +
  • +
    +

    Enum Constant Details

    + +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public static CloseOptions.GroupMembershipOperation[] values()
      +
      Returns an array containing the constants of this enum class, in +the order they are declared.
      +
      +
      Returns:
      +
      an array containing the constants of this enum class, in the order they are declared
      +
      +
      +
    • +
    • +
      +

      valueOf

      +
      public static CloseOptions.GroupMembershipOperation valueOf(String name)
      +
      Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
      +
      +
      Parameters:
      +
      name - the name of the enum constant to be returned.
      +
      Returns:
      +
      the enum constant with the specified name
      +
      Throws:
      +
      IllegalArgumentException - if this enum class has no constant with the specified name
      +
      NullPointerException - if the argument is null
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/CloseOptions.html b/static/41/javadoc/org/apache/kafka/clients/consumer/CloseOptions.html new file mode 100644 index 000000000..e3fff3648 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/CloseOptions.html @@ -0,0 +1,230 @@ + + + + +CloseOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class CloseOptions

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.CloseOptions
+
+
+
+
public class CloseOptions +extends Object
+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      timeout

      +
      public static CloseOptions timeout(Duration timeout)
      +
      Static method to create a CloseOptions with a custom timeout.
      +
      +
      Parameters:
      +
      timeout - the maximum time to wait for the consumer to close.
      +
      Returns:
      +
      a new CloseOptions instance with the specified timeout.
      +
      +
      +
    • +
    • +
      +

      groupMembershipOperation

      +
      public static CloseOptions groupMembershipOperation(CloseOptions.GroupMembershipOperation operation)
      +
      Static method to create a CloseOptions with a specified group membership operation.
      +
      +
      Parameters:
      +
      operation - the group membership operation to apply. Must be one of LEAVE_GROUP, REMAIN_IN_GROUP, + or DEFAULT.
      +
      Returns:
      +
      a new CloseOptions instance with the specified group membership operation.
      +
      +
      +
    • +
    • +
      +

      withTimeout

      +
      public CloseOptions withTimeout(Duration timeout)
      +
      Fluent method to set the timeout for the close process.
      +
      +
      Parameters:
      +
      timeout - the maximum time to wait for the consumer to close. If null, the default timeout will be used.
      +
      Returns:
      +
      this CloseOptions instance.
      +
      +
      +
    • +
    • +
      +

      withGroupMembershipOperation

      +
      public CloseOptions withGroupMembershipOperation(CloseOptions.GroupMembershipOperation operation)
      +
      Fluent method to set the group membership operation upon shutdown.
      +
      +
      Parameters:
      +
      operation - the group membership operation to apply. Must be one of LEAVE_GROUP, REMAIN_IN_GROUP, or DEFAULT.
      +
      Returns:
      +
      this CloseOptions instance.
      +
      +
      +
    • +
    • +
      +

      groupMembershipOperation

      +
      public CloseOptions.GroupMembershipOperation groupMembershipOperation()
      +
      +
    • +
    • +
      +

      timeout

      +
      public Optional<Duration> timeout()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
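A sketch chaining the factory and fluent methods documented above, assuming the Consumer.close(CloseOptions) overload from KIP-1092; remaining in the group is shown here for a static member that expects to be restarted shortly.

```java
import java.time.Duration;
import org.apache.kafka.clients.consumer.CloseOptions;
import org.apache.kafka.clients.consumer.Consumer;

public class CloseOptionsExample {
    // Close quickly but keep the membership alive so a restarted instance with the
    // same group.instance.id can take over the assignment without a rebalance.
    static void shutdown(Consumer<String, String> consumer) {
        CloseOptions options = CloseOptions
            .groupMembershipOperation(CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP)
            .withTimeout(Duration.ofSeconds(30));
        consumer.close(options); // assumes the close(CloseOptions) overload
    }
}
```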
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/CommitFailedException.html b/static/41/javadoc/org/apache/kafka/clients/consumer/CommitFailedException.html new file mode 100644 index 000000000..24e3a0f6a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/CommitFailedException.html @@ -0,0 +1,162 @@ + + + + +CommitFailedException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class CommitFailedException

+
+ +
+
+
All Implemented Interfaces:
+
Serializable
+
+
+
public class CommitFailedException +extends KafkaException
+
This exception is raised when an offset commit with KafkaConsumer.commitSync() fails + with an unrecoverable error. This can happen when a group rebalance completes before the commit + could be successfully applied. In this case, the commit cannot generally be retried because some + of the partitions may have already been assigned to another member in the group.
+
+
See Also:
+
+ +
+
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      CommitFailedException

      +
      public CommitFailedException(String message)
      +
      +
    • +
    • +
      +

      CommitFailedException

      +
      public CommitFailedException()
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
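A short sketch of handling this exception in a poll/commit loop; the topic name and handle() method are placeholders.

```java
import java.time.Duration;
import java.util.List;
import org.apache.kafka.clients.consumer.CommitFailedException;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class CommitFailedExample {
    static void pollLoop(KafkaConsumer<String, String> consumer) {
        consumer.subscribe(List.of("orders")); // placeholder topic
        while (true) {
            for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofMillis(500))) {
                handle(record); // hypothetical processing step
            }
            try {
                consumer.commitSync();
            } catch (CommitFailedException e) {
                // Not retried: after a rebalance the partitions may already belong to
                // another member, and the uncommitted records will be redelivered.
                System.err.println("Commit failed after rebalance: " + e.getMessage());
            }
        }
    }

    private static void handle(ConsumerRecord<String, String> record) { /* application logic */ }
}
```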
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/Consumer.html b/static/41/javadoc/org/apache/kafka/clients/consumer/Consumer.html new file mode 100644 index 000000000..58fc80057 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/Consumer.html @@ -0,0 +1,1029 @@ + + + + +Consumer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Interface Consumer<K,V>

+
+
+
+
All Superinterfaces:
+
AutoCloseable, Closeable
+
+
+
All Known Implementing Classes:
+
KafkaConsumer, MockConsumer
+
+
+
public interface Consumer<K,V> +extends Closeable
+
+
See Also:
+
+ +
+
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerConfig.html b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerConfig.html new file mode 100644 index 000000000..d3258e6cb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerConfig.html @@ -0,0 +1,1454 @@ + + + + +ConsumerConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ConsumerConfig

+
+
java.lang.Object +
org.apache.kafka.common.config.AbstractConfig +
org.apache.kafka.clients.consumer.ConsumerConfig
+
+
+
+
+
public class ConsumerConfig +extends AbstractConfig
+
The consumer configuration keys
+
+
+ +
+
+
    + +
  • +
    +

    Field Details

    +
      +
    • +
      +

      ASSIGN_FROM_SUBSCRIBED_ASSIGNORS

      +
      public static final List<String> ASSIGN_FROM_SUBSCRIBED_ASSIGNORS
      +
      +
    • +
    • +
      +

      GROUP_ID_CONFIG

      +
      public static final String GROUP_ID_CONFIG
      +
      group.id
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      GROUP_INSTANCE_ID_CONFIG

      +
      public static final String GROUP_INSTANCE_ID_CONFIG
      +
      group.instance.id
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      MAX_POLL_RECORDS_CONFIG

      +
      public static final String MAX_POLL_RECORDS_CONFIG
      +
      max.poll.records
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_MAX_POLL_RECORDS

      +
      public static final int DEFAULT_MAX_POLL_RECORDS
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      MAX_POLL_INTERVAL_MS_CONFIG

      +
      public static final String MAX_POLL_INTERVAL_MS_CONFIG
      +
      max.poll.interval.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SESSION_TIMEOUT_MS_CONFIG

      +
      public static final String SESSION_TIMEOUT_MS_CONFIG
      +
      session.timeout.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      HEARTBEAT_INTERVAL_MS_CONFIG

      +
      public static final String HEARTBEAT_INTERVAL_MS_CONFIG
      +
      heartbeat.interval.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      GROUP_PROTOCOL_CONFIG

      +
      public static final String GROUP_PROTOCOL_CONFIG
      +
      group.protocol
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_GROUP_PROTOCOL

      +
      public static final String DEFAULT_GROUP_PROTOCOL
      +
      +
    • +
    • +
      +

      GROUP_PROTOCOL_DOC

      +
      public static final String GROUP_PROTOCOL_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      GROUP_REMOTE_ASSIGNOR_CONFIG

      +
      public static final String GROUP_REMOTE_ASSIGNOR_CONFIG
      +
      group.remote.assignor
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_GROUP_REMOTE_ASSIGNOR

      +
      public static final String DEFAULT_GROUP_REMOTE_ASSIGNOR
      +
      +
    • +
    • +
      +

      GROUP_REMOTE_ASSIGNOR_DOC

      +
      public static final String GROUP_REMOTE_ASSIGNOR_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      BOOTSTRAP_SERVERS_CONFIG

      +
      public static final String BOOTSTRAP_SERVERS_CONFIG
      +
      bootstrap.servers
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      CLIENT_DNS_LOOKUP_CONFIG

      +
      public static final String CLIENT_DNS_LOOKUP_CONFIG
      +
      client.dns.lookup
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      ENABLE_AUTO_COMMIT_CONFIG

      +
      public static final String ENABLE_AUTO_COMMIT_CONFIG
      +
      enable.auto.commit
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      AUTO_COMMIT_INTERVAL_MS_CONFIG

      +
      public static final String AUTO_COMMIT_INTERVAL_MS_CONFIG
      +
      auto.commit.interval.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      PARTITION_ASSIGNMENT_STRATEGY_CONFIG

      +
      public static final String PARTITION_ASSIGNMENT_STRATEGY_CONFIG
      +
      partition.assignment.strategy
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      AUTO_OFFSET_RESET_CONFIG

      +
      public static final String AUTO_OFFSET_RESET_CONFIG
      +
      auto.offset.reset
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      AUTO_OFFSET_RESET_DOC

      +
      public static final String AUTO_OFFSET_RESET_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      FETCH_MIN_BYTES_CONFIG

      +
      public static final String FETCH_MIN_BYTES_CONFIG
      +
      fetch.min.bytes
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_FETCH_MIN_BYTES

      +
      public static final int DEFAULT_FETCH_MIN_BYTES
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      FETCH_MAX_BYTES_CONFIG

      +
      public static final String FETCH_MAX_BYTES_CONFIG
      +
      fetch.max.bytes
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_FETCH_MAX_BYTES

      +
      public static final int DEFAULT_FETCH_MAX_BYTES
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      FETCH_MAX_WAIT_MS_CONFIG

      +
      public static final String FETCH_MAX_WAIT_MS_CONFIG
      +
      fetch.max.wait.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_FETCH_MAX_WAIT_MS

      +
      public static final int DEFAULT_FETCH_MAX_WAIT_MS
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METADATA_MAX_AGE_CONFIG

      +
      public static final String METADATA_MAX_AGE_CONFIG
      +
      metadata.max.age.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      MAX_PARTITION_FETCH_BYTES_CONFIG

      +
      public static final String MAX_PARTITION_FETCH_BYTES_CONFIG
      +
      max.partition.fetch.bytes
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_MAX_PARTITION_FETCH_BYTES

      +
      public static final int DEFAULT_MAX_PARTITION_FETCH_BYTES
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SEND_BUFFER_CONFIG

      +
      public static final String SEND_BUFFER_CONFIG
      +
      send.buffer.bytes
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RECEIVE_BUFFER_CONFIG

      +
      public static final String RECEIVE_BUFFER_CONFIG
      +
      receive.buffer.bytes
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      CLIENT_ID_CONFIG

      +
      public static final String CLIENT_ID_CONFIG
      +
      client.id
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      CLIENT_RACK_CONFIG

      +
      public static final String CLIENT_RACK_CONFIG
      +
      client.rack
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_CLIENT_RACK

      +
      public static final String DEFAULT_CLIENT_RACK
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RECONNECT_BACKOFF_MS_CONFIG

      +
      public static final String RECONNECT_BACKOFF_MS_CONFIG
      +
      reconnect.backoff.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RECONNECT_BACKOFF_MAX_MS_CONFIG

      +
      public static final String RECONNECT_BACKOFF_MAX_MS_CONFIG
      +
      reconnect.backoff.max.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RETRY_BACKOFF_MS_CONFIG

      +
      public static final String RETRY_BACKOFF_MS_CONFIG
      +
      retry.backoff.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      ENABLE_METRICS_PUSH_CONFIG

      +
      public static final String ENABLE_METRICS_PUSH_CONFIG
      +
      enable.metrics.push
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      ENABLE_METRICS_PUSH_DOC

      +
      public static final String ENABLE_METRICS_PUSH_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RETRY_BACKOFF_MAX_MS_CONFIG

      +
      public static final String RETRY_BACKOFF_MAX_MS_CONFIG
      +
      retry.backoff.max.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METRICS_SAMPLE_WINDOW_MS_CONFIG

      +
      public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG
      +
      metrics.sample.window.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METRICS_NUM_SAMPLES_CONFIG

      +
      public static final String METRICS_NUM_SAMPLES_CONFIG
      +
      metrics.num.samples
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METRICS_RECORDING_LEVEL_CONFIG

      +
      public static final String METRICS_RECORDING_LEVEL_CONFIG
      +
      metrics.recording.level
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METRIC_REPORTER_CLASSES_CONFIG

      +
      public static final String METRIC_REPORTER_CLASSES_CONFIG
      +
      metric.reporters
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      CHECK_CRCS_CONFIG

      +
      public static final String CHECK_CRCS_CONFIG
      +
      check.crcs
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      KEY_DESERIALIZER_CLASS_CONFIG

      +
      public static final String KEY_DESERIALIZER_CLASS_CONFIG
      +
      key.deserializer
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      KEY_DESERIALIZER_CLASS_DOC

      +
      public static final String KEY_DESERIALIZER_CLASS_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      VALUE_DESERIALIZER_CLASS_CONFIG

      +
      public static final String VALUE_DESERIALIZER_CLASS_CONFIG
      +
      value.deserializer
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      VALUE_DESERIALIZER_CLASS_DOC

      +
      public static final String VALUE_DESERIALIZER_CLASS_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG

      +
      public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG
      +
      socket.connection.setup.timeout.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG

      +
      public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG
      +
      socket.connection.setup.timeout.max.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      CONNECTIONS_MAX_IDLE_MS_CONFIG

      +
      public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG
      +
      connections.max.idle.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      REQUEST_TIMEOUT_MS_CONFIG

      +
      public static final String REQUEST_TIMEOUT_MS_CONFIG
      +
      request.timeout.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_API_TIMEOUT_MS_CONFIG

      +
      public static final String DEFAULT_API_TIMEOUT_MS_CONFIG
      +
      default.api.timeout.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      INTERCEPTOR_CLASSES_CONFIG

      +
      public static final String INTERCEPTOR_CLASSES_CONFIG
      +
      interceptor.classes
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      INTERCEPTOR_CLASSES_DOC

      +
      public static final String INTERCEPTOR_CLASSES_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      EXCLUDE_INTERNAL_TOPICS_CONFIG

      +
      public static final String EXCLUDE_INTERNAL_TOPICS_CONFIG
      +
      exclude.internal.topics
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_EXCLUDE_INTERNAL_TOPICS

      +
      public static final boolean DEFAULT_EXCLUDE_INTERNAL_TOPICS
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      ISOLATION_LEVEL_CONFIG

      +
      public static final String ISOLATION_LEVEL_CONFIG
      +
      isolation.level
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      ISOLATION_LEVEL_DOC

      +
      public static final String ISOLATION_LEVEL_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_ISOLATION_LEVEL

      +
      public static final String DEFAULT_ISOLATION_LEVEL
      +
      +
    • +
    • +
      +

      ALLOW_AUTO_CREATE_TOPICS_CONFIG

      +
      public static final String ALLOW_AUTO_CREATE_TOPICS_CONFIG
      +
      allow.auto.create.topics
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DEFAULT_ALLOW_AUTO_CREATE_TOPICS

      +
      public static final boolean DEFAULT_ALLOW_AUTO_CREATE_TOPICS
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SECURITY_PROVIDERS_CONFIG

      +
      public static final String SECURITY_PROVIDERS_CONFIG
      +
      security.providers
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SHARE_ACKNOWLEDGEMENT_MODE_CONFIG

      +
      public static final String SHARE_ACKNOWLEDGEMENT_MODE_CONFIG
      +
      share.acknowledgement.mode
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Constructor Details

      ConsumerConfig
      public ConsumerConfig(Properties props)

      ConsumerConfig
      public ConsumerConfig(Map<String,Object> props)

    Method Details
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerGroupMetadata.html b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerGroupMetadata.html new file mode 100644 index 000000000..61ab0baf3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerGroupMetadata.html @@ -0,0 +1,240 @@ + + + + +ConsumerGroupMetadata (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ConsumerGroupMetadata

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.ConsumerGroupMetadata
+
+
+
+
public class ConsumerGroupMetadata +extends Object
+
A metadata struct containing the consumer group information. + Note: Any change to this class is considered public and requires a KIP.
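A common use of this struct is the exactly-once pattern in which a transactional producer commits the consumer's offsets on its behalf via sendOffsetsToTransaction. The sketch below is illustrative only and is not part of this class's documentation; it assumes an already-configured transactional KafkaProducer named producer, a subscribed KafkaConsumer named consumer, and a hypothetical currentOffsets() helper that gathers the offsets to commit.

    // Minimal sketch under the assumptions named above.
    Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = currentOffsets(); // hypothetical helper
    producer.beginTransaction();
    // ... send the processed results ...
    // The group metadata lets the broker fence zombie instances of this consumer.
    producer.sendOffsetsToTransaction(offsetsToCommit, consumer.groupMetadata());
    producer.commitTransaction();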
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ConsumerGroupMetadata

      +
      public ConsumerGroupMetadata(String groupId, + int generationId, + String memberId, + Optional<String> groupInstanceId)
      +
      +
    • +
    • +
      +

      ConsumerGroupMetadata

      +
      public ConsumerGroupMetadata(String groupId)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      groupId

      +
      public String groupId()
      +
      +
    • +
    • +
      +

      generationId

      +
      public int generationId()
      +
      +
    • +
    • +
      +

      memberId

      +
      public String memberId()
      +
      +
    • +
    • +
      +

      groupInstanceId

      +
      public Optional<String> groupInstanceId()
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerInterceptor.html b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerInterceptor.html new file mode 100644 index 000000000..cf38034f3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerInterceptor.html @@ -0,0 +1,211 @@ + + + + +ConsumerInterceptor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Interface ConsumerInterceptor<K,V>

+
+
+
+
All Superinterfaces:
+
AutoCloseable, Configurable
+
+
+
public interface ConsumerInterceptor<K,V> +extends Configurable, AutoCloseable
+
A plugin interface that allows you to intercept (and possibly mutate) records received by the consumer. A primary use-case + is for third-party components to hook into the consumer applications for custom monitoring, logging, etc. + +

+ This class will get consumer config properties via configure() method, including clientId assigned + by KafkaConsumer if not specified in the consumer config. The interceptor implementation needs to be aware that it will be + sharing consumer config namespace with other interceptors and serializers, and ensure that there are no conflicts. +

+ Exceptions thrown by ConsumerInterceptor methods will be caught, logged, but not propagated further. As a result, if + the user configures the interceptor with the wrong key and value type parameters, the consumer will not throw an exception, + just log the errors. +

+ ConsumerInterceptor callbacks are called from the same thread that invokes + KafkaConsumer.poll(java.time.Duration). +

+ Implement ClusterResourceListener to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information. + Implement Monitorable to enable the interceptor to register metrics. The following tags are automatically added to + all metrics registered: config set to interceptor.classes, and class set to the ConsumerInterceptor class name.
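As an illustration of the contract above, here is a minimal interceptor sketch (the class name and log output are invented for this example, not part of the API); it would be enabled by listing the class name under interceptor.classes:

    import java.util.Map;
    import org.apache.kafka.clients.consumer.ConsumerInterceptor;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.common.TopicPartition;

    public class LoggingConsumerInterceptor implements ConsumerInterceptor<String, String> {

        @Override
        public void configure(Map<String, ?> configs) {
            // Receives the consumer config, including the client.id assigned by the consumer.
        }

        @Override
        public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
            System.out.println("about to return " + records.count() + " records from poll()");
            return records; // may also filter or transform records before returning them
        }

        @Override
        public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
            offsets.forEach((tp, om) -> System.out.println("committed " + tp + " at " + om.offset()));
        }

        @Override
        public void close() {
            // release any resources held by the interceptor
        }
    }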

+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      onConsume

      +
      ConsumerRecords<K,V> onConsume(ConsumerRecords<K,V> records)
      +
      This is called just before the records are returned by + KafkaConsumer.poll(java.time.Duration) +

      + This method is allowed to modify consumer records, in which case the new records will be + returned. There is no limitation on number of records that could be returned from this + method. I.e., the interceptor can filter the records or generate new records. +

      + Any exception thrown by this method will be caught by the caller, logged, but not propagated to the client. +

      + Since the consumer may run multiple interceptors, a particular interceptor's onConsume() callback will be called + in the order specified by ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG. + The first interceptor in the list gets the consumed records, the following interceptor will be passed the records returned + by the previous interceptor, and so on. Since interceptors are allowed to modify records, interceptors may potentially get + the records already modified by other interceptors. However, building a pipeline of mutable interceptors that depend on the output + of the previous interceptor is discouraged, because of potential side-effects caused by interceptors potentially failing + to modify the record and throwing an exception. If one of the interceptors in the list throws an exception from onConsume(), + the exception is caught, logged, and the next interceptor is called with the records returned by the last successful interceptor + in the list, or otherwise the original consumed records.

      +
      +
      Parameters:
      +
      records - records to be consumed by the client or records returned by the previous interceptors in the list.
      +
      Returns:
      +
      records that are either modified by the interceptor or same as records passed to this method.
      +
      +
      +
    • +
    • +
      +

      onCommit

      +
      void onCommit(Map<TopicPartition,OffsetAndMetadata> offsets)
      +
      This is called when offsets get committed. +

      + Any exception thrown by this method will be ignored by the caller.

      +
      +
      Parameters:
      +
      offsets - A map of offsets by partition with associated metadata
      +
      +
      +
    • +
    • +
      +

      close

      +
      void close()
      +
      This is called when interceptor is closed
      +
      +
      Specified by:
      +
      close in interface AutoCloseable
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.Assignment.html b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.Assignment.html new file mode 100644 index 000000000..c1887ad91 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.Assignment.html @@ -0,0 +1,194 @@ + + + + +ConsumerPartitionAssignor.Assignment (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ConsumerPartitionAssignor.Assignment

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment
+
+
+
+
Enclosing interface:
+
ConsumerPartitionAssignor
+
+
+
public static final class ConsumerPartitionAssignor.Assignment +extends Object
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.GroupAssignment.html b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.GroupAssignment.html new file mode 100644 index 000000000..41b413763 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.GroupAssignment.html @@ -0,0 +1,175 @@ + + + + +ConsumerPartitionAssignor.GroupAssignment (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ConsumerPartitionAssignor.GroupAssignment

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupAssignment
+
+
+
+
Enclosing interface:
+
ConsumerPartitionAssignor
+
+
+
public static final class ConsumerPartitionAssignor.GroupAssignment +extends Object
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.GroupSubscription.html b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.GroupSubscription.html new file mode 100644 index 000000000..f5bbe4400 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.GroupSubscription.html @@ -0,0 +1,175 @@ + + + + +ConsumerPartitionAssignor.GroupSubscription (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ConsumerPartitionAssignor.GroupSubscription

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription
+
+
+
+
Enclosing interface:
+
ConsumerPartitionAssignor
+
+
+
public static final class ConsumerPartitionAssignor.GroupSubscription +extends Object
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.RebalanceProtocol.html b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.RebalanceProtocol.html new file mode 100644 index 000000000..c96beb527 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.RebalanceProtocol.html @@ -0,0 +1,254 @@ + + + + +ConsumerPartitionAssignor.RebalanceProtocol (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Enum Class ConsumerPartitionAssignor.RebalanceProtocol

+
+
java.lang.Object +
java.lang.Enum<ConsumerPartitionAssignor.RebalanceProtocol> +
org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.RebalanceProtocol
+
+
+
+
+
All Implemented Interfaces:
+
Serializable, Comparable<ConsumerPartitionAssignor.RebalanceProtocol>, Constable
+
+
+
Enclosing interface:
+
ConsumerPartitionAssignor
+
+
+
public static enum ConsumerPartitionAssignor.RebalanceProtocol +extends Enum<ConsumerPartitionAssignor.RebalanceProtocol>
+
The rebalance protocol defines partition assignment and revocation semantics. The purpose is to establish a + consistent set of rules that all consumers in a group follow in order to transfer ownership of a partition. + ConsumerPartitionAssignor implementors can claim supporting one or more rebalance protocols via the + ConsumerPartitionAssignor.supportedProtocols(), and it is their responsibility to respect the rules + of those protocols in their ConsumerPartitionAssignor.assign(Cluster, GroupSubscription) implementations. + Failures to follow the rules of the supported protocols would lead to runtime error or undefined behavior. + + The EAGER rebalance protocol requires a consumer to always revoke all its owned + partitions before participating in a rebalance event. It therefore allows a complete reshuffling of the assignment. + + COOPERATIVE rebalance protocol allows a consumer to retain its currently owned + partitions before participating in a rebalance event. The assignor should not reassign any owned partitions + immediately, but instead may indicate consumers the need for partition revocation so that the revoked + partitions can be reassigned to other consumers in the next rebalance event. This is designed for sticky assignment + logic which attempts to minimize partition reassignment with cooperative adjustments.
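For example, a custom assignor (sketched here under assumed names, not an API excerpt) can advertise cooperative support by overriding the default supportedProtocols(); the consumers in a group then use the most advanced protocol that all of their configured assignors support.

    // Inside a hypothetical ConsumerPartitionAssignor implementation:
    @Override
    public List<ConsumerPartitionAssignor.RebalanceProtocol> supportedProtocols() {
        // Claim both protocols so that EAGER remains available during mixed-version upgrades.
        return Arrays.asList(ConsumerPartitionAssignor.RebalanceProtocol.EAGER,
                             ConsumerPartitionAssignor.RebalanceProtocol.COOPERATIVE);
    }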
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.Subscription.html b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.Subscription.html new file mode 100644 index 000000000..4855dc329 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.Subscription.html @@ -0,0 +1,267 @@ + + + + +ConsumerPartitionAssignor.Subscription (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ConsumerPartitionAssignor.Subscription

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
+
+
+
+
Enclosing interface:
+
ConsumerPartitionAssignor
+
+
+
public static final class ConsumerPartitionAssignor.Subscription +extends Object
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.html b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.html new file mode 100644 index 000000000..3cb059a15 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.html @@ -0,0 +1,284 @@ + + + + +ConsumerPartitionAssignor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Interface ConsumerPartitionAssignor

+
+
+
+
All Known Implementing Classes:
+
org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignor, org.apache.kafka.clients.consumer.internals.AbstractStickyAssignor, CooperativeStickyAssignor, RangeAssignor, RoundRobinAssignor, StickyAssignor
+
+
+
public interface ConsumerPartitionAssignor
+
This interface is used to define custom partition assignment for use in + KafkaConsumer. Members of the consumer group subscribe + to the topics they are interested in and forward their subscriptions to a Kafka broker serving + as the group coordinator. The coordinator selects one member to perform the group assignment and + propagates the subscriptions of all members to it. Then assign(Cluster, GroupSubscription) is called + to perform the assignment and the results are forwarded back to each respective member +

+ In some cases, it is useful to forward additional metadata to the assignor in order to make + assignment decisions. For this, you can override subscriptionUserData(Set) and provide custom + userData in the returned Subscription. For example, to have a rack-aware assignor, an implementation + can use this user data to forward the rackId belonging to each member. +

+ The implementation can extend Configurable to get configs from consumer.
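To make the flow above concrete, here is a deliberately naive assignor sketch (class name and strategy name are invented for illustration; a real implementation would balance partitions across members and could also use subscription userData):

    import java.util.*;
    import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor;
    import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment;
    import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupAssignment;
    import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription;
    import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription;
    import org.apache.kafka.common.Cluster;
    import org.apache.kafka.common.TopicPartition;

    public class FirstMemberAssignor implements ConsumerPartitionAssignor {

        @Override
        public GroupAssignment assign(Cluster metadata, GroupSubscription groupSubscription) {
            Map<String, Subscription> subscriptions = groupSubscription.groupSubscription();

            // Collect every partition of every topic that any member subscribed to.
            Set<String> topics = new HashSet<>();
            subscriptions.values().forEach(s -> topics.addAll(s.topics()));
            List<TopicPartition> allPartitions = new ArrayList<>();
            for (String topic : topics) {
                Integer count = metadata.partitionCountForTopic(topic);
                for (int p = 0; count != null && p < count; p++)
                    allPartitions.add(new TopicPartition(topic, p));
            }

            // Naive policy: the first member gets everything, the rest get nothing.
            Map<String, Assignment> assignments = new HashMap<>();
            boolean first = true;
            for (String memberId : subscriptions.keySet()) {
                assignments.put(memberId, new Assignment(first ? allPartitions : Collections.<TopicPartition>emptyList()));
                first = false;
            }
            return new GroupAssignment(assignments);
        }

        @Override
        public String name() {
            return "first-member"; // the name used in partition.assignment.strategy
        }
    }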

+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.html b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.html new file mode 100644 index 000000000..1ca5316b1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.html @@ -0,0 +1,318 @@ + + + + +ConsumerRebalanceListener (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Interface ConsumerRebalanceListener

+
+
+
+
public interface ConsumerRebalanceListener
+
A callback interface that the user can implement to trigger custom actions when the set of partitions assigned to the + consumer changes. +

+ This is applicable when the consumer is having Kafka auto-manage group membership. If the consumer directly assigns partitions, + those partitions will never be reassigned and this callback is not applicable. +

+ When Kafka is managing the group membership, a partition re-assignment will be triggered any time the members of the group change or the subscription + of the members changes. This can occur when processes die, new process instances are added or old instances come back to life after failure. + Partition re-assignments can also be triggered by changes affecting the subscribed topics (e.g. when the number of partitions is + administratively adjusted). +

+ There are many uses for this functionality. One common use is saving offsets in a custom store. By saving offsets in + the onPartitionsRevoked(Collection) call we can ensure that any time partition assignment changes + the offset gets saved. +

+ Another use is flushing out any kind of cache of intermediate results the consumer may be keeping. For example, + consider a case where the consumer is subscribed to a topic containing user page views, and the goal is to count the + number of page views per user for each five minute window. Let's say the topic is partitioned by the user id so that + all events for a particular user go to a single consumer instance. The consumer can keep in memory a running + tally of actions per user and only flush these out to a remote data store when its cache gets too big. However if a + partition is reassigned it may want to automatically trigger a flush of this cache, before the new owner takes over + consumption. +

+ This callback will only execute in the user thread as part of the poll(long) call + whenever partition assignment changes. +

+ Under normal conditions, if a partition is reassigned from one consumer to another, then the old consumer will + always invoke onPartitionsRevoked for that partition prior to the new consumer + invoking onPartitionsAssigned for the same partition. So if offsets or other state is saved in the + onPartitionsRevoked call by one consumer member, it will be always accessible by the time the + other consumer member taking over that partition and triggering its onPartitionsAssigned callback to load the state. +

+ You can think of revocation as a graceful way to give up ownership of a partition. In some cases, the consumer may not have an opportunity to do so. + For example, if the session times out, then the partitions may be reassigned before we have a chance to revoke them gracefully. + For this case, we have a third callback onPartitionsLost(Collection). The difference between this function and + onPartitionsRevoked(Collection) is that upon invocation of onPartitionsLost(Collection), the partitions + may already be owned by some other members in the group and therefore users would not be able to commit its consumed offsets for example. + Users could implement these two functions differently (by default, + onPartitionsLost(Collection) will be calling onPartitionsRevoked(Collection) directly); for example, in the + onPartitionsLost(Collection) we should not need to store the offsets since we know these partitions are no longer owned by the consumer + at that time. +

+ During a rebalance event, the onPartitionsAssigned function will always be triggered exactly once when + the rebalance completes. That is, even if there is no newly assigned partitions for a consumer member, its onPartitionsAssigned + will still be triggered with an empty collection of partitions. As a result this function can be used also to notify when a rebalance event has happened. + With eager rebalancing, onPartitionsRevoked(Collection) will always be called at the start of a rebalance. On the other hand, onPartitionsLost(Collection) + will only be called when there were non-empty partitions that were lost. + With cooperative rebalancing, onPartitionsRevoked(Collection) and onPartitionsLost(Collection) + will only be triggered when there are non-empty partitions revoked or lost from this consumer member during a rebalance event. +

+ It is possible + for a WakeupException or InterruptException + to be raised from one of these nested invocations. In this case, the exception will be propagated to the current + invocation of KafkaConsumer.poll(java.time.Duration) in which this callback is being executed. This means it is not + necessary to catch these exceptions and re-attempt to wakeup or interrupt the consumer thread. + Also if the callback function implementation itself throws an exception, this exception will be propagated to the current + invocation of KafkaConsumer.poll(java.time.Duration) as well. +

+ Note that callbacks only serve as notification of an assignment change. + They cannot be used to express acceptance of the change. + Hence throwing an exception from a callback does not affect the assignment in any way, + as it will be propagated all the way up to the KafkaConsumer.poll(java.time.Duration) call. + If user captures the exception in the caller, the callback is still assumed successful and no further retries will be attempted. +

+ + Here is pseudo-code for a callback implementation for saving offsets: +

+ 
+   public class SaveOffsetsOnRebalance implements ConsumerRebalanceListener {
+       private Consumer<?,?> consumer;
+
+       public SaveOffsetsOnRebalance(Consumer<?,?> consumer) {
+           this.consumer = consumer;
+       }
+
+       public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
+           // save the offsets in an external store using some custom code not described here
+           for(TopicPartition partition: partitions)
+              saveOffsetInExternalStore(consumer.position(partition));
+       }
+
+       public void onPartitionsLost(Collection<TopicPartition> partitions) {
+           // do not need to save the offsets since these partitions are probably owned by other consumers already
+       }
+
+       public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
+           // read the offsets from an external store using some custom code not described here
+           for(TopicPartition partition: partitions)
+              consumer.seek(partition, readOffsetFromExternalStore(partition));
+       }
+   }
+ 
+ 
+
+
+
    + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    +
    void
    + +
    +
    A callback method the user can implement to provide handling of customized offsets on completion of a successful + partition re-assignment.
    +
    +
    default void
    + +
    +
    A callback method you can implement to provide handling of cleaning up resources for partitions that have already + been reassigned to other consumers.
    +
    +
    void
    + +
    +
    A callback method the user can implement to provide handling of offset commits to a customized store.
    +
    +
    +
    +
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      onPartitionsRevoked

      +
      void onPartitionsRevoked(Collection<TopicPartition> partitions)
      +
      A callback method the user can implement to provide handling of offset commits to a customized store. + This method will be called during a rebalance operation when the consumer has to give up some partitions. + It can also be called when consumer is being closed (KafkaConsumer.close(Duration)) + or is unsubscribing (KafkaConsumer.unsubscribe()). + It is recommended that offsets should be committed in this callback to either Kafka or a + custom offset store to prevent duplicate data. +

      + In eager rebalancing, it will always be called at the start of a rebalance and after the consumer stops fetching data. + In cooperative rebalancing, it will be called at the end of a rebalance on the set of partitions being revoked iff the set is non-empty. + For examples on usage of this API, see Usage Examples section of KafkaConsumer. +

      + It is common for the revocation callback to use the consumer instance in order to commit offsets. It is possible + for a WakeupException or InterruptException + to be raised from one of these nested invocations. In this case, the exception will be propagated to the current + invocation of KafkaConsumer.poll(java.time.Duration) in which this callback is being executed. This means it is not + necessary to catch these exceptions and re-attempt to wakeup or interrupt the consumer thread.

      +
      +
      Parameters:
      +
      partitions - The list of partitions that were assigned to the consumer and now need to be revoked (may not + include all currently assigned partitions, i.e. there may still be some partitions left)
      +
      Throws:
      +
      WakeupException - If raised from a nested call to KafkaConsumer
      +
      InterruptException - If raised from a nested call to KafkaConsumer
      +
      +
      +
    • +
    • +
      +

      onPartitionsAssigned

      +
      void onPartitionsAssigned(Collection<TopicPartition> partitions)
      +
      A callback method the user can implement to provide handling of customized offsets on completion of a successful + partition re-assignment. This method will be called after the partition re-assignment completes and before the + consumer starts fetching data, and only as the result of a poll(long) call. +

      + It is guaranteed that under normal conditions all the processes in a consumer group will execute their + onPartitionsRevoked(Collection) callback before any instance executes its + onPartitionsAssigned(Collection) callback. During exceptional scenarios, partitions may be migrated + without the old owner being notified (i.e. their onPartitionsRevoked(Collection) callback not triggered), + and later when the old owner consumer realized this event, the onPartitionsLost(Collection) callback + will be triggered by the consumer then. +

      + It is common for the assignment callback to use the consumer instance in order to query offsets. It is possible + for a WakeupException or InterruptException + to be raised from one of these nested invocations. In this case, the exception will be propagated to the current + invocation of KafkaConsumer.poll(java.time.Duration) in which this callback is being executed. This means it is not + necessary to catch these exceptions and re-attempt to wakeup or interrupt the consumer thread.

      +
      +
      Parameters:
      +
      partitions - The list of partitions that are now assigned to the consumer (previously owned partitions will + NOT be included, i.e. this list will only include newly added partitions)
      +
      Throws:
      +
      WakeupException - If raised from a nested call to KafkaConsumer
      +
      InterruptException - If raised from a nested call to KafkaConsumer
      +
      +
      +
    • +
    • +
      +

      onPartitionsLost

      +
      default void onPartitionsLost(Collection<TopicPartition> partitions)
      +
      A callback method you can implement to provide handling of cleaning up resources for partitions that have already + been reassigned to other consumers. This method will not be called during normal execution as the owned partitions would + first be revoked by calling the onPartitionsRevoked(java.util.Collection<org.apache.kafka.common.TopicPartition>), before being reassigned + to other consumers during a rebalance event. However, during exceptional scenarios when the consumer realized that it + does not own this partition any longer, i.e. not revoked via a normal rebalance event, then this method would be invoked. +

      + For example, this function is called if a consumer's session timeout has expired, or if a fatal error has been + received indicating the consumer is no longer part of the group. +

      + By default it will just trigger onPartitionsRevoked(java.util.Collection<org.apache.kafka.common.TopicPartition>); for users who want to distinguish + the handling logic of revoked partitions v.s. lost partitions, they can override the default implementation. +

      + It is possible + for a WakeupException or InterruptException + to be raised from one of these nested invocations. In this case, the exception will be propagated to the current + invocation of KafkaConsumer.poll(java.time.Duration) in which this callback is being executed. This means it is not + necessary to catch these exceptions and re-attempt to wakeup or interrupt the consumer thread.

      +
      +
      Parameters:
      +
      partitions - The list of partitions that were assigned to the consumer and now have been reassigned + to other consumers. With the current protocol this will always include all of the consumer's + previously assigned partitions, but this may change in future protocols (ie there would still + be some partitions left)
      +
      Throws:
      +
      WakeupException - If raised from a nested call to KafkaConsumer
      +
      InterruptException - If raised from a nested call to KafkaConsumer
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerRecord.html b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerRecord.html new file mode 100644 index 000000000..e3bd7a0ac --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerRecord.html @@ -0,0 +1,508 @@ + + + + +ConsumerRecord (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ConsumerRecord<K,V>

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.ConsumerRecord<K,V>
+
+
+
+
public class ConsumerRecord<K,V> +extends Object
+
A key/value pair to be received from Kafka. This also consists of a topic name and + a partition number from which the record is being received, an offset that points + to the record in a Kafka partition, and a timestamp as marked by the corresponding ProducerRecord. +

+ +

Thread Safety

+ This consumer record is not thread-safe. Concurrent access to a ConsumerRecord instance by + multiple threads may result in undefined behavior, including but not limited to the following: +
    +
  • Throwing ConcurrentModificationException (e.g., when concurrently modifying headers()).
  • +
  • Data corruption or logical errors (e.g., inconsistent state of headers or value).
  • +
  • Visibility issues (e.g., modifications by one thread not being visible to another thread).
  • +
+ +

+ In particular, the headers() method returns a mutable collection of headers. If multiple + threads access or modify these headers concurrently, it may lead to race conditions or inconsistent + states. It is the responsibility of the user to ensure that multi-threaded access is properly synchronized. + +

+ Refer to the KafkaConsumer documentation for more details on multi-threaded consumption and processing strategies.
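Because the constructors below are public, a record can also be created directly, which is convenient in unit tests. A minimal sketch (topic name and values are arbitrary):

    ConsumerRecord<String, String> record =
        new ConsumerRecord<>("my-topic", 0, 42L, "user-1", "page-view");
    assert record.topic().equals("my-topic");
    assert record.partition() == 0 && record.offset() == 42L;
    assert record.key().equals("user-1") && record.value().equals("page-view");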

+
+
+
    + +
  • +
    +

    Field Summary

    +
    Fields
    +
    +
    Modifier and Type
    +
    Field
    +
    Description
    +
    static final long
    + +
     
    +
    static final int
    + +
     
    +
    +
    +
  • + +
  • +
    +

    Constructor Summary

    +
    Constructors
    +
    +
    Constructor
    +
    Description
    +
    ConsumerRecord(String topic, + int partition, + long offset, + long timestamp, + org.apache.kafka.common.record.TimestampType timestampType, + int serializedKeySize, + int serializedValueSize, + K key, + V value, + Headers headers, + Optional<Integer> leaderEpoch)
    +
    +
    Creates a record to be received from a specified topic and partition.
    +
    +
    ConsumerRecord(String topic, + int partition, + long offset, + long timestamp, + org.apache.kafka.common.record.TimestampType timestampType, + int serializedKeySize, + int serializedValueSize, + K key, + V value, + Headers headers, + Optional<Integer> leaderEpoch, + Optional<Short> deliveryCount)
    +
    +
    Creates a record to be received from a specified topic and partition.
    +
    +
    ConsumerRecord(String topic, + int partition, + long offset, + K key, + V value)
    +
    +
    Creates a record to be received from a specified topic and partition (provided for + compatibility with Kafka 0.9 before the message format supported timestamps and before + serialized metadata were exposed).
    +
    +
    +
    +
  • + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    + + +
    +
    Get the delivery count for the record if available.
    +
    + + +
    +
    The headers (never null)
    +
    + +
    key()
    +
    +
    The key (or null if no key is specified)
    +
    + + +
    +
    Get the leader epoch for the record if available
    +
    +
    long
    + +
    +
    The position of this record in the corresponding Kafka partition.
    +
    +
    int
    + +
    +
    The partition from which this record is received
    +
    +
    int
    + +
    +
    The size of the serialized, uncompressed key in bytes.
    +
    +
    int
    + +
    +
    The size of the serialized, uncompressed value in bytes.
    +
    +
    long
    + +
    +
    The timestamp of this record, in milliseconds elapsed since unix epoch.
    +
    +
    org.apache.kafka.common.record.TimestampType
    + +
    +
    The timestamp type of this record
    +
    + + +
    +
    The topic this record is received from (never null)
    +
    + + +
     
    + + +
    +
    The value
    +
    +
    +
    +
    +
    +

    Methods inherited from class java.lang.Object

    +equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Field Details

    +
      +
    • +
      +

      NO_TIMESTAMP

      +
      public static final long NO_TIMESTAMP
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      NULL_SIZE

      +
      public static final int NULL_SIZE
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ConsumerRecord

      +
      public ConsumerRecord(String topic, + int partition, + long offset, + K key, + V value)
      +
      Creates a record to be received from a specified topic and partition (provided for + compatibility with Kafka 0.9 before the message format supported timestamps and before + serialized metadata were exposed).
      +
      +
      Parameters:
      +
      topic - The topic this record is received from
      +
      partition - The partition of the topic this record is received from
      +
      offset - The offset of this record in the corresponding Kafka partition
      +
      key - The key of the record, if one exists (null is allowed)
      +
      value - The record contents
      +
      +
      +
    • +
    • +
      +

      ConsumerRecord

      +
      public ConsumerRecord(String topic, + int partition, + long offset, + long timestamp, + org.apache.kafka.common.record.TimestampType timestampType, + int serializedKeySize, + int serializedValueSize, + K key, + V value, + Headers headers, + Optional<Integer> leaderEpoch)
      +
      Creates a record to be received from a specified topic and partition.
      +
      +
      Parameters:
      +
      topic - The topic this record is received from
      +
      partition - The partition of the topic this record is received from
      +
      offset - The offset of this record in the corresponding Kafka partition
      +
      timestamp - The timestamp of the record.
      +
      timestampType - The timestamp type
      +
      serializedKeySize - The length of the serialized key
      +
      serializedValueSize - The length of the serialized value
      +
      key - The key of the record, if one exists (null is allowed)
      +
      value - The record contents
      +
      headers - The headers of the record
      +
      leaderEpoch - Optional leader epoch of the record (may be empty for legacy record formats)
      +
      +
      +
    • +
    • +
      +

      ConsumerRecord

      +
      public ConsumerRecord(String topic, + int partition, + long offset, + long timestamp, + org.apache.kafka.common.record.TimestampType timestampType, + int serializedKeySize, + int serializedValueSize, + K key, + V value, + Headers headers, + Optional<Integer> leaderEpoch, + Optional<Short> deliveryCount)
      +
      Creates a record to be received from a specified topic and partition.
      +
      +
      Parameters:
      +
      topic - The topic this record is received from
      +
      partition - The partition of the topic this record is received from
      +
      offset - The offset of this record in the corresponding Kafka partition
      +
      timestamp - The timestamp of the record.
      +
      timestampType - The timestamp type
      +
      serializedKeySize - The length of the serialized key
      +
      serializedValueSize - The length of the serialized value
      +
      key - The key of the record, if one exists (null is allowed)
      +
      value - The record contents
      +
      headers - The headers of the record
      +
      leaderEpoch - Optional leader epoch of the record (may be empty for legacy record formats)
      +
      deliveryCount - Optional delivery count of the record (may be empty when deliveries not counted)
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      topic

      +
      public String topic()
      +
      The topic this record is received from (never null)
      +
      +
    • +
    • +
      +

      partition

      +
      public int partition()
      +
      The partition from which this record is received
      +
      +
    • +
    • +
      +

      headers

      +
      public Headers headers()
      +
      The headers (never null)
      +
      +
    • +
    • +
      +

      key

      +
      public K key()
      +
      The key (or null if no key is specified)
      +
      +
    • +
    • +
      +

      value

      +
      public V value()
      +
      The value
      +
      +
    • +
    • +
      +

      offset

      +
      public long offset()
      +
      The position of this record in the corresponding Kafka partition.
      +
      +
    • +
    • +
      +

      timestamp

      +
      public long timestamp()
      +
      The timestamp of this record, in milliseconds elapsed since unix epoch.
      +
      +
    • +
    • +
      +

      timestampType

      +
      public org.apache.kafka.common.record.TimestampType timestampType()
      +
      The timestamp type of this record
      +
      +
    • +
    • +
      +

      serializedKeySize

      +
      public int serializedKeySize()
      +
      The size of the serialized, uncompressed key in bytes. If key is null, the returned size + is -1.
      +
      +
    • +
    • +
      +

      serializedValueSize

      +
      public int serializedValueSize()
      +
      The size of the serialized, uncompressed value in bytes. If value is null, the + returned size is -1.
      +
      +
    • +
    • +
      +

      leaderEpoch

      +
      public Optional<Integer> leaderEpoch()
      +
      Get the leader epoch for the record if available
      +
      +
      Returns:
      +
      the leader epoch or empty for legacy record formats
      +
      +
      +
    • +
    • +
      +

      deliveryCount

      +
      public Optional<Short> deliveryCount()
      +
      Get the delivery count for the record if available. Deliveries + are counted for records delivered by share groups.
      +
      +
      Returns:
      +
      the delivery count or empty when deliveries not counted
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerRecords.html b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerRecords.html new file mode 100644 index 000000000..cddf8d418 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/ConsumerRecords.html @@ -0,0 +1,312 @@ + + + + +ConsumerRecords (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ConsumerRecords<K,V>

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.ConsumerRecords<K,V>
+
+
+
+
All Implemented Interfaces:
+
Iterable<ConsumerRecord<K,V>>
+
+
+
public class ConsumerRecords<K,V> +extends Object +implements Iterable<ConsumerRecord<K,V>>
+
A container that holds the list ConsumerRecord per partition for a + particular topic. There is one ConsumerRecord list for every topic + partition returned by a Consumer.poll(java.time.Duration) operation.
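The per-partition grouping makes it straightforward to process and commit one partition at a time. A sketch assuming an existing subscribed consumer (variable names and the process() helper are illustrative):

    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);
        for (ConsumerRecord<String, String> record : partitionRecords)
            process(record); // hypothetical application logic
        // Commit the offset after the last processed record for this partition.
        long nextOffset = partitionRecords.get(partitionRecords.size() - 1).offset() + 1;
        consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(nextOffset)));
    }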
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/CooperativeStickyAssignor.html b/static/41/javadoc/org/apache/kafka/clients/consumer/CooperativeStickyAssignor.html new file mode 100644 index 000000000..20d664582 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/CooperativeStickyAssignor.html @@ -0,0 +1,324 @@ + + + + +CooperativeStickyAssignor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class CooperativeStickyAssignor

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignor +
org.apache.kafka.clients.consumer.internals.AbstractStickyAssignor +
org.apache.kafka.clients.consumer.CooperativeStickyAssignor
+
+
+
+
+
+
All Implemented Interfaces:
+
ConsumerPartitionAssignor
+
+
+
public class CooperativeStickyAssignor +extends org.apache.kafka.clients.consumer.internals.AbstractStickyAssignor
+
A cooperative version of the AbstractStickyAssignor. This follows the same (sticky) + assignment logic as StickyAssignor but allows for cooperative rebalancing while the + StickyAssignor follows the eager rebalancing protocol. See + ConsumerPartitionAssignor.RebalanceProtocol for an explanation of the rebalancing protocols. +

+ Users should prefer this assignor for newer clusters. +

+ To turn on cooperative rebalancing you must set all your consumers to use this PartitionAssignor, + or implement a custom one that returns RebalanceProtocol.COOPERATIVE in + supportedProtocols(). +

+ IMPORTANT: if upgrading from 2.3 or earlier, you must follow a specific upgrade path in order to safely turn on + cooperative rebalancing. See the upgrade guide for details.
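For reference, a minimal way to select this assignor is through partition.assignment.strategy; the broker address and group id below are placeholders:

    Properties props = new Properties();
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
    props.setProperty(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
                      CooperativeStickyAssignor.class.getName());
    KafkaConsumer<String, String> consumer =
        new KafkaConsumer<>(props, new StringDeserializer(), new StringDeserializer());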

+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/GroupProtocol.html b/static/41/javadoc/org/apache/kafka/clients/consumer/GroupProtocol.html new file mode 100644 index 000000000..222230c1a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/GroupProtocol.html @@ -0,0 +1,267 @@ + + + + +GroupProtocol (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Enum Class GroupProtocol

+
+
java.lang.Object +
java.lang.Enum<GroupProtocol> +
org.apache.kafka.clients.consumer.GroupProtocol
+
+
+
+
+
All Implemented Interfaces:
+
Serializable, Comparable<GroupProtocol>, Constable
+
+
+
public enum GroupProtocol +extends Enum<GroupProtocol>
+
+
+ +
+
+
    + +
  • +
    +

    Enum Constant Details

    +
      +
    • +
      +

      CLASSIC

      +
      public static final GroupProtocol CLASSIC
      +
      Classic group protocol.
      +
      +
    • +
    • +
      +

      CONSUMER

      +
      public static final GroupProtocol CONSUMER
      +
      Consumer group protocol
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Field Details

    +
      +
    • +
      +

      name

      +
      public final String name
      +
      String representation of the group protocol.
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public static GroupProtocol[] values()
      +
      Returns an array containing the constants of this enum class, in +the order they are declared.
      +
      +
      Returns:
      +
      an array containing the constants of this enum class, in the order they are declared
      +
      +
      +
    • +
    • +
      +

      valueOf

      +
      public static GroupProtocol valueOf(String name)
      +
      Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
      +
      +
      Parameters:
      +
      name - the name of the enum constant to be returned.
      +
      Returns:
      +
      the enum constant with the specified name
      +
      Throws:
      +
      IllegalArgumentException - if this enum class has no constant with the specified name
      +
      NullPointerException - if the argument is null
      +
      +
      +
    • +
    • +
      +

      of

      +
      public static GroupProtocol of(String name)
      +
      Case-insensitive group protocol lookup by string name.
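A small usage sketch (the group.protocol config key is written as a literal string here rather than referenced via a ConsumerConfig constant):

    GroupProtocol protocol = GroupProtocol.of("CONSUMER"); // case-insensitive, returns GroupProtocol.CONSUMER
    Properties props = new Properties();
    props.setProperty("group.protocol", "consumer"); // opt in to the consumer group protocol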
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/InvalidOffsetException.html b/static/41/javadoc/org/apache/kafka/clients/consumer/InvalidOffsetException.html new file mode 100644 index 000000000..d77abda90 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/InvalidOffsetException.html @@ -0,0 +1,189 @@ + + + + +InvalidOffsetException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class InvalidOffsetException

+
+ +
+
+
All Implemented Interfaces:
+
Serializable
+
+
+
Direct Known Subclasses:
+
NoOffsetForPartitionException, OffsetOutOfRangeException
+
+
+
public abstract class InvalidOffsetException +extends KafkaException
+
Thrown when the offset for a set of partitions is invalid (either undefined or out of range), + and no reset policy has been configured.
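Applications that set auto.offset.reset=none typically catch this exception and choose a position explicitly. A sketch assuming a subscribed consumer:

    try {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        // ... process records ...
    } catch (InvalidOffsetException e) {
        // The affected partitions are reported by the exception; apply an explicit policy.
        consumer.seekToBeginning(e.partitions());
    }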
+
+
See Also:
+
+ +
+
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      InvalidOffsetException

      +
      public InvalidOffsetException(String message)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html b/static/41/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html new file mode 100644 index 000000000..3b5e4588d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html @@ -0,0 +1,2719 @@ + + + + +KafkaConsumer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class KafkaConsumer<K,V>

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.KafkaConsumer<K,V>
+
+
+
+
All Implemented Interfaces:
+
Closeable, AutoCloseable, Consumer<K,V>
+
+
+
public class KafkaConsumer<K,V> +extends Object +implements Consumer<K,V>
+
A client that consumes records from a Kafka cluster. +

+ This client transparently handles the failure of Kafka brokers, and transparently adapts as topic partitions + it fetches migrate within the cluster. This client also interacts with the broker to allow groups of + consumers to load balance consumption using consumer groups. +

+ The consumer maintains TCP connections to the necessary brokers to fetch data. + Failure to close the consumer after use will leak these connections. + The consumer is not thread-safe. See Multi-threaded Processing for more details. + +

Cross-Version Compatibility

+ This client can communicate with brokers that are version 0.10.0 or newer. Older or newer brokers may not support + certain features. For example, 0.10.0 brokers do not support offsetsForTimes, because this feature was added + in version 0.10.1. You will receive an UnsupportedVersionException + when invoking an API that is not available on the running broker version. +

+ +

Offsets and Consumer Position

+ Kafka maintains a numerical offset for each record in a partition. This offset acts as a unique identifier of + a record within that partition, and also denotes the position of the consumer in the partition. For example, a consumer + which is at position 5 has consumed records with offsets 0 through 4 and will next receive the record with offset 5. + Note that offsets are not guaranteed to be consecutive (such as compacted topic or when records have been produced + using transactions). For example, if the consumer did read a record with offset 4, but 5 is not an offset + with a record, its position might advance to 6 (or higher) directly. Similarly, if the consumer's position is 5, + but there is no record with offset 5, the consumer will return the record with the next higher offset. + There are actually two notions of position relevant to the user of the consumer: +

+ The position of the consumer gives the offset of the next record that will be given + out. It will be one larger than the highest offset the consumer has seen in that partition. It automatically advances + every time the consumer receives messages in a call to poll(Duration). +

+ The committed position is the last offset that has been stored securely. Should the + process fail and restart, this is the offset that the consumer will recover to. The consumer can either automatically commit + offsets periodically; or it can choose to control this committed position manually by calling one of the commit APIs + (e.g. commitSync and commitAsync). +

+ This distinction gives the consumer control over when a record is considered consumed. It is discussed in further + detail below. + +

Consumer Groups and Topic Subscriptions

+ + Kafka uses the concept of consumer groups to allow a pool of processes to divide the work of consuming and + processing records. These processes can either be running on the same machine or they can be + distributed over many machines to provide scalability and fault tolerance for processing. All consumer instances + sharing the same group.id will be part of the same consumer group. +

+ Each consumer in a group can dynamically set the list of topics it wants to subscribe to through one of the + subscribe APIs. Kafka will deliver each message in the + subscribed topics to one process in each consumer group. This is achieved by balancing the partitions between all + members in the consumer group so that each partition is assigned to exactly one consumer in the group. So if there + is a topic with four partitions, and a consumer group with two processes, each process would consume from two partitions. +

+ Membership in a consumer group is maintained dynamically: if a process fails, the partitions assigned to it will + be reassigned to other consumers in the same group. Similarly, if a new consumer joins the group, partitions will be moved + from existing consumers to the new one. This is known as rebalancing the group and is discussed in more + detail below. Group rebalancing is also used when new partitions are added + to one of the subscribed topics or when a new topic matching a subscribed regex + is created. The group will automatically detect the new partitions through periodic metadata refreshes and + assign them to members of the group. +

+ Conceptually you can think of a consumer group as being a single logical subscriber that happens to be made up of + multiple processes. As a multi-subscriber system, Kafka naturally supports having any number of consumer groups for a + given topic without duplicating data (additional consumers are actually quite cheap). +

+ This is a slight generalization of the functionality that is common in messaging systems. To get semantics similar to + a queue in a traditional messaging system all processes would be part of a single consumer group and hence record + delivery would be balanced over the group like with a queue. Unlike a traditional messaging system, though, you can + have multiple such groups. To get semantics similar to pub-sub in a traditional messaging system each process would + have its own consumer group, so each process would subscribe to all the records published to the topic. +

+ In addition, when group reassignment happens automatically, consumers can be notified through a ConsumerRebalanceListener, + which allows them to finish necessary application-level logic such as state cleanup, manual offset + commits, etc. See Storing Offsets Outside Kafka for more details. +

+ It is also possible for the consumer to manually assign specific partitions + (similar to the older "simple" consumer) using assign(Collection). In this case, dynamic partition + assignment and consumer group coordination will be disabled. + +

Detecting Consumer Failures

+ + After subscribing to a set of topics, the consumer will automatically join the group when poll(Duration) is + invoked. The poll API is designed to ensure consumer liveness. As long as you continue to call poll, the consumer + will stay in the group and continue to receive messages from the partitions it was assigned. Underneath the covers, + the consumer sends periodic heartbeats to the server. If the consumer crashes or is unable to send heartbeats for + a duration of session.timeout.ms, then the consumer will be considered dead and its partitions will + be reassigned. +

+ It is also possible that the consumer could encounter a "livelock" situation where it is continuing + to send heartbeats, but no progress is being made. To prevent the consumer from holding onto its partitions + indefinitely in this case, we provide a liveness detection mechanism using the max.poll.interval.ms + setting. Basically if you don't call poll at least as frequently as the configured max interval, + then the client will proactively leave the group so that another consumer can take over its partitions. When this happens, + you may see an offset commit failure (as indicated by a CommitFailedException thrown from a call to commitSync()). + This is a safety mechanism which guarantees that only active members of the group are able to commit offsets. + So to stay in the group, you must continue to call poll. +

+ The consumer provides two configuration settings to control the behavior of the poll loop: +

    +
  1. max.poll.interval.ms: By increasing the interval between expected polls, you can give + the consumer more time to handle a batch of records returned from poll(Duration). The drawback + is that increasing this value may delay a group rebalance since the consumer will only join the rebalance + inside the call to poll. You can use this setting to bound the time to finish a rebalance, but + you risk slower progress if the consumer cannot actually call poll often enough.
  2. +
  3. max.poll.records: Use this setting to limit the total records returned from a single + call to poll. This can make it easier to predict the maximum that must be handled within each poll + interval. By tuning this value, you may be able to reduce the poll interval, which will reduce the + impact of group rebalancing.
  4. +
+

+ For use cases where message processing time varies unpredictably, neither of these options may be sufficient. + The recommended way to handle these cases is to move message processing to another thread, which allows + the consumer to continue calling poll while the processor is still working. + Some care must be taken to ensure that committed offsets do not get ahead of the actual position. + Typically, you must disable automatic commits and manually commit processed offsets for records only after the + thread has finished handling them (depending on the delivery semantics you need). + Note also that you will need to pause the partition so that no new records are received + from poll until after thread has finished handling those previously returned. + +
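One hedged sketch of that pattern follows (the executor and the process() handler are invented for illustration, not a prescribed design): pause the assigned partitions, hand the batch to a worker thread, keep polling for liveness, and commit and resume once the worker has finished.

    ExecutorService worker = Executors.newSingleThreadExecutor();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        if (records.isEmpty())
            continue;
        consumer.pause(consumer.assignment());                   // stop fetching while the batch is processed
        Future<?> done = worker.submit(() -> process(records));  // hypothetical handler on another thread
        while (!done.isDone())
            consumer.poll(Duration.ofMillis(100));               // keeps group membership alive; paused partitions return nothing
        consumer.commitSync();                                   // commit only after the batch is fully handled
        consumer.resume(consumer.assignment());
    }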

Usage Examples

+ The consumer APIs offer flexibility to cover a variety of consumption use cases. Here are some examples to + demonstrate how to use them. + +

Automatic Offset Committing

+ This example demonstrates a simple usage of Kafka's consumer api that relies on automatic offset committing. +

+

+     Properties props = new Properties();
+     props.setProperty("bootstrap.servers", "localhost:9092");
+     props.setProperty("group.id", "test");
+     props.setProperty("enable.auto.commit", "true");
+     props.setProperty("auto.commit.interval.ms", "1000");
+     props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+     props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+     KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
+     consumer.subscribe(Arrays.asList("foo", "bar"));
+     while (true) {
+         ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
+         for (ConsumerRecord<String, String> record : records)
+             System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
+     }
+ 
+ + The connection to the cluster is bootstrapped by specifying a list of one or more brokers to contact using the + configuration bootstrap.servers. This list is just used to discover the rest of the brokers in the + cluster and need not be an exhaustive list of servers in the cluster (though you may want to specify more than one in + case there are servers down when the client is connecting). +

+ Setting enable.auto.commit means that offsets are committed automatically with a frequency controlled by + the config auto.commit.interval.ms. +

+ In this example the consumer is subscribing to the topics foo and bar as part of a group of consumers + called test as configured with group.id. +

+ The deserializer settings specify how to turn bytes into objects. For example, by specifying string deserializers, we + are saying that our record's key and value will just be simple strings. + +
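+ If you prefer, deserializer instances can also be passed to the constructor directly instead of being named in the configuration (a minimal sketch; note that the configure() method of a deserializer is not called when it is passed in this way):
+
+     Properties props = new Properties();
+     props.setProperty("bootstrap.servers", "localhost:9092");
+     props.setProperty("group.id", "test");
+     // No key.deserializer/value.deserializer configs are needed when instances are supplied directly.
+     KafkaConsumer<String, String> consumer =
+         new KafkaConsumer<>(props, new StringDeserializer(), new StringDeserializer());
+ 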

Manual Offset Control

+ + Instead of relying on the consumer to periodically commit consumed offsets, users can also control when records + should be considered as consumed and hence commit their offsets. This is useful when the consumption of the messages + is coupled with some processing logic and hence a message should not be considered as consumed until its processing is complete. + +

+

+     Properties props = new Properties();
+     props.setProperty("bootstrap.servers", "localhost:9092");
+     props.setProperty("group.id", "test");
+     props.setProperty("enable.auto.commit", "false");
+     props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+     props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+     KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
+     consumer.subscribe(Arrays.asList("foo", "bar"));
+     final int minBatchSize = 200;
+     List<ConsumerRecord<String, String>> buffer = new ArrayList<>();
+     while (true) {
+         ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
+         for (ConsumerRecord<String, String> record : records) {
+             buffer.add(record);
+         }
+         if (buffer.size() >= minBatchSize) {
+             insertIntoDb(buffer);
+             consumer.commitSync();
+             buffer.clear();
+         }
+     }
+ 
+ + In this example we will consume a batch of records and batch them up in memory. When we have enough records + batched, we will insert them into a database. If we allowed offsets to auto commit as in the previous example, records + would be considered consumed after they were returned to the user in poll. It would then be + possible + for our process to fail after batching the records, but before they had been inserted into the database. +

+ To avoid this, we will manually commit the offsets only after the corresponding records have been inserted into the + database. This gives us exact control of when a record is considered consumed. This raises the opposite possibility: + the process could fail in the interval after the insert into the database but before the commit (even though this + would likely just be a few milliseconds, it is a possibility). In this case the process that took over consumption + would consume from last committed offset and would repeat the insert of the last batch of data. Used in this way + Kafka provides what is often called "at-least-once" delivery guarantees, as each record will likely be delivered one + time but in failure cases could be duplicated. +

+ Note: Using automatic offset commits can also give you "at-least-once" delivery, but the requirement is that + you must consume all data returned from each call to poll(Duration) before any subsequent calls, or before + closing the consumer. If you fail to do either of these, it is possible for the committed offset + to get ahead of the consumed position, which results in missing records. The advantage of using manual offset + control is that you have direct control over when a record is considered "consumed." +

+ The above example uses commitSync to mark all received records as committed. In some cases + you may wish to have even finer control over which records have been committed by specifying an offset explicitly. + In the example below we commit the offsets after we finish handling the records in each partition. +

+

+     try {
+         while(running) {
+             ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(Long.MAX_VALUE));
+             for (TopicPartition partition : records.partitions()) {
+                 List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
+                 for (ConsumerRecord<String, String> record : partitionRecords) {
+                     System.out.println(record.offset() + ": " + record.value());
+                 }
+                 consumer.commitSync(Collections.singletonMap(partition, records.nextOffsets().get(partition)));
+             }
+         }
+     } finally {
+       consumer.close();
+     }
+ 
+ + Note: The committed offset should always be the offset of the next message that your application will read. + Thus, when calling commitSync(offsets) you should use nextRecordToBeProcessed.offset(), + or, if the ConsumerRecords is already exhausted, ConsumerRecords.nextOffsets() instead. + You should also add the leader epoch as commit metadata, which can be obtained from + ConsumerRecord.leaderEpoch() or ConsumerRecords.nextOffsets(). + +

Manual Partition Assignment

+ + In the previous examples, we subscribed to the topics we were interested in and let Kafka dynamically assign a + fair share of the partitions for those topics based on the active consumers in the group. However, in + some cases you may need finer control over the specific partitions that are assigned. For example: +

+

    +
  • If the process is maintaining some kind of local state associated with that partition (like a + local on-disk key-value store), then it should only get records for the partition it is maintaining on disk. +
  • If the process itself is highly available and will be restarted if it fails (perhaps using a + cluster management framework like YARN, Mesos, or AWS facilities, or as part of a stream processing framework). In + this case there is no need for Kafka to detect the failure and reassign the partition since the consuming process + will be restarted on another machine. +
+

+ To use this mode, instead of subscribing to the topic using subscribe, you just call + assign(Collection) with the full list of partitions that you want to consume. + +

+     String topic = "foo";
+     TopicPartition partition0 = new TopicPartition(topic, 0);
+     TopicPartition partition1 = new TopicPartition(topic, 1);
+     consumer.assign(Arrays.asList(partition0, partition1));
+ 
+ + Once assigned, you can call poll in a loop, just as in the preceding examples to consume + records. The group that the consumer specifies is still used for committing offsets, but now the set of partitions + will only change with another call to assign. Manual partition assignment does + not use group coordination, so consumer failures will not cause assigned partitions to be rebalanced. Each consumer + acts independently even if it shares a groupId with another consumer. To avoid offset commit conflicts, you should + usually ensure that the groupId is unique for each consumer instance. +

+ Note that it isn't possible to mix manual partition assignment (i.e. using assign) + with dynamic partition assignment through topic subscription (i.e. using subscribe). + +

Storing Offsets Outside Kafka

+ + The consumer application need not use Kafka's built-in offset storage; it can store offsets in a store of its own + choosing. The primary use case for this is allowing the application to store both the offset and the results of the + consumption in the same system in such a way that both the results and the offsets are stored atomically. This is not always + possible, but when it is, it will make the consumption fully atomic and give "exactly-once" semantics that are + stronger than the default "at-least-once" semantics you get with Kafka's offset commit functionality. +

+ Here are a couple of examples of this type of usage: +

    +
  • If the results of the consumption are being stored in a relational database, storing the offset in the database + as well can allow committing both the results and offset in a single transaction. Thus either the transaction will + succeed and the offset will be updated based on what was consumed or the result will not be stored and the offset + won't be updated. +
  • If the results are being stored in a local store, it may be possible to store the offset there as well. For + example, a search index could be built by subscribing to a particular partition and storing both the offset and the + indexed data together. If this is done atomically, then even if a crash causes unsynced data to be lost, + whatever is left has the corresponding offset stored as well. + This means that the indexing process that comes back having lost recent updates simply resumes indexing + from what it has, ensuring that no updates are lost. +
+

+ Each record comes with its own offset, so to manage your own offset you just need to do the following: + +

    +
  • Configure enable.auto.commit=false +
  • Use the offset provided with each ConsumerRecord to save your position. +
  • On restart restore the position of the consumer using seek(TopicPartition, long). +
+

+ This type of usage is simplest when the partition assignment is also done manually (this would be likely in the + search index use case described above). If the partition assignment is done automatically, special care is + needed to handle the case where partition assignments change. This can be done by providing a + ConsumerRebalanceListener instance in the call to subscribe(Collection, ConsumerRebalanceListener) + or subscribe(Pattern, ConsumerRebalanceListener). + For example, when partitions are taken from a consumer, the consumer will want to commit its offsets for those partitions by + implementing ConsumerRebalanceListener.onPartitionsRevoked(Collection). When partitions are assigned to a + consumer, the consumer will want to look up the offset for those new partitions and correctly initialize the consumer + to that position by implementing ConsumerRebalanceListener.onPartitionsAssigned(Collection).

+ Another common use for ConsumerRebalanceListener is to flush any caches the application maintains for + partitions that are moved elsewhere. + +
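+ For example, a listener along these lines could persist and restore positions in an external store (a minimal sketch; offsetStore and its save/read methods are hypothetical application code, and the consumer is assumed to be already constructed):
+
+     consumer.subscribe(Arrays.asList("foo"), new ConsumerRebalanceListener() {
+         @Override
+         public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
+             // Save the current position of each revoked partition to the external store.
+             for (TopicPartition partition : partitions)
+                 offsetStore.save(partition, consumer.position(partition));
+         }
+
+         @Override
+         public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
+             // Initialize each newly assigned partition from the externally stored offset.
+             for (TopicPartition partition : partitions)
+                 consumer.seek(partition, offsetStore.read(partition));
+         }
+     });
+ 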

Controlling The Consumer's Position

+ + In most use cases the consumer will simply consume records from beginning to end, periodically committing its + position (either automatically or manually). However Kafka allows the consumer to manually control its position, + moving forward or backwards in a partition at will. This means a consumer can re-consume older records, or skip to + the most recent records without actually consuming the intermediate records. +

+ There are several instances where manually controlling the consumer's position can be useful. +

+ One case is time-sensitive record processing, where it may make sense for a consumer that falls far enough behind to not + attempt to catch up by processing all records, but rather just skip to the most recent records. +

+ Another use case is for a system that maintains local state as described in the previous section. In such a system + the consumer will want to initialize its position on start-up to whatever is contained in the local store. Likewise + if the local state is destroyed (say because the disk is lost) the state may be recreated on a new machine by + re-consuming all the data and recreating the state (assuming that Kafka is retaining sufficient history). +

+ + Kafka allows the consumer to specify its new position using seek(TopicPartition, long). Special + methods for seeking to the earliest and latest offset the server maintains are also available ( + seekToBeginning(Collection) and seekToEnd(Collection) respectively). + +
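+ For instance (a minimal sketch, assuming a consumer that is already assigned or subscribed to partition 0 of a topic named "foo"):
+
+     TopicPartition partition = new TopicPartition("foo", 0);
+     // Re-consume from the start of the partition...
+     consumer.seekToBeginning(Collections.singletonList(partition));
+     // ...or skip straight to the most recent records...
+     consumer.seekToEnd(Collections.singletonList(partition));
+     // ...or move to a specific offset; the next poll() returns records starting at offset 1000,
+     // provided a record with that offset still exists.
+     consumer.seek(partition, 1000L);
+ 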

Consumption Flow Control

+ + If a consumer is assigned multiple partitions to fetch data from, it will try to consume from all of them at the same time, + effectively giving these partitions the same priority for consumption. However, in some cases consumers may want to + first focus on fetching from some subset of the assigned partitions at full speed, and only start fetching other partitions + when these partitions have little or no data to consume. + +

+ + One such case is stream processing, where the processor fetches from two topics and performs a join on these two streams. + When one of the topics lags far behind the other, the processor may want to pause fetching from the topic that is ahead + in order to let the lagging stream catch up. Another example is bootstrapping upon consumer start-up when there is + a lot of historical data to catch up on: the application usually wants to get the latest data on some of the topics before it considers + fetching other topics. + +

+ + Kafka supports dynamic control of consumption flows via pause(Collection) and resume(Collection), + which pause consumption on the specified assigned partitions and resume consumption + on the specified paused partitions, respectively, in future poll(Duration) calls. + +
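+ As an illustration (a minimal sketch; the topic names, the lagBetween helper, the maxAllowedLag threshold, and process(...) are assumptions standing in for application logic):
+
+     TopicPartition ahead = new TopicPartition("clicks", 0);
+     TopicPartition lagging = new TopicPartition("orders", 0);
+     consumer.assign(Arrays.asList(ahead, lagging));
+     // Paused partitions return no records from poll(), so the consumer can drain the lagging one first.
+     consumer.pause(Collections.singletonList(ahead));
+     while (lagBetween(ahead, lagging) > maxAllowedLag) {
+         process(consumer.poll(Duration.ofMillis(100)));
+     }
+     // Once caught up, resume fetching from the partition that was ahead.
+     consumer.resume(Collections.singletonList(ahead));
+ 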

Reading Transactional Messages

+ +

+ Transactions were introduced in Kafka 0.11.0 wherein applications can write to multiple topics and partitions atomically. + In order for this to work, consumers reading from these partitions should be configured to only read committed data. + This can be achieved by setting isolation.level=read_committed in the consumer's configuration. + +
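+ For example (a minimal sketch of the relevant configuration):
+
+     Properties props = new Properties();
+     props.setProperty("bootstrap.servers", "localhost:9092");
+     props.setProperty("group.id", "test");
+     props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+     props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+     // Only return messages from committed transactions (non-transactional messages are returned as before).
+     props.setProperty("isolation.level", "read_committed");
+     KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
+ 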

+ In read_committed mode, the consumer will read only those transactional messages which have been + successfully committed. It will continue to read non-transactional messages as before. There is no client-side + buffering in read_committed mode. Instead, the end offset of a partition for a read_committed + consumer would be the offset of the first message in the partition belonging to an open transaction. This offset + is known as the 'Last Stable Offset'(LSO).

+ +

+ A read_committed consumer will only read up to the LSO and filter out any transactional + messages which have been aborted. The LSO also affects the behavior of seekToEnd(Collection) and + endOffsets(Collection) for read_committed consumers, details of which are in each method's documentation. + Finally, the fetch lag metrics are also adjusted to be relative to the LSO for read_committed consumers. + +

+ + Partitions with transactional messages will include commit or abort markers which indicate the result of a transaction. + These markers are not returned to applications, yet they have an offset in the log. As a result, applications reading from + topics with transactional messages will see gaps in the consumed offsets. These missing messages would be the transaction + markers, and they are filtered out for consumers in both isolation levels. Additionally, applications using + read_committed consumers may also see gaps due to aborted transactions, since those messages would not + be returned by the consumer and yet would have valid offsets. + +

Multi-threaded Processing

+ + The Kafka consumer is NOT thread-safe. It is the responsibility of the user to ensure that multi-threaded access + is properly synchronized. Un-synchronized access will result in ConcurrentModificationException. + +

+ The only exception to this rule is wakeup(), which can safely be used from an external thread to + interrupt an active operation. In this case, a WakeupException will be + thrown from the thread blocking on the operation. This can be used to shut down the consumer from another thread. + The following snippet shows the typical pattern: + +

+ public class KafkaConsumerRunner implements Runnable {
+     private final AtomicBoolean closed = new AtomicBoolean(false);
+     private final KafkaConsumer consumer;
+
+     public KafkaConsumerRunner(KafkaConsumer consumer) {
+       this.consumer = consumer;
+     }
+
+     @Override
+     public void run() {
+         try {
+             consumer.subscribe(Arrays.asList("topic"));
+             while (!closed.get()) {
+                 ConsumerRecords records = consumer.poll(Duration.ofMillis(10000));
+                 // Handle new records
+             }
+         } catch (WakeupException e) {
+             // Ignore exception if closing
+             if (!closed.get()) throw e;
+         } finally {
+             consumer.close();
+         }
+     }
+
+     // Shutdown hook which can be called from a separate thread
+     public void shutdown() {
+         closed.set(true);
+         consumer.wakeup();
+     }
+ }
+ 
+ + Then in a separate thread, the consumer can be shut down by setting the closed flag and waking up the consumer. + +

+

+     closed.set(true);
+     consumer.wakeup();
+ 
+ +

+ Note that while it is possible to use thread interrupts instead of wakeup() to abort a blocking operation + (in which case, InterruptException will be raised), we discourage their use since they may cause a clean + shutdown of the consumer to be aborted. Interrupts are mainly supported for those cases where using wakeup() + is impossible, e.g. when a consumer thread is managed by code that is unaware of the Kafka client. + +

+ We have intentionally avoided implementing a particular threading model for processing. This leaves several + options for implementing multi-threaded processing of records. + +

1. One Consumer Per Thread

+ + A simple option is to give each thread its own consumer instance. Here are the pros and cons of this approach: +
    +
  • PRO: It is the easiest to implement +
  • PRO: It is often the fastest as no inter-thread co-ordination is needed +
  • PRO: It makes in-order processing on a per-partition basis very easy to implement (each thread just + processes messages in the order it receives them). +
  • CON: More consumers means more TCP connections to the cluster (one per thread). In general Kafka handles + connections very efficiently so this is generally a small cost. +
  • CON: Multiple consumers means more requests being sent to the server and slightly less batching of data + which can cause some drop in I/O throughput. +
  • CON: The number of total threads across all processes will be limited by the total number of partitions. +
+ +

2. Decouple Consumption and Processing

+ + Another alternative is to have one or more consumer threads that do all data consumption and hand off + ConsumerRecords instances to a blocking queue consumed by a pool of processor threads that actually handle + the record processing. + + This option likewise has pros and cons: +
    +
  • PRO: This option allows independently scaling the number of consumers and processors. This makes it + possible to have a single consumer that feeds many processor threads, avoiding any limitation on partitions. +
  • CON: Guaranteeing order across the processors requires particular care, as the threads will execute + independently: an earlier chunk of data may actually be processed after a later chunk of data just due to the luck of + thread execution timing. For processing that has no ordering requirements this is not a problem. +
  • CON: Manually committing the position becomes harder as it requires that all threads co-ordinate to ensure + that processing is complete for that partition. +
+ + There are many possible variations on this approach. For example, each processor thread can have its own queue, and + the consumer threads can hash into these queues using the TopicPartition to ensure in-order consumption and simplify + commit, as sketched below.
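+ A rough sketch of that variation (the worker count, queue capacity, and the running flag are assumptions, and the worker threads that drain the queues are omitted):
+
+     int numWorkers = 4;
+     List<BlockingQueue<List<ConsumerRecord<String, String>>>> queues = new ArrayList<>();
+     for (int i = 0; i < numWorkers; i++)
+         queues.add(new ArrayBlockingQueue<>(16));   // one bounded queue per processor thread
+
+     try {
+         while (running) {
+             ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
+             for (TopicPartition partition : records.partitions()) {
+                 // The same partition always maps to the same queue, preserving per-partition order.
+                 int queueIndex = Math.floorMod(partition.hashCode(), numWorkers);
+                 queues.get(queueIndex).put(records.records(partition));
+             }
+         }
+     } catch (InterruptedException e) {
+         Thread.currentThread().interrupt();
+     }
+ 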
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      KafkaConsumer

      +
      public KafkaConsumer(Map<String,Object> configs)
      +
      A consumer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings + are documented here. Values can be + either strings or objects of the appropriate type (for example a numeric configuration would accept either the + string "42" or the integer 42). +

      + Valid configuration strings are documented at ConsumerConfig. +

      + Note: after creating a KafkaConsumer you must always close() it to avoid resource leaks.

      +
      +
      Parameters:
      +
      configs - The consumer configs
      +
      +
      +
    • +
    • +
      +

      KafkaConsumer

      +
      public KafkaConsumer(Properties properties)
      +
      A consumer is instantiated by providing a Properties object as configuration. +

      + Valid configuration strings are documented at ConsumerConfig. +

      + Note: after creating a KafkaConsumer you must always close() it to avoid resource leaks.

      +
      +
      Parameters:
      +
      properties - The consumer configuration properties
      +
      +
      +
    • +
    • +
      +

      KafkaConsumer

      +
      public KafkaConsumer(Properties properties, + Deserializer<K> keyDeserializer, + Deserializer<V> valueDeserializer)
      +
      A consumer is instantiated by providing a Properties object as configuration, and a + key and a value Deserializer. +

      + Valid configuration strings are documented at ConsumerConfig. +

      + Note: after creating a KafkaConsumer you must always close() it to avoid resource leaks.

      +
      +
      Parameters:
      +
      properties - The consumer configuration properties
      +
      keyDeserializer - The deserializer for key that implements Deserializer. The configure() method + won't be called in the consumer when the deserializer is passed in directly.
      +
      valueDeserializer - The deserializer for value that implements Deserializer. The configure() method + won't be called in the consumer when the deserializer is passed in directly.
      +
      +
      +
    • +
    • +
      +

      KafkaConsumer

      +
      public KafkaConsumer(Map<String,Object> configs, + Deserializer<K> keyDeserializer, + Deserializer<V> valueDeserializer)
      +
      A consumer is instantiated by providing a set of key-value pairs as configuration, and a key and a value Deserializer. +

      + Valid configuration strings are documented at ConsumerConfig. +

      + Note: after creating a KafkaConsumer you must always close() it to avoid resource leaks.

      +
      +
      Parameters:
      +
      configs - The consumer configs
      +
      keyDeserializer - The deserializer for key that implements Deserializer. The configure() method + won't be called in the consumer when the deserializer is passed in directly.
      +
      valueDeserializer - The deserializer for value that implements Deserializer. The configure() method + won't be called in the consumer when the deserializer is passed in directly.
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      assignment

      +
      public Set<TopicPartition> assignment()
      +
      Get the set of partitions currently assigned to this consumer. If subscription happened by directly assigning + partitions using assign(Collection) then this will simply return the same partitions that + were assigned. If topic subscription was used, then this will give the set of topic partitions currently assigned + to the consumer (which may be none if the assignment hasn't happened yet, or the partitions are in the + process of getting reassigned).
      +
      +
      Specified by:
      +
      assignment in interface Consumer<K,V>
      +
      Returns:
      +
      The set of partitions currently assigned to this consumer
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      subscription

      +
      public Set<String> subscription()
      +
      Get the current subscription. Will return the same topics used in the most recent call to + subscribe(Collection, ConsumerRebalanceListener), or an empty set if no such call has been made.
      +
      +
      Specified by:
      +
      subscription in interface Consumer<K,V>
      +
      Returns:
      +
      The set of topics currently subscribed to
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      subscribe

      +
      public void subscribe(Collection<String> topics, + ConsumerRebalanceListener listener)
      +
      Subscribe to the given list of topics to get dynamically + assigned partitions. Topic subscriptions are not incremental. This list will replace the current + assignment (if there is one). Note that it is not possible to combine topic subscription with group management + with manual partition assignment through assign(Collection). + + If the given list of topics is empty, it is treated the same as unsubscribe(). + +

      + As part of group management, the consumer will keep track of the list of consumers that belong to a particular + group and will trigger a rebalance operation if any one of the following events are triggered: +

        +
      • Number of partitions change for any of the subscribed topics +
      • A subscribed topic is created or deleted +
      • An existing member of the consumer group is shutdown or fails +
      • A new member is added to the consumer group +
      +

      + When any of these events are triggered, the provided listener will be invoked first to indicate that + the consumer's assignment has been revoked, and then again when the new assignment has been received. + Note that rebalances will only occur during an active call to poll(Duration), so callbacks will + also only be invoked during that time. + + The provided listener will immediately override any listener set in a previous call to subscribe. + It is guaranteed, however, that the partitions revoked/assigned through this interface are from topics + subscribed in this call. See ConsumerRebalanceListener for more details.

      +
      +
      Specified by:
      +
      subscribe in interface Consumer<K,V>
      +
      Parameters:
      +
      topics - The list of topics to subscribe to
      +
      listener - Non-null listener instance to get notifications on partition assignment/revocation for the + subscribed topics
      +
      Throws:
      +
      IllegalArgumentException - If topics is null or contains null or empty elements, or if listener is null
      +
      IllegalStateException - If subscribe() is called previously with pattern, or assign is called + previously (without a subsequent call to unsubscribe()), or if no + partition assignment strategy has been configured
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      subscribe

      +
      public void subscribe(Collection<String> topics)
      +
      Subscribe to the given list of topics to get dynamically assigned partitions. + Topic subscriptions are not incremental. This list will replace the current + assignment (if there is one). It is not possible to combine topic subscription with group management + with manual partition assignment through assign(Collection). + + If the given list of topics is empty, it is treated the same as unsubscribe(). + +

      + This is a short-hand for subscribe(Collection, ConsumerRebalanceListener), which + uses a no-op listener. If you need the ability to seek to particular offsets, you should prefer + subscribe(Collection, ConsumerRebalanceListener), since group rebalances will cause partition offsets + to be reset. You should also provide your own listener if you are doing your own offset + management since the listener gives you an opportunity to commit offsets before a rebalance finishes.

      +
      +
      Specified by:
      +
      subscribe in interface Consumer<K,V>
      +
      Parameters:
      +
      topics - The list of topics to subscribe to
      +
      Throws:
      +
      IllegalArgumentException - If topics is null or contains null or empty elements
      +
      IllegalStateException - If subscribe() is called previously with pattern, or assign is called + previously (without a subsequent call to unsubscribe()), or if no + partition assignment strategy has been configured
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      subscribe

      +
      public void subscribe(Pattern pattern, + ConsumerRebalanceListener listener)
      +
      Subscribe to all topics matching specified pattern to get dynamically assigned partitions. + The pattern matching will be done periodically against all topics existing at the time of check. + This can be controlled through the metadata.max.age.ms configuration: by lowering + the max metadata age, the consumer will refresh metadata more often and check for matching topics. +

      + See subscribe(Collection, ConsumerRebalanceListener) for details on the + use of the ConsumerRebalanceListener. Generally rebalances are triggered when there + is a change to the topics matching the provided pattern and when consumer group membership changes. + Group rebalances only take place during an active call to poll(Duration).

      +
      +
      Specified by:
      +
      subscribe in interface Consumer<K,V>
      +
      Parameters:
      +
      pattern - Pattern to subscribe to
      +
      listener - Non-null listener instance to get notifications on partition assignment/revocation for the + subscribed topics
      +
      Throws:
      +
      IllegalArgumentException - If pattern or listener is null
      +
      IllegalStateException - If subscribe() is called previously with topics, or assign is called + previously (without a subsequent call to unsubscribe()), or if no + partition assignment strategy has been configured
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      subscribe

      +
      public void subscribe(Pattern pattern)
      +
      Subscribe to all topics matching specified pattern to get dynamically assigned partitions. + The pattern matching will be done periodically against topics existing at the time of check. +

      + This is a short-hand for subscribe(Pattern, ConsumerRebalanceListener), which + uses a no-op listener. If you need the ability to seek to particular offsets, you should prefer + subscribe(Pattern, ConsumerRebalanceListener), since group rebalances will cause partition offsets + to be reset. You should also provide your own listener if you are doing your own offset + management since the listener gives you an opportunity to commit offsets before a rebalance finishes.

      +
      +
      Specified by:
      +
      subscribe in interface Consumer<K,V>
      +
      Parameters:
      +
      pattern - Pattern to subscribe to
      +
      Throws:
      +
      IllegalArgumentException - If pattern is null
      +
      IllegalStateException - If subscribe() is called previously with topics, or assign is called + previously (without a subsequent call to unsubscribe()), or if no + partition assignment strategy has been configured
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      subscribe

      +
      public void subscribe(SubscriptionPattern pattern, + ConsumerRebalanceListener listener)
      +
      Subscribe to all topics matching the specified pattern, to get dynamically assigned partitions. + The pattern matching will be done periodically against all topics. This is only supported under the + CONSUMER group protocol (see ConsumerConfig.GROUP_PROTOCOL_CONFIG). +

      + If the provided pattern is not compatible with Google RE2/J, an InvalidRegularExpression will be + eventually thrown on a call to poll(Duration) following this call to subscribe. +

      + See subscribe(Collection, ConsumerRebalanceListener) for details on the + use of the ConsumerRebalanceListener. Generally, rebalances are triggered when there + is a change to the topics matching the provided pattern and when consumer group membership changes. + Group rebalances only take place during an active call to poll(Duration).

      +
      +
      Specified by:
      +
      subscribe in interface Consumer<K,V>
      +
      Parameters:
      +
      pattern - Pattern to subscribe to, that must be compatible with Google RE2/J.
      +
      listener - Non-null listener instance to get notifications on partition assignment/revocation for the + subscribed topics.
      +
      Throws:
      +
      IllegalArgumentException - If pattern is null or empty, or if the listener is null.
      +
      IllegalStateException - If subscribe() is called previously with topics, or assign is called + previously (without a subsequent call to unsubscribe()).
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      subscribe

      +
      public void subscribe(SubscriptionPattern pattern)
      +
      Subscribe to all topics matching the specified pattern, to get dynamically assigned partitions. + The pattern matching will be done periodically against topics. This is only supported under the + CONSUMER group protocol (see ConsumerConfig.GROUP_PROTOCOL_CONFIG) +

      + If the provided pattern is not compatible with Google RE2/J, an InvalidRegularExpression will be + eventually thrown on a call to poll(Duration) following this call to subscribe. +

      + This is a short-hand for subscribe(SubscriptionPattern, ConsumerRebalanceListener), which + uses a no-op listener. If you need the ability to seek to particular offsets, you should prefer + subscribe(SubscriptionPattern, ConsumerRebalanceListener), since group rebalances will cause partition offsets + to be reset. You should also provide your own listener if you are doing your own offset + management since the listener gives you an opportunity to commit offsets before a rebalance finishes.

      +
      +
      Specified by:
      +
      subscribe in interface Consumer<K,V>
      +
      Parameters:
      +
      pattern - Pattern to subscribe to, that must be compatible with Google RE2/J.
      +
      Throws:
      +
      IllegalArgumentException - If pattern is null or empty.
      +
      IllegalStateException - If subscribe() is called previously with topics, or assign is called + previously (without a subsequent call to unsubscribe()).
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      unsubscribe

      +
      public void unsubscribe()
      +
      Unsubscribe from topics currently subscribed with subscribe(Collection) or subscribe(Pattern). + This also clears any partitions directly assigned through assign(Collection).
      +
      +
      Specified by:
      +
      unsubscribe in interface Consumer<K,V>
      +
      Throws:
      +
      KafkaException - for any other unrecoverable errors (e.g. rebalance callback errors)
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      assign

      +
      public void assign(Collection<TopicPartition> partitions)
      +
      Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment + and will replace the previous assignment (if there is one). +

      + If the given list of topic partitions is empty, it is treated the same as unsubscribe(). +

      + Manual topic assignment through this method does not use the consumer's group management + functionality. As such, there will be no rebalance operation triggered when group membership or cluster and topic + metadata change. Note that it is not possible to use both manual partition assignment with assign(Collection) + and group assignment with subscribe(Collection, ConsumerRebalanceListener). +

      + If auto-commit is enabled, an async commit (based on the old assignment) will be triggered before the new + assignment replaces the old one.

      +
      +
      Specified by:
      +
      assign in interface Consumer<K,V>
      +
      Parameters:
      +
      partitions - The list of partitions to assign this consumer
      +
      Throws:
      +
      IllegalArgumentException - If partitions is null or contains null or empty topics
      +
      IllegalStateException - If subscribe() is called previously with topics or pattern + (without a subsequent call to unsubscribe())
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      poll

      +
      public ConsumerRecords<K,V> poll(Duration timeout)
      +
      Fetch data for the topics or partitions specified using one of the subscribe/assign APIs. It is an error to not have + subscribed to any topics or partitions before polling for data. +

      + On each poll, the consumer will try to use the last consumed offset as the starting offset and fetch sequentially. The last + consumed offset can be manually set through seek(TopicPartition, long) or automatically set as the last committed + offset for the subscribed list of partitions. + +

      + This method returns immediately if there are records available or if the position advances past control records + or aborted transactions when isolation.level=read_committed. + Otherwise, it will await the passed timeout. If the timeout expires, an empty record set will be returned. + Note that this method may block beyond the timeout in order to execute custom + ConsumerRebalanceListener callbacks.

      +
      +
      Specified by:
      +
      poll in interface Consumer<K,V>
      +
      Parameters:
      +
      timeout - The maximum time to block (must not be greater than Long.MAX_VALUE milliseconds)
      +
      Returns:
      +
      map of topic to records since the last fetch for the subscribed list of topics and partitions
      +
      Throws:
      +
      InvalidOffsetException - if the offset for a partition or set of + partitions is undefined or out of range and no offset reset policy has been configured
      +
      WakeupException - if wakeup() is called before or while this + function is called
      +
      InterruptException - if the calling thread is interrupted before or while + this function is called
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if caller lacks Read access to any of the subscribed + topics or to the configured groupId. See the exception for more details
      +
      KafkaException - for any other unrecoverable errors (e.g. invalid groupId or + session timeout, errors deserializing key/value pairs, your rebalance callback thrown exceptions, + or any new error cases in future versions)
      +
      IllegalArgumentException - if the timeout value is negative
      +
      IllegalStateException - if the consumer is not subscribed to any topics or manually assigned any + partitions to consume from
      +
      ArithmeticException - if the timeout is greater than Long.MAX_VALUE milliseconds.
      +
      InvalidTopicException - if the current subscription contains any invalid + topic (per Topic.validate(String))
      +
      UnsupportedVersionException - if the consumer attempts to fetch stable offsets + when the broker doesn't support this feature. Also, if the consumer attempts to subscribe to a + SubscriptionPattern via subscribe(SubscriptionPattern) or + subscribe(SubscriptionPattern, ConsumerRebalanceListener) and the broker doesn't + support this feature.
      +
      FencedInstanceIdException - if this consumer instance gets fenced by broker.
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      commitSync

      +
      public void commitSync()
      +
      Commit offsets returned on the last poll() for all the subscribed list of topics and + partitions. +

      + This commits offsets only to Kafka. The offsets committed using this API will be used on the first fetch after + every rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API + should not be used. +

      + This is a synchronous commit and will block until either the commit succeeds, an unrecoverable error is + encountered (in which case it is thrown to the caller), or the timeout specified by default.api.timeout.ms expires + (in which case a TimeoutException is thrown to the caller). +

      + Note that asynchronous offset commits sent previously with the commitAsync(OffsetCommitCallback) + (or similar) are guaranteed to have their callbacks invoked prior to completion of this method, + but only when the consumer is using the consumer group protocol.

      +
      +
      Specified by:
      +
      commitSync in interface Consumer<K,V>
      +
      Throws:
      +
      CommitFailedException - if the commit failed and cannot be retried. + This fatal error can only occur if you are using automatic group management with subscribe(Collection), + or if there is an active group with the same group.id which is using group management. In such cases, + when you are trying to commit to partitions that are no longer assigned to this consumer because the + consumer is, for example, no longer part of the group, this exception would be thrown.
      +
      RebalanceInProgressException - if the consumer instance is in the middle of a rebalance + so it is not yet determined which partitions would be assigned to the consumer. In such cases you can first + complete the rebalance by calling poll(Duration) and commit can be reconsidered afterwards. + NOTE when you reconsider committing after the rebalance, the assigned partitions may have changed, + and also for those partitions that are still assigned their fetch positions may have changed too + if more records are returned from the poll(Duration) call.
      +
      WakeupException - if wakeup() is called before or while this + function is called
      +
      InterruptException - if the calling thread is interrupted before or while + this function is called
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the topic or to the + configured groupId. See the exception for more details
      +
      KafkaException - for any other unrecoverable errors (e.g. if offset metadata + is too large or if the topic does not exist).
      +
      TimeoutException - if the timeout specified by default.api.timeout.ms expires + before successful completion of the offset commit
      +
      FencedInstanceIdException - if this consumer is using the classic group protocol + and this instance gets fenced by broker.
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      commitSync

      +
      public void commitSync(Duration timeout)
      +
      Commit offsets returned on the last poll() for all the subscribed list of topics and + partitions. +

      + This commits offsets only to Kafka. The offsets committed using this API will be used on the first fetch after + every rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API + should not be used. +

      + This is a synchronous commit and will block until either the commit succeeds, an unrecoverable error is + encountered (in which case it is thrown to the caller), or the passed timeout expires. +

      + Note that asynchronous offset commits sent previously with the commitAsync(OffsetCommitCallback) + (or similar) are guaranteed to have their callbacks invoked prior to completion of this method, + but only when the consumer is using the consumer group protocol.

      +
      +
      Specified by:
      +
      commitSync in interface Consumer<K,V>
      +
      Throws:
      +
      CommitFailedException - if the commit failed and cannot be retried. + This can only occur if you are using automatic group management with subscribe(Collection), + or if there is an active group with the same group.id which is using group management. In such cases, + when you are trying to commit to partitions that are no longer assigned to this consumer because the + consumer is, for example, no longer part of the group, this exception would be thrown.
      +
      RebalanceInProgressException - if the consumer instance is in the middle of a rebalance + so it is not yet determined which partitions would be assigned to the consumer. In such cases you can first + complete the rebalance by calling poll(Duration) and commit can be reconsidered afterwards. + NOTE when you reconsider committing after the rebalance, the assigned partitions may have changed, + and also for those partitions that are still assigned their fetch positions may have changed too + if more records are returned from the poll(Duration) call.
      +
      WakeupException - if wakeup() is called before or while this + function is called
      +
      InterruptException - if the calling thread is interrupted before or while + this function is called
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the topic or to the + configured groupId. See the exception for more details
      +
      KafkaException - for any other unrecoverable errors (e.g. if offset metadata + is too large or if the topic does not exist).
      +
      TimeoutException - if the timeout expires before successful completion + of the offset commit
      +
      FencedInstanceIdException - if this consumer is using the classic group protocol + and this instance gets fenced by broker.
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      commitSync

      +
      public void commitSync(Map<TopicPartition,OffsetAndMetadata> offsets)
      +
      Commit the specified offsets for the specified list of topics and partitions. +

      + This commits offsets to Kafka. The offsets committed using this API will be used on the first fetch after every + rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API + should not be used. The committed offset should be the next message your application will consume, + i.e. nextRecordToBeProcessed.offset() (or ConsumerRecords.nextOffsets()). + You should also add the leader epoch as commit metadata, which can be obtained from + ConsumerRecord.leaderEpoch() or ConsumerRecords.nextOffsets(). + If automatic group management with subscribe(Collection) is used, + then the committed offsets must belong to the currently auto-assigned partitions. +

      + This is a synchronous commit and will block until either the commit succeeds or an unrecoverable error is + encountered (in which case it is thrown to the caller), or the timeout specified by default.api.timeout.ms expires + (in which case a TimeoutException is thrown to the caller). +

      + Note that asynchronous offset commits sent previously with the commitAsync(OffsetCommitCallback) + (or similar) are guaranteed to have their callbacks invoked prior to completion of this method, + but only when the consumer is using the consumer group protocol.

      +
      +
      Specified by:
      +
      commitSync in interface Consumer<K,V>
      +
      Parameters:
      +
      offsets - A map of offsets by partition with associated metadata. This map will be copied internally, so it + is safe to mutate the map after returning.
      +
      Throws:
      +
      CommitFailedException - if the commit failed and cannot be retried. + This can only occur if you are using automatic group management with subscribe(Collection), + or if there is an active group with the same group.id which is using group management. In such cases, + when you are trying to commit to partitions that are no longer assigned to this consumer because the + consumer is, for example, no longer part of the group, this exception would be thrown.
      +
      RebalanceInProgressException - if the consumer instance is in the middle of a rebalance + so it is not yet determined which partitions would be assigned to the consumer. In such cases you can first + complete the rebalance by calling poll(Duration) and commit can be reconsidered afterwards. + NOTE when you reconsider committing after the rebalance, the assigned partitions may have changed, + and also for those partitions that are still assigned their fetch positions may have changed too + if more records are returned from the poll(Duration) call, so when you retry committing + you should consider updating the passed in offset parameter.
      +
      WakeupException - if wakeup() is called before or while this + function is called
      +
      InterruptException - if the calling thread is interrupted before or while + this function is called
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the topic or to the + configured groupId. See the exception for more details
      +
      IllegalArgumentException - if the committed offset is negative
      +
      KafkaException - for any other unrecoverable errors (e.g. if offset metadata + is too large or if the topic does not exist).
      +
      TimeoutException - if the timeout expires before successful completion + of the offset commit
      +
      FencedInstanceIdException - if this consumer is using the classic group protocol + and this instance gets fenced by broker.
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      commitSync

      +
      public void commitSync(Map<TopicPartition,OffsetAndMetadata> offsets, + Duration timeout)
      +
      Commit the specified offsets for the specified list of topics and partitions. +

      + This commits offsets to Kafka. The offsets committed using this API will be used on the first fetch after every + rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API + should not be used. The committed offset should be the next message your application will consume, + i.e. nextRecordToBeProcessed.offset() (or ConsumerRecords.nextOffsets()). + You should also add the leader epoch as commit metadata, which can be obtained from + ConsumerRecord.leaderEpoch() or ConsumerRecords.nextOffsets(). + If automatic group management with subscribe(Collection) is used, + then the committed offsets must belong to the currently auto-assigned partitions. +

      + This is a synchronous commit and will block until either the commit succeeds, an unrecoverable error is + encountered (in which case it is thrown to the caller), or the timeout expires. +

      + Note that asynchronous offset commits sent previously with the commitAsync(OffsetCommitCallback) + (or similar) are guaranteed to have their callbacks invoked prior to completion of this method, + but only when the consumer is using the consumer group protocol.

      +
      +
      Specified by:
      +
      commitSync in interface Consumer<K,V>
      +
      Parameters:
      +
      offsets - A map of offsets by partition with associated metadata. This map will be copied internally, so it + is safe to mutate the map after returning.
      +
      timeout - The maximum amount of time to await completion of the offset commit
      +
      Throws:
      +
      CommitFailedException - if the commit failed and cannot be retried. + This can only occur if you are using automatic group management with subscribe(Collection), + or if there is an active group with the same group.id which is using group management. In such cases, + when you are trying to commit to partitions that are no longer assigned to this consumer because the + consumer is, for example, no longer part of the group, this exception would be thrown.
      +
      RebalanceInProgressException - if the consumer instance is in the middle of a rebalance + so it is not yet determined which partitions would be assigned to the consumer. In such cases you can first + complete the rebalance by calling poll(Duration) and commit can be reconsidered afterwards. + NOTE when you reconsider committing after the rebalance, the assigned partitions may have changed, + and also for those partitions that are still assigned their fetch positions may have changed too + if more records are returned from the poll(Duration) call, so when you retry committing + you should consider updating the passed in offset parameter.
      +
      WakeupException - if wakeup() is called before or while this + function is called
      +
      InterruptException - if the calling thread is interrupted before or while + this function is called
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the topic or to the + configured groupId. See the exception for more details
      +
      IllegalArgumentException - if the committed offset is negative
      +
      KafkaException - for any other unrecoverable errors (e.g. if offset metadata + is too large or if the topic does not exist).
      +
      TimeoutException - if the timeout expires before successful completion + of the offset commit
      +
      FencedInstanceIdException - if this consumer is using the classic group protocol + and this instance gets fenced by broker.
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      commitAsync

      +
      public void commitAsync()
      +
      Commit offsets returned on the last poll(Duration) for all the subscribed list of topics and partitions. + Same as commitAsync(null).
      +
      +
      Specified by:
      +
      commitAsync in interface Consumer<K,V>
      +
      Throws:
      +
      FencedInstanceIdException - if this consumer is using the classic group protocol + and this instance gets fenced by broker.
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      commitAsync

      +
      public void commitAsync(OffsetCommitCallback callback)
      +
      Commit offsets returned on the last poll() for the subscribed list of topics and partitions. +

      + This commits offsets only to Kafka. The offsets committed using this API will be used on the first fetch after + every rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API + should not be used. +

      + This is an asynchronous call and will not block. Any errors encountered are either passed to the callback + (if provided) or discarded. +

      + Offsets committed through multiple calls to this API are guaranteed to be sent in the same order as + the invocations. Corresponding commit callbacks are also invoked in the same order. Additionally note that + offsets committed through this API are guaranteed to complete before a subsequent call to commitSync() + (and variants) returns.

      +
      +
      Specified by:
      +
      commitAsync in interface Consumer<K,V>
      +
      Parameters:
      +
      callback - Callback to invoke when the commit completes
      +
      Throws:
      +
      FencedInstanceIdException - if this consumer is using the classic group protocol + and this instance gets fenced by broker.
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      commitAsync

      +
      public void commitAsync(Map<TopicPartition,OffsetAndMetadata> offsets, + OffsetCommitCallback callback)
      +
      Commit the specified offsets for the specified list of topics and partitions to Kafka. +

      + This commits offsets to Kafka. The offsets committed using this API will be used on the first fetch after every + rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API + should not be used. The committed offset should be the next message your application will consume, + i.e. nextRecordToBeProcessed.offset() (or ConsumerRecords.nextOffsets()). + You should also add the leader epoch as commit metadata, which can be obtained from + ConsumerRecord.leaderEpoch() or ConsumerRecords.nextOffsets(). + If automatic group management with subscribe(Collection) is used, + then the committed offsets must belong to the currently auto-assigned partitions. +

      + This is an asynchronous call and will not block. Any errors encountered are either passed to the callback + (if provided) or discarded. +

      + Offsets committed through multiple calls to this API are guaranteed to be sent in the same order as + the invocations. Corresponding commit callbacks are also invoked in the same order. Additionally note that + offsets committed through this API are guaranteed to complete before a subsequent call to commitSync() + (and variants) returns.

      +
      +
      Specified by:
      +
      commitAsync in interface Consumer<K,V>
      +
      Parameters:
      +
      offsets - A map of offsets by partition with associated metadata. This map will be copied internally, so it + is safe to mutate the map after returning.
      +
      callback - Callback to invoke when the commit completes
      +
      Throws:
      +
      FencedInstanceIdException - if this consumer is using the classic group protocol + and this instance gets fenced by broker.
      +
      See Also:
      +
      + +
      +
      +
      +
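As a rough sketch of the manual-offset pattern described above (assuming an already-subscribed KafkaConsumer<String, String> named consumer and a hypothetical process(record) helper), the committed offset is the next offset to consume, carries the leader epoch as metadata, and the callback inspects the asynchronous result:

    Map<TopicPartition, OffsetAndMetadata> toCommit = new HashMap<>();
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
    for (ConsumerRecord<String, String> record : records) {
        process(record); // hypothetical application logic
        // Commit the offset of the next record to consume, carrying the leader epoch as metadata.
        toCommit.put(new TopicPartition(record.topic(), record.partition()),
                     new OffsetAndMetadata(record.offset() + 1, record.leaderEpoch(), ""));
    }
    consumer.commitAsync(toCommit, (offsets, exception) -> {
        if (exception != null)
            System.err.println("Asynchronous commit failed for " + offsets + ": " + exception);
    });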
    • +
    • +
      +

      seek

      +
      public void seek(TopicPartition partition, + long offset)
      +
Overrides the fetch offsets that the consumer will use on the next poll(timeout). If this API + is invoked for the same partition more than once, the latest offset will be used on the next poll(). Note that + you may lose data if this API is arbitrarily used in the middle of consumption to reset the fetch offsets. +

      + The next Consumer Record which will be retrieved when poll() is invoked will have the offset specified, given that + a record with that offset exists (i.e. it is a valid offset). +

      + seekToBeginning(Collection) will go to the first offset in the topic. + seek(0) is equivalent to seekToBeginning for a TopicPartition with beginning offset 0, + assuming that there is a record at offset 0 still available. + seekToEnd(Collection) is equivalent to seeking to the last offset of the partition, but behavior depends on + isolation.level, so see seekToEnd(Collection) documentation for more details. +

      + Seeking to the offset smaller than the log start offset or larger than the log end offset + means an invalid offset is reached. + Invalid offset behaviour is controlled by the auto.offset.reset property. + If this is set to "earliest", the next poll will return records from the starting offset. + If it is set to "latest", it will seek to the last offset (similar to seekToEnd()). + If it is set to "none", an OffsetOutOfRangeException will be thrown. +

      + Note that, the seek offset won't change to the in-flight fetch request, it will take effect in next fetch request. + So, the consumer might wait for fetch.max.wait.ms before starting to fetch the records from desired offset.

      +
      +
      Specified by:
      +
      seek in interface Consumer<K,V>
      +
      Parameters:
      +
      partition - the TopicPartition on which the seek will be performed.
      +
      offset - the next offset returned by poll().
      +
      Throws:
      +
      IllegalArgumentException - if the provided offset is negative
      +
      IllegalStateException - if the provided TopicPartition is not assigned to this consumer
      +
      See Also:
      +
      + +
      +
      +
      +
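A minimal sketch of the behaviour described above, assuming a manually assigned KafkaConsumer<String, String> named consumer and an assumed topic "my-topic"; the seek only takes effect when poll() is next called:

    TopicPartition tp = new TopicPartition("my-topic", 0);      // assumed topic and partition
    consumer.assign(Collections.singletonList(tp));
    consumer.seek(tp, 42L);                                      // next poll starts at offset 42, if that offset exists
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
    for (ConsumerRecord<String, String> record : records)
        System.out.printf("offset = %d, value = %s%n", record.offset(), record.value());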
    • +
    • +
      +

      seek

      +
      public void seek(TopicPartition partition, + OffsetAndMetadata offsetAndMetadata)
      +
      Overrides the fetch offsets that the consumer will use on the next poll(timeout). If this API + is invoked for the same partition more than once, the latest offset will be used on the next poll(). Note that + you may lose data if this API is arbitrarily used in the middle of consumption, to reset the fetch offsets. This + method allows for setting the leaderEpoch along with the desired offset.
      +
      +
      Specified by:
      +
      seek in interface Consumer<K,V>
      +
      Throws:
      +
      IllegalArgumentException - if the provided offset is negative
      +
      IllegalStateException - if the provided TopicPartition is not assigned to this consumer
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      seekToBeginning

      +
      public void seekToBeginning(Collection<TopicPartition> partitions)
      +
      Seek to the first offset for each of the given partitions. This function evaluates lazily, seeking to the + first offset in all partitions only when poll(Duration) or position(TopicPartition) are called. + If no partitions are provided, seek to the first offset for all of the currently assigned partitions.
      +
      +
      Specified by:
      +
      seekToBeginning in interface Consumer<K,V>
      +
      Throws:
      +
      IllegalArgumentException - if partitions is null
      +
      IllegalStateException - if any of the provided partitions are not currently assigned to this consumer
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      seekToEnd

      +
      public void seekToEnd(Collection<TopicPartition> partitions)
      +
      Seek to the last offset for each of the given partitions. This function evaluates lazily, seeking to the + final offset in all partitions only when poll(Duration) or position(TopicPartition) are called. + If no partitions are provided, seek to the final offset for all of the currently assigned partitions. +

      + If isolation.level=read_committed, the end offset will be the Last Stable Offset, i.e., the offset + of the first message with an open transaction.

      +
      +
      Specified by:
      +
      seekToEnd in interface Consumer<K,V>
      +
      Throws:
      +
      IllegalArgumentException - if partitions is null
      +
      IllegalStateException - if any of the provided partitions are not currently assigned to this consumer
      +
      See Also:
      +
      + +
      +
      +
      +
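A short sketch combining seekToBeginning and seekToEnd (again assuming a manually assigned consumer); because both calls evaluate lazily, position() is used here to force them to resolve:

    TopicPartition tp = new TopicPartition("my-topic", 0);       // assumed partition
    consumer.assign(Collections.singletonList(tp));
    consumer.seekToBeginning(Collections.singletonList(tp));     // lazy: resolved on poll() or position()
    long start = consumer.position(tp);
    consumer.seekToEnd(Collections.singletonList(tp));
    long end = consumer.position(tp);                            // LSO rather than high watermark under read_committed
    System.out.printf("%s currently spans offsets [%d, %d)%n", tp, start, end);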
    • +
    • +
      +

      position

      +
      public long position(TopicPartition partition)
      +
      Get the offset of the next record that will be fetched (if a record with that offset exists). + This method may issue a remote call to the server if there is no current position for the given partition. +

      + This call will block until either the position could be determined or an unrecoverable error is + encountered (in which case it is thrown to the caller), or the timeout specified by default.api.timeout.ms expires + (in which case a TimeoutException is thrown to the caller).

      +
      +
      Specified by:
      +
      position in interface Consumer<K,V>
      +
      Parameters:
      +
      partition - The partition to get the position for
      +
      Returns:
      +
      The current position of the consumer (that is, the offset of the next record to be fetched)
      +
      Throws:
      +
      IllegalStateException - if the provided TopicPartition is not assigned to this consumer
      +
      InvalidOffsetException - if no offset is currently defined for + the partition
      +
      WakeupException - if wakeup() is called before or while this + function is called
      +
      InterruptException - if the calling thread is interrupted before or while + this function is called
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the topic or to the + configured groupId. See the exception for more details
      +
      UnsupportedVersionException - if the consumer attempts to fetch stable offsets + when the broker doesn't support this feature
      +
      KafkaException - for any other unrecoverable errors
      +
      TimeoutException - if the position cannot be determined before the + timeout specified by default.api.timeout.ms expires
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      position

      +
      public long position(TopicPartition partition, + Duration timeout)
      +
      Get the offset of the next record that will be fetched (if a record with that offset exists). + This method may issue a remote call to the server if there is no current position + for the given partition. +

      + This call will block until the position can be determined, an unrecoverable error is + encountered (in which case it is thrown to the caller), or the timeout expires.

      +
      +
      Specified by:
      +
      position in interface Consumer<K,V>
      +
      Parameters:
      +
      partition - The partition to get the position for
      +
      timeout - The maximum amount of time to await determination of the current position
      +
      Returns:
      +
      The current position of the consumer (that is, the offset of the next record to be fetched)
      +
      Throws:
      +
      IllegalStateException - if the provided TopicPartition is not assigned to this consumer
      +
      InvalidOffsetException - if no offset is currently defined for + the partition
      +
      WakeupException - if wakeup() is called before or while this + function is called
      +
      InterruptException - if the calling thread is interrupted before or while + this function is called
      +
      TimeoutException - if the position cannot be determined before the + passed timeout expires
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the topic or to the + configured groupId. See the exception for more details
      +
      KafkaException - for any other unrecoverable errors
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      committed

      +
      public Map<TopicPartition,OffsetAndMetadata> committed(Set<TopicPartition> partitions)
      +
      Get the last committed offsets for the given partitions (whether the commit happened by this process or + another). The returned offsets will be used as the position for the consumer in the event of a failure. +

      + If any of the partitions requested do not exist, an exception would be thrown. +

+ This call will do a remote call to get the latest committed offsets from the server, and will block until the + committed offsets are retrieved successfully, an unrecoverable error is encountered (in which case it is thrown to + the caller), or the timeout specified by default.api.timeout.ms expires (in which case a + TimeoutException is thrown to the caller).

      +
      +
      Specified by:
      +
      committed in interface Consumer<K,V>
      +
      Parameters:
      +
      partitions - The partitions to check
      +
      Returns:
      +
      The latest committed offsets for the given partitions; null will be returned for the + partition if there is no such message.
      +
      Throws:
      +
      WakeupException - if wakeup() is called before or while this + function is called
      +
      InterruptException - if the calling thread is interrupted before or while + this function is called
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the topic or to the + configured groupId. See the exception for more details
      +
      UnsupportedVersionException - if the consumer attempts to fetch stable offsets + when the broker doesn't support this feature
      +
      KafkaException - for any other unrecoverable errors
      +
      TimeoutException - if the committed offset cannot be found before + the timeout specified by default.api.timeout.ms expires.
      +
      See Also:
      +
      + +
      +
      +
      +
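For illustration, a sketch (assuming an assigned KafkaConsumer<String, String> named consumer) that rewinds each assigned partition to its last committed offset, skipping partitions that have no commit yet:

    Set<TopicPartition> assignment = consumer.assignment();
    Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(assignment);
    for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : committed.entrySet()) {
        if (entry.getValue() != null)                            // null means nothing has been committed yet
            consumer.seek(entry.getKey(), entry.getValue().offset());
    }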
    • +
    • +
      +

      committed

      +
      public Map<TopicPartition,OffsetAndMetadata> committed(Set<TopicPartition> partitions, + Duration timeout)
      +
      Get the last committed offsets for the given partitions (whether the commit happened by this process or + another). The returned offsets will be used as the position for the consumer in the event of a failure. +

      + If any of the partitions requested do not exist, an exception would be thrown. +

      + This call will block to do a remote call to get the latest committed offsets from the server.

      +
      +
      Specified by:
      +
      committed in interface Consumer<K,V>
      +
      Parameters:
      +
      partitions - The partitions to check
      +
      timeout - The maximum amount of time to await the latest committed offsets
      +
      Returns:
      +
      The latest committed offsets for the given partitions; null will be returned for the + partition if there is no such message.
      +
      Throws:
      +
      WakeupException - if wakeup() is called before or while this + function is called
      +
      InterruptException - if the calling thread is interrupted before or while + this function is called
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the topic or to the + configured groupId. See the exception for more details
      +
      KafkaException - for any other unrecoverable errors
      +
      TimeoutException - if the committed offset cannot be found before + expiration of the timeout
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      clientInstanceId

      +
      public Uuid clientInstanceId(Duration timeout)
      +
      Determines the client's unique client instance ID used for telemetry. This ID is unique to + this specific client instance and will not change after it is initially generated. + The ID is useful for correlating client operations with telemetry sent to the broker and + to its eventual monitoring destinations. +

      + If telemetry is enabled, this will first require a connection to the cluster to generate + the unique client instance ID. This method waits up to timeout for the consumer + client to complete the request. +

      + Client telemetry is controlled by the ConsumerConfig.ENABLE_METRICS_PUSH_CONFIG + configuration option.

      +
      +
      Specified by:
      +
      clientInstanceId in interface Consumer<K,V>
      +
      Parameters:
      +
      timeout - The maximum time to wait for consumer client to determine its client instance ID. + The value must be non-negative. Specifying a timeout of zero means do not + wait for the initial request to complete if it hasn't already.
      +
      Returns:
      +
      The client's assigned instance id used for metrics collection.
      +
      Throws:
      +
      InterruptException - If the thread is interrupted while blocked.
      +
      KafkaException - If an unexpected error occurs while trying to determine the client + instance ID, though this error does not necessarily imply the + consumer client is otherwise unusable.
      +
      IllegalArgumentException - If the timeout is negative.
      +
IllegalStateException - If telemetry is not enabled, i.e., config `enable.metrics.push` + is set to `false`.
      +
      +
      +
    • +
    • +
      +

      metrics

      +
      public Map<MetricName,? extends Metric> metrics()
      +
      Get the metrics kept by the consumer
      +
      +
      Specified by:
      +
      metrics in interface Consumer<K,V>
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      partitionsFor

      +
      public List<PartitionInfo> partitionsFor(String topic)
      +
      Get metadata about the partitions for a given topic. This method will issue a remote call to the server if it + does not already have any metadata about the given topic.
      +
      +
      Specified by:
      +
      partitionsFor in interface Consumer<K,V>
      +
      Parameters:
      +
      topic - The topic to get partition metadata for
      +
      Returns:
      +
      The list of partitions, which will be empty when the given topic is not found
      +
      Throws:
      +
      WakeupException - if wakeup() is called before or while this + function is called
      +
      InterruptException - if the calling thread is interrupted before or while + this function is called
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the specified topic. See the exception for more details
      +
      KafkaException - for any other unrecoverable errors
      +
      TimeoutException - if the offset metadata could not be fetched before + the amount of time allocated by default.api.timeout.ms expires.
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      partitionsFor

      +
      public List<PartitionInfo> partitionsFor(String topic, + Duration timeout)
      +
      Get metadata about the partitions for a given topic. This method will issue a remote call to the server if it + does not already have any metadata about the given topic.
      +
      +
      Specified by:
      +
      partitionsFor in interface Consumer<K,V>
      +
      Parameters:
      +
      topic - The topic to get partition metadata for
      +
timeout - The maximum amount of time to await topic metadata
      +
      Returns:
      +
      The list of partitions, which will be empty when the given topic is not found
      +
      Throws:
      +
      WakeupException - if wakeup() is called before or while this + function is called
      +
      InterruptException - if the calling thread is interrupted before or while + this function is called
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the specified topic. See + the exception for more details
      +
      TimeoutException - if topic metadata cannot be fetched before expiration + of the passed timeout
      +
      KafkaException - for any other unrecoverable errors
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      listTopics

      +
      public Map<String,List<PartitionInfo>> listTopics()
      +
      Get metadata about partitions for all topics that the user is authorized to view. This method will issue a + remote call to the server.
      +
      +
      Specified by:
      +
      listTopics in interface Consumer<K,V>
      +
      Returns:
      +
The map of topics and their partitions
      +
      Throws:
      +
      WakeupException - if wakeup() is called before or while this + function is called
      +
      InterruptException - if the calling thread is interrupted before or while + this function is called
      +
      KafkaException - for any other unrecoverable errors
      +
      TimeoutException - if the offset metadata could not be fetched before + the amount of time allocated by default.api.timeout.ms expires.
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      listTopics

      +
      public Map<String,List<PartitionInfo>> listTopics(Duration timeout)
      +
      Get metadata about partitions for all topics that the user is authorized to view. This method will issue a + remote call to the server.
      +
      +
      Specified by:
      +
      listTopics in interface Consumer<K,V>
      +
      Parameters:
      +
      timeout - The maximum time this operation will block to fetch topic metadata
      +
      Returns:
      +
The map of topics and their partitions
      +
      Throws:
      +
      WakeupException - if wakeup() is called before or while this + function is called
      +
      InterruptException - if the calling thread is interrupted before or while + this function is called
      +
      TimeoutException - if the topic metadata could not be fetched before + expiration of the passed timeout
      +
      KafkaException - for any other unrecoverable errors
      +
      See Also:
      +
      + +
      +
      +
      +
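A small sketch using the bounded variants of the two metadata calls above; "foo" is an assumed topic name:

    Map<String, List<PartitionInfo>> topics = consumer.listTopics(Duration.ofSeconds(5));
    topics.forEach((topic, partitions) ->
        System.out.printf("%s has %d partitions%n", topic, partitions.size()));
    List<PartitionInfo> fooPartitions = consumer.partitionsFor("foo", Duration.ofSeconds(5));
    System.out.println("foo partition metadata: " + fooPartitions);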
    • +
    • +
      +

      pause

      +
      public void pause(Collection<TopicPartition> partitions)
      +
      Suspend fetching from the requested partitions. Future calls to poll(Duration) will not return + any records from these partitions until they have been resumed using resume(Collection). + Note that this method does not affect partition subscription. In particular, it does not cause a group + rebalance when automatic assignment is used. + + Note: Rebalance will not preserve the pause/resume state.
      +
      +
      Specified by:
      +
      pause in interface Consumer<K,V>
      +
      Parameters:
      +
      partitions - The partitions which should be paused
      +
      Throws:
      +
      IllegalStateException - if any of the provided partitions are not currently assigned to this consumer
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      resume

      +
      public void resume(Collection<TopicPartition> partitions)
      +
      Resume specified partitions which have been paused with pause(Collection). New calls to + poll(Duration) will return records from these partitions if there are any to be fetched. + If the partitions were not previously paused, this method is a no-op.
      +
      +
      Specified by:
      +
      resume in interface Consumer<K,V>
      +
      Parameters:
      +
      partitions - The partitions which should be resumed
      +
      Throws:
      +
      IllegalStateException - if any of the provided partitions are not currently assigned to this consumer
      +
      See Also:
      +
      + +
      +
      +
      +
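A rough back-pressure sketch using pause/resume, assuming a subscribed KafkaConsumer<String, String> named consumer, a hypothetical ConcurrentLinkedQueue<ConsumerRecord<String, String>> named workQueue drained by worker threads, and an assumed threshold; polling continues so the consumer stays live, but fetching stops while the queue is backed up:

    final int maxQueued = 1_000;                                 // assumed back-pressure threshold
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        records.forEach(workQueue::add);                         // hand records to the (hypothetical) workers
        if (workQueue.size() >= maxQueued)
            consumer.pause(consumer.assignment());               // keep calling poll() for liveness, fetch nothing
        else if (!consumer.paused().isEmpty())
            consumer.resume(consumer.paused());
    }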
    • +
    • +
      +

      registerMetricForSubscription

      +
      public void registerMetricForSubscription(KafkaMetric metric)
      +
      Add the provided application metric for subscription. + This metric will be added to this client's metrics + that are available for subscription and sent as + telemetry data to the broker. + The provided metric must map to an OTLP metric data point + type in the OpenTelemetry v1 metrics protobuf message types. + Specifically, the metric should be one of the following: +
        +
      • + `Sum`: Monotonic total count meter (Counter). Suitable for metrics like total number of X, e.g., total bytes sent. +
      • +
      • + `Gauge`: Non-monotonic current value meter (UpDownCounter). Suitable for metrics like current value of Y, e.g., current queue count. +
      • +
+ Metrics not matching these types are silently ignored. + Executing this method for a previously registered metric is a benign operation and results in updating that metric's entry.
      +
      +
      Specified by:
      +
      registerMetricForSubscription in interface Consumer<K,V>
      +
      Parameters:
      +
      metric - The application metric to register
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      unregisterMetricFromSubscription

      +
      public void unregisterMetricFromSubscription(KafkaMetric metric)
      +
      Remove the provided application metric for subscription. + This metric is removed from this client's metrics + and will not be available for subscription any longer. + Executing this method with a metric that has not been registered is a + benign operation and does not result in any action taken (no-op).
      +
      +
      Specified by:
      +
      unregisterMetricFromSubscription in interface Consumer<K,V>
      +
      Parameters:
      +
      metric - The application metric to remove
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      paused

      +
      public Set<TopicPartition> paused()
      +
      Get the set of partitions that were previously paused by a call to pause(Collection).
      +
      +
      Specified by:
      +
      paused in interface Consumer<K,V>
      +
      Returns:
      +
      The set of paused partitions
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      offsetsForTimes

      +
      public Map<TopicPartition,OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition,Long> timestampsToSearch)
      +
      Look up the offsets for the given partitions by timestamp. The returned offset for each partition is the + earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition. + + This is a blocking call. The consumer does not have to be assigned the partitions. + If the message format version in a partition is before 0.10.0, i.e. the messages do not have timestamps, null + will be returned for that partition.
      +
      +
      Specified by:
      +
      offsetsForTimes in interface Consumer<K,V>
      +
      Parameters:
      +
      timestampsToSearch - the mapping from partition to the timestamp to look up.
      +
      Returns:
      +
      a mapping from partition to the timestamp and offset of the first message with timestamp greater + than or equal to the target timestamp. If the timestamp and offset for a specific partition cannot be found within + the default timeout, and no corresponding message exists, the entry in the returned map will be null
      +
      Throws:
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the topic(s). See the exception for more details
      +
      IllegalArgumentException - if the target timestamp is negative
      +
      TimeoutException - if the offset metadata could not be fetched before + the amount of time allocated by default.api.timeout.ms expires.
      +
      UnsupportedVersionException - if the broker does not support looking up + the offsets by timestamp
      +
      See Also:
      +
      + +
      +
      +
      +
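For example, a sketch (assuming an assigned KafkaConsumer<String, String> named consumer) that rewinds every assigned partition to the first offset at or after a point one hour in the past; a null lookup result means no such record exists in that partition:

    long oneHourAgo = System.currentTimeMillis() - Duration.ofHours(1).toMillis();
    Map<TopicPartition, Long> query = new HashMap<>();
    for (TopicPartition tp : consumer.assignment())
        query.put(tp, oneHourAgo);
    Map<TopicPartition, OffsetAndTimestamp> result = consumer.offsetsForTimes(query);
    result.forEach((tp, offsetAndTimestamp) -> {
        if (offsetAndTimestamp != null)
            consumer.seek(tp, offsetAndTimestamp.offset());      // first record with timestamp >= oneHourAgo
    });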
    • +
    • +
      +

      offsetsForTimes

      +
      public Map<TopicPartition,OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition,Long> timestampsToSearch, + Duration timeout)
      +
      Look up the offsets for the given partitions by timestamp. The returned offset for each partition is the + earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition. + + This is a blocking call. The consumer does not have to be assigned the partitions. + If the message format version in a partition is before 0.10.0, i.e. the messages do not have timestamps, null + will be returned for that partition.
      +
      +
      Specified by:
      +
      offsetsForTimes in interface Consumer<K,V>
      +
      Parameters:
      +
      timestampsToSearch - the mapping from partition to the timestamp to look up.
      +
      timeout - The maximum amount of time to await retrieval of the offsets
      +
      Returns:
      +
      a mapping from partition to the timestamp and offset of the first message with timestamp greater + than or equal to the target timestamp. If the timestamp and offset for a specific partition cannot be found within + timeout, and no corresponding message exists, the entry in the returned map will be null
      +
      Throws:
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the topic(s). See the exception for more details
      +
      IllegalArgumentException - if the target timestamp is negative
      +
      TimeoutException - if the offset metadata could not be fetched before + expiration of the passed timeout
      +
      UnsupportedVersionException - if the broker does not support looking up + the offsets by timestamp
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      beginningOffsets

      +
      public Map<TopicPartition,Long> beginningOffsets(Collection<TopicPartition> partitions)
      +
      Get the first offset for the given partitions. +

      + This method does not change the current consumer position of the partitions.

      +
      +
      Specified by:
      +
      beginningOffsets in interface Consumer<K,V>
      +
      Parameters:
      +
      partitions - the partitions to get the earliest offsets.
      +
      Returns:
      +
      The earliest available offsets for the given partitions
      +
      Throws:
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the topic(s). See the exception for more details
      +
      TimeoutException - if the offset metadata could not be fetched before + expiration of the configured default.api.timeout.ms
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      beginningOffsets

      +
      public Map<TopicPartition,Long> beginningOffsets(Collection<TopicPartition> partitions, + Duration timeout)
      +
      Get the first offset for the given partitions. +

      + This method does not change the current consumer position of the partitions.

      +
      +
      Specified by:
      +
      beginningOffsets in interface Consumer<K,V>
      +
      Parameters:
      +
      partitions - the partitions to get the earliest offsets
      +
      timeout - The maximum amount of time to await retrieval of the beginning offsets
      +
      Returns:
      +
The earliest available offsets for the given partitions; an empty map is returned if a zero timeout is provided
      +
      Throws:
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the topic(s). See the exception for more details
      +
      TimeoutException - if the offset metadata could not be fetched before + expiration of the passed timeout
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      endOffsets

      +
      public Map<TopicPartition,Long> endOffsets(Collection<TopicPartition> partitions)
      +
      Get the end offsets for the given partitions. In the default read_uncommitted isolation level, the end + offset is the high watermark (that is, the offset of the last successfully replicated message plus one). For + read_committed consumers, the end offset is the last stable offset (LSO), which is the minimum of + the high watermark and the smallest offset of any open transaction. Finally, if the partition has never been + written to, the end offset is 0. + +

      + This method does not change the current consumer position of the partitions.

      +
      +
      Specified by:
      +
      endOffsets in interface Consumer<K,V>
      +
      Parameters:
      +
      partitions - the partitions to get the end offsets.
      +
      Returns:
      +
      The end offsets for the given partitions.
      +
      Throws:
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the topic(s). See the exception for more details
      +
      TimeoutException - if the offset metadata could not be fetched before + the amount of time allocated by default.api.timeout.ms expires
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      endOffsets

      +
      public Map<TopicPartition,Long> endOffsets(Collection<TopicPartition> partitions, + Duration timeout)
      +
      Get the end offsets for the given partitions. In the default read_uncommitted isolation level, the end + offset is the high watermark (that is, the offset of the last successfully replicated message plus one). For + read_committed consumers, the end offset is the last stable offset (LSO), which is the minimum of + the high watermark and the smallest offset of any open transaction. Finally, if the partition has never been + written to, the end offset is 0. + +

      + This method does not change the current consumer position of the partitions.

      +
      +
      Specified by:
      +
      endOffsets in interface Consumer<K,V>
      +
      Parameters:
      +
      partitions - the partitions to get the end offsets.
      +
      timeout - The maximum amount of time to await retrieval of the end offsets
      +
      Returns:
      +
The end offsets for the given partitions; an empty map is returned if a zero timeout is provided
      +
      Throws:
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the topic(s). See the exception for more details
      +
      TimeoutException - if the offsets could not be fetched before + expiration of the passed timeout
      +
      See Also:
      +
      + +
      +
      +
      +
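As a rough illustration (assuming an assigned KafkaConsumer<String, String> named consumer), end offsets can be combined with position() to estimate per-partition lag; both values are point-in-time, so the result is approximate:

    Set<TopicPartition> assignment = consumer.assignment();
    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(assignment, Duration.ofSeconds(5));
    for (TopicPartition tp : assignment) {
        long lag = endOffsets.get(tp) - consumer.position(tp);   // snapshot-based estimate, never exact
        System.out.printf("lag for %s = %d%n", tp, lag);
    }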
    • +
    • +
      +

      currentLag

      +
      public OptionalLong currentLag(TopicPartition topicPartition)
      +
      Get the consumer's current lag on the partition. Returns an "empty" OptionalLong if the lag is not known, + for example if there is no position yet, or if the end offset is not known yet. + +

      + This method uses locally cached metadata. If the log end offset is not known yet, it triggers a request to fetch + the log end offset, but returns immediately.

      +
      +
      Specified by:
      +
      currentLag in interface Consumer<K,V>
      +
      Parameters:
      +
      topicPartition - The partition to get the lag for.
      +
      Returns:
      +
      This Consumer instance's current lag for the given partition.
      +
      Throws:
      +
      IllegalStateException - if the topicPartition is not assigned
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      groupMetadata

      +
      public ConsumerGroupMetadata groupMetadata()
      +
      Return the current group metadata associated with this consumer.
      +
      +
      Specified by:
      +
      groupMetadata in interface Consumer<K,V>
      +
      Returns:
      +
      consumer group metadata
      +
      Throws:
      +
      InvalidGroupIdException - if consumer does not have a group
      +
      See Also:
      +
      + +
      +
      +
      +
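The group metadata is most commonly handed to a transactional producer so that offsets committed inside a transaction are fenced against zombie instances. A minimal consume-transform-produce sketch, assuming a transactional KafkaProducer<String, String> named producer on which initTransactions() has already been called, an assumed "output-topic", and a hypothetical transform() helper:

    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
    if (!records.isEmpty()) {
        producer.beginTransaction();
        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
        for (ConsumerRecord<String, String> record : records) {
            producer.send(new ProducerRecord<>("output-topic", transform(record.value())));
            offsets.put(new TopicPartition(record.topic(), record.partition()),
                        new OffsetAndMetadata(record.offset() + 1));
        }
        // Committing offsets through the producer ties them to the transaction and to this
        // consumer's group generation, letting the broker fence commits from zombie instances.
        producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
        producer.commitTransaction();
    }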
    • +
    • +
      +

      enforceRebalance

      +
      public void enforceRebalance(String reason)
      +
      Alert the consumer to trigger a new rebalance by rejoining the group. This is a nonblocking call that forces + the consumer to trigger a new rebalance on the next poll(Duration) call. Note that this API does not + itself initiate the rebalance, so you must still call poll(Duration). If a rebalance is already in + progress this call will be a no-op. If you wish to force an additional rebalance you must complete the current + one by calling poll before retrying this API. +

      + You do not need to call this during normal processing, as the consumer group will manage itself + automatically and rebalance when necessary. However there may be situations where the application wishes to + trigger a rebalance that would otherwise not occur. For example, if some condition external and invisible to + the Consumer and its group changes in a way that would affect the userdata encoded in the + Subscription, the Consumer + will not be notified and no rebalance will occur. This API can be used to force the group to rebalance so that + the assignor can perform a partition reassignment based on the latest userdata. If your assignor does not use + this userdata, or you do not use a custom + ConsumerPartitionAssignor, you should not + use this API.

      +
      +
      Specified by:
      +
      enforceRebalance in interface Consumer<K,V>
      +
      Parameters:
      +
      reason - The reason why the new rebalance is needed.
      +
      Throws:
      +
      IllegalStateException - if the consumer does not use group subscription
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      enforceRebalance

      +
      public void enforceRebalance()
      +
      +
      Specified by:
      +
      enforceRebalance in interface Consumer<K,V>
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      close

      +
      public void close()
      +
      Close the consumer with default leave group behavior, + waiting for up to the default timeout of 30 seconds for any needed cleanup. + If auto-commit is enabled, this will commit the current offsets if possible within the default + timeout. See close(CloseOptions) for details. Note that wakeup() + cannot be used to interrupt close.
      +
      +
      Specified by:
      +
      close in interface AutoCloseable
      +
      Specified by:
      +
      close in interface Closeable
      +
      Specified by:
      +
      close in interface Consumer<K,V>
      +
      Throws:
      +
      InterruptException - if the calling thread is interrupted + before or while this function is called
      +
      KafkaException - for any other error during close
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      close

      +
      @Deprecated(since="4.1") +public void close(Duration timeout)
      +
      Deprecated.
      +
This method has been deprecated since Kafka 4.1; use close(CloseOptions) instead. +

      + Close the consumer with default leave group behavior + cleanly within the specified timeout. This method waits up to + timeout for the consumer to complete pending commits and maybe leave the group (if the member is dynamic). + If auto-commit is enabled, this will commit the current offsets if possible within the + timeout. If the consumer is unable to complete offset commits and to gracefully leave the group (if applicable) + before the timeout expires, the consumer is force closed. Note that wakeup() cannot be + used to interrupt close. +

      + The actual maximum wait time is bounded by the ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG setting, which + only applies to operations performed with the broker (coordinator-related requests and + fetch sessions). Even if a larger timeout is specified, the consumer will not wait longer than + ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG for these requests to complete during the close operation. + Note that the execution time of callbacks (such as OffsetCommitCallback and + ConsumerRebalanceListener) does not consume time from the close timeout.

      +
      +
      Specified by:
      +
      close in interface Consumer<K,V>
      +
      Parameters:
      +
      timeout - The maximum time to wait for consumer to close gracefully. The value must be + non-negative. Specifying a timeout of zero means do not wait for pending requests to complete.
      +
      Throws:
      +
      IllegalArgumentException - If the timeout is negative.
      +
      InterruptException - If the thread is interrupted before or while this function is called
      +
      KafkaException - for any other error during close
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      close

      +
      public void close(CloseOptions option)
      +
Close the consumer cleanly. CloseOptions allows you to specify a timeout and a + leave group behavior. + If no timeout is specified, the default timeout of 30 seconds is used. + If no leave group behavior is specified, the default + leave group behavior is used. +

      + This method waits up to the timeout for the consumer to complete pending commits and maybe leave the group, + depending on the specified leave group behavior. + If auto-commit is enabled, this will commit the current offsets if possible within the + timeout. If the consumer is unable to complete offset commits and to gracefully leave the group (if applicable) + before the timeout expires, the consumer is force closed. Note that wakeup() cannot be + used to interrupt close. +

      + The actual maximum wait time is bounded by the ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG setting, which + only applies to operations performed with the broker (coordinator-related requests and + fetch sessions). Even if a larger timeout is specified, the consumer will not wait longer than + ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG for these requests to complete during the close operation. + Note that the execution time of callbacks (such as OffsetCommitCallback and + ConsumerRebalanceListener) does not consume time from the close timeout.

      +
      +
      Specified by:
      +
      close in interface Consumer<K,V>
      +
      Parameters:
      +
      option - see CloseOptions; cannot be null
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      wakeup

      +
      public void wakeup()
      +
      Wakeup the consumer. This method is thread-safe and is useful in particular to abort a long poll. + The thread which is blocking in an operation will throw WakeupException. + If no thread is blocking in a method which can throw WakeupException, the next call to such a method will raise it instead.
      +
      +
      Specified by:
      +
      wakeup in interface Consumer<K,V>
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/KafkaShareConsumer.html b/static/41/javadoc/org/apache/kafka/clients/consumer/KafkaShareConsumer.html new file mode 100644 index 000000000..d90d192a1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/KafkaShareConsumer.html @@ -0,0 +1,1046 @@ + + + + +KafkaShareConsumer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class KafkaShareConsumer<K,V>

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.KafkaShareConsumer<K,V>
+
+
+
+
All Implemented Interfaces:
+
Closeable, AutoCloseable, ShareConsumer<K,V>
+
+
+
@Evolving +public class KafkaShareConsumer<K,V> +extends Object +implements ShareConsumer<K,V>
+
A client that consumes records from a Kafka cluster using a share group. +

+ This is a preview feature introduced by KIP-932. It is not yet recommended for production use. + +

Cross-Version Compatibility

+ This client can communicate with brokers that are a version that supports share groups. You will receive an + UnsupportedVersionException when invoking an API that is not + available on the running broker version. + +

Share Groups and Topic Subscriptions

+ Kafka uses the concept of share groups to allow a pool of consumers to cooperate on the work of + consuming and processing records. All consumer instances sharing the same group.id will be part of + the same share group. +

+ Each consumer in a group can dynamically set the list of topics it wants to subscribe to using the + subscribe(Collection) method. Kafka will deliver each message in the subscribed topics to one + consumer in the share group. Unlike consumer groups, share groups balance the partitions between all + members of the share group permitting multiple consumers to consume from the same partitions. This gives + more flexible sharing of records than a consumer group, at the expense of record ordering. +

+ Membership in a share group is maintained dynamically: if a consumer fails, the partitions assigned to + it will be reassigned to other consumers in the same group. Similarly, if a new consumer joins the group, + the partition assignment is re-evaluated and partitions can be moved from existing consumers to the new one. + This is known as rebalancing the group and is discussed in more detail below. + Group rebalancing is also used when new partitions are added to one of the subscribed topics. The group will + automatically detect the new partitions through periodic metadata refreshes and assign them to the members of the group. +

+ Conceptually, you can think of a share group as a single logical subscriber made up of multiple consumers. + In fact, in other messaging systems, a share group is roughly equivalent to a durable shared subscription. + You can have multiple share groups and consumer groups independently consuming from the same topics. + +

Detecting Consumer Failures

+ After subscribing to a set of topics, the consumer will automatically join the group when poll(Duration) is + invoked. This method is designed to ensure consumer liveness. As long as you continue to call poll, the consumer + will stay in the group and continue to receive records from the partitions it was assigned. Under the covers, + the consumer sends periodic heartbeats to the broker. If the consumer crashes or is unable to send heartbeats for + the duration of the share group's session time-out, then the consumer will be considered dead and its partitions + will be reassigned. +

+ It is also possible that the consumer could encounter a "livelock" situation where it is continuing to send heartbeats + in the background, but no progress is being made. To prevent the consumer from holding onto its partitions + indefinitely in this case, we provide a liveness detection mechanism using the max.poll.interval.ms setting. + If you don't call poll at least as frequently as this, the client will proactively leave the share group. + So to stay in the group, you must continue to call poll. + +

Record Delivery and Acknowledgement

+ When a consumer in a share group fetches records using poll(Duration), it receives available records from any + of the topic-partitions that match its subscriptions. Records are acquired for delivery to this consumer with a + time-limited acquisition lock. While a record is acquired, it is not available for another consumer. By default, + the lock duration is 30 seconds, but it can also be controlled using the group-level group.share.record.lock.duration.ms + configuration property. The idea is that the lock is automatically released once the lock duration has elapsed, and + then the record is available to be given to another consumer. The consumer which holds the lock can deal with it in + the following ways: +
    +
  • The consumer can acknowledge successful processing of the record
  • +
  • The consumer can release the record, which makes the record available for another delivery attempt
  • +
  • The consumer can reject the record, which indicates that the record is unprocessable and does not make + the record available for another delivery attempt
  • +
  • The consumer can do nothing, in which case the lock is automatically released when the lock duration has elapsed
  • +
+ The cluster limits the number of records acquired for consumers for each topic-partition in a share group. Once the limit + is reached, fetching records will temporarily yield no further records until the number of acquired records reduces, + as naturally happens when the locks time out. This limit is controlled by the broker configuration property + group.share.record.lock.partition.limit. By limiting the duration of the acquisition lock and automatically + releasing the locks, the broker ensures delivery progresses even in the presence of consumer failures. +

+ The consumer can choose to use implicit or explicit acknowledgement of the records it processes by using the + consumer share.acknowledgement.mode configuration property. +

+ If the application sets the property to "implicit" or does not set it at all, then the consumer is using + implicit acknowledgement. In this mode, the application acknowledges delivery by: +

    +
  • Calling poll(Duration) without committing, which also implicitly acknowledges all + the delivered records and commits the acknowledgements to Kafka asynchronously. In this case, no exception is + thrown by a failure to commit the acknowledgements.
  • +
  • Calling commitSync() or commitAsync() which implicitly acknowledges all + the delivered records as processed successfully and commits the acknowledgements to Kafka.
  • +
  • Calling close() which releases any acquired records without acknowledgement.
  • +
+ If the application sets the property to "explicit", then the consumer is using explicit acknowledgement. + The application must acknowledge all records returned from poll(Duration) using + acknowledge(ConsumerRecord, AcknowledgeType) before its next call to poll(Duration). + If the application calls poll(Duration) without having acknowledged all records, an + IllegalStateException is thrown. The remaining unacknowledged records can still be acknowledged. + In this mode, the application acknowledges delivery by: +
    +
  • Calling poll(Duration) after it has acknowledged all records, which commits the acknowledgements + to Kafka asynchronously. In this case, no exception is thrown by a failure to commit the acknowledgements.
  • +
  • Calling commitSync() or commitAsync() which commits any pending + acknowledgements to Kafka.
  • +
  • Calling close() which attempts to commit any pending acknowledgements and releases + any remaining acquired records.
  • +
+ The consumer guarantees that the records returned in the ConsumerRecords object for a specific topic-partition + are in order of increasing offset. For each topic-partition, Kafka guarantees that acknowledgements for the records + in a batch are performed atomically. This makes error handling significantly more straightforward because there can be + one error code per partition. + +

Usage Examples

+ The share consumer APIs offer flexibility to cover a variety of consumption use cases. Here are some examples to + demonstrate how to use them. + +

Acknowledging a batch of records (implicit acknowledgement)

+ This example demonstrates implicit acknowledgement using poll(Duration) to acknowledge the records which + were delivered in the previous poll. All the records delivered are implicitly marked as successfully consumed and + acknowledged synchronously with Kafka as the consumer fetches more records. +
+     Properties props = new Properties();
+     props.setProperty("bootstrap.servers", "localhost:9092");
+     props.setProperty("group.id", "test");
+     props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+     props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+
+     KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props);
+     consumer.subscribe(Arrays.asList("foo"));
+     while (true) {
+         ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
+         for (ConsumerRecord<String, String> record : records) {
+             System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
+             doProcessing(record);
+         }
+     }
+ 
+ + Alternatively, you can use commitSync() or commitAsync() to commit the acknowledgements, but this is + slightly less efficient because there is an additional request sent to Kafka. +
+     Properties props = new Properties();
+     props.setProperty("bootstrap.servers", "localhost:9092");
+     props.setProperty("group.id", "test");
+     props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+     props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+
+     KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props);
+     consumer.subscribe(Arrays.asList("foo"));
+     while (true) {
+         ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
+         for (ConsumerRecord<String, String> record : records) {
+             System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
+             doProcessing(record);
+         }
+         consumer.commitSync();
+     }
+ 
+ +

Per-record acknowledgement (explicit acknowledgement)

+ This example demonstrates using different acknowledgement types depending on the outcome of processing the records. + Here the share.acknowledgement.mode property is set to "explicit" so the consumer must explicitly acknowledge each record. +
+     Properties props = new Properties();
+     props.setProperty("bootstrap.servers", "localhost:9092");
+     props.setProperty("group.id", "test");
+     props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+     props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+     props.setProperty("share.acknowledgement.mode", "explicit");
+
+     KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props);
+     consumer.subscribe(Arrays.asList("foo"));
+     while (true) {
+         ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
+         for (ConsumerRecord<String, String> record : records) {
+             try {
+                 doProcessing(record);
+                 consumer.acknowledge(record, AcknowledgeType.ACCEPT);
+             } catch (Exception e) {
+                 consumer.acknowledge(record, AcknowledgeType.REJECT);
+             }
+         }
+         consumer.commitSync();
+     }
+ 
+ + Each record processed is separately acknowledged using a call to acknowledge(ConsumerRecord, AcknowledgeType). + The AcknowledgeType argument indicates whether the record was processed successfully or not. In this case, + the bad records are rejected meaning that they’re not eligible for further delivery attempts. For a permanent error + such as a semantic error, this is appropriate. For a transient error which might not affect a subsequent processing + attempt, AcknowledgeType.RELEASE is more appropriate because the record remains eligible for further delivery attempts. +

+ The calls to acknowledge(ConsumerRecord, AcknowledgeType) are simply updating local information in the consumer. + It is only once commitSync() is called that the acknowledgements are committed by sending the new state + information to Kafka. + +
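A sketch of that per-record decision, assuming a KafkaShareConsumer<String, String> named consumer configured with share.acknowledgement.mode=explicit, the hypothetical doProcessing(record) helper used above, and a hypothetical TransientProcessingException marking failures that are worth retrying:

    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
    for (ConsumerRecord<String, String> record : records) {
        try {
            doProcessing(record);                                    // hypothetical application logic
            consumer.acknowledge(record, AcknowledgeType.ACCEPT);
        } catch (TransientProcessingException e) {                   // hypothetical transient failure
            consumer.acknowledge(record, AcknowledgeType.RELEASE);   // eligible for another delivery attempt
        } catch (Exception e) {                                      // treat anything else as permanent
            consumer.acknowledge(record, AcknowledgeType.REJECT);    // not redelivered
        }
    }
    consumer.commitSync();                                           // sends the acknowledgements to Kafka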

Reading Transactional Records

+ The way that share groups handle transactional records is controlled by the group.share.isolation.level + configuration property. In a share group, the isolation level applies to the entire share group, not just individual + consumers. +

+ In read_uncommitted isolation level, the share group consumes all non-transactional and transactional + records. The consumption is bounded by the high-water mark. +

+ In read_committed isolation level (not yet supported), the share group only consumes non-transactional + records and committed transactional records. The set of records which are eligible to become in-flight records are + non-transactional records and committed transactional records only. The consumption is bounded by the last stable + offset, so an open transaction blocks the progress of the share group with read_committed isolation level. + +

Multithreaded Processing

+ The consumer is NOT thread-safe. It is the responsibility of the user to ensure that multithreaded access + is properly synchronized. Unsynchronized access will result in ConcurrentModificationException. +

+ The only exception to this rule is wakeup() which can safely be used from an external thread to + interrupt an active operation. In this case, a WakeupException will be + thrown from the thread blocking on the operation. This can be used to shut down the consumer from another thread. + The following snippet shows the typical pattern: + +

+ public class KafkaShareConsumerRunner implements Runnable {
+     private final AtomicBoolean closed = new AtomicBoolean(false);
+     private final KafkaShareConsumer consumer;
+
+     public KafkaShareConsumerRunner(KafkaShareConsumer consumer) {
+       this.consumer = consumer;
+     }
+
+     @Override
+     public void run() {
+         try {
+             consumer.subscribe(Arrays.asList("topic"));
+             while (!closed.get()) {
+                 ConsumerRecords records = consumer.poll(Duration.ofMillis(10000));
+                 // Handle new records
+             }
+         } catch (WakeupException e) {
+             // Ignore exception if closing
+             if (!closed.get()) throw e;
+         } finally {
+             consumer.close();
+         }
+     }
+
+     // Shutdown hook which can be called from a separate thread
+     public void shutdown() {
+         closed.set(true);
+         consumer.wakeup();
+     }
+ }
+ 
+ + Then, in a separate thread, the consumer can be shut down by setting the closed flag and waking up the consumer. +
+     closed.set(true);
+     consumer.wakeup();
+ 
+ +

+ Note that while it is possible to use thread interrupts instead of wakeup() to abort a blocking operation + (in which case, InterruptException will be raised), we discourage their use since they may cause a clean + shutdown of the consumer to be aborted. Interrupts are mainly supported for those cases where using wakeup() + is impossible, such as when a consumer thread is managed by code that is unaware of the Kafka client. +

+ We have intentionally avoided implementing a particular threading model for processing. Various options for + multithreaded processing are possible, of which the most straightforward is to dedicate a thread to each consumer.
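+ For example, building on the KafkaShareConsumerRunner shown above, each consumer can be driven by its own thread from an executor, + and shutdown() can be invoked from the main thread to stop the runners cleanly. This is an illustrative sketch only; props is the + consumer configuration from the earlier snippet and the pool size is arbitrary. +
+     // One consumer per thread; each KafkaShareConsumer instance stays confined to its runner thread.
+     ExecutorService executor = Executors.newFixedThreadPool(2);
+     List<KafkaShareConsumerRunner> runners = new ArrayList<>();
+     for (int i = 0; i < 2; i++) {
+         KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props);
+         KafkaShareConsumerRunner runner = new KafkaShareConsumerRunner(consumer);
+         runners.add(runner);
+         executor.submit(runner);
+     }
+
+     // On shutdown, set the closed flag and wake up every consumer, then wait for the threads to finish.
+     runners.forEach(KafkaShareConsumerRunner::shutdown);
+     executor.shutdown();
+     executor.awaitTermination(30, TimeUnit.SECONDS);
+ 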

+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      KafkaShareConsumer

      +
      public KafkaShareConsumer(Map<String,Object> configs)
      +
      A consumer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings + are documented here. Values can be + either strings or objects of the appropriate type (for example a numeric configuration would accept either the + string "42" or the integer 42). +

      + Valid configuration strings are documented at ConsumerConfig. +

      + Note: after creating a KafkaShareConsumer you must always close() it to avoid resource leaks.

      +
      +
      Parameters:
      +
      configs - The consumer configs
      +
      +
      +
    • +
    • +
      +

      KafkaShareConsumer

      +
      public KafkaShareConsumer(Properties properties)
      +
      A consumer is instantiated by providing a Properties object as configuration. +

      + Valid configuration strings are documented at ConsumerConfig. +

      + Note: after creating a KafkaShareConsumer you must always close() it to avoid resource leaks.

      +
      +
      Parameters:
      +
      properties - The consumer configuration properties
      +
      +
      +
    • +
    • +
      +

      KafkaShareConsumer

      +
      public KafkaShareConsumer(Properties properties, + Deserializer<K> keyDeserializer, + Deserializer<V> valueDeserializer)
      +
      A consumer is instantiated by providing a Properties object as configuration, and a + key and a value Deserializer. +

      + Valid configuration strings are documented at ConsumerConfig. +

      + Note: after creating a KafkaShareConsumer you must always close() it to avoid resource leaks.

      +
      +
      Parameters:
      +
      properties - The consumer configuration properties
      +
      keyDeserializer - The deserializer for key that implements Deserializer. The configure() method + won't be called in the consumer when the deserializer is passed in directly.
      +
      valueDeserializer - The deserializer for value that implements Deserializer. The configure() method + won't be called in the consumer when the deserializer is passed in directly.
      +
      +
      +
    • +
    • +
      +

      KafkaShareConsumer

      +
      public KafkaShareConsumer(Map<String,Object> configs, + Deserializer<K> keyDeserializer, + Deserializer<V> valueDeserializer)
      +
      A consumer is instantiated by providing a set of key-value pairs as configuration, and a key and a value Deserializer. +

      + Valid configuration strings are documented at ConsumerConfig. +

      + Note: after creating a KafkaShareConsumer you must always close() it to avoid resource leaks.

      +
      +
      Parameters:
      +
      configs - The consumer configs
      +
      keyDeserializer - The deserializer for key that implements Deserializer. The configure() method + won't be called in the consumer when the deserializer is passed in directly.
      +
      valueDeserializer - The deserializer for value that implements Deserializer. The configure() method + won't be called in the consumer when the deserializer is passed in directly.
      +
      +
      +
    • +
    +
    +
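+ For illustration, a consumer might be constructed with a Properties object and explicit deserializers as follows; the broker address, + group id and topic name are placeholders. +
+     Properties props = new Properties();
+     props.setProperty("bootstrap.servers", "localhost:9092");
+     props.setProperty("group.id", "my-share-group");
+
+     // Deserializers are passed directly, so their configure() methods will not be called by the consumer.
+     KafkaShareConsumer<String, String> consumer =
+         new KafkaShareConsumer<>(props, new StringDeserializer(), new StringDeserializer());
+     consumer.subscribe(Arrays.asList("foo"));
+     // ... poll and process records ...
+     consumer.close();
+ 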
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      subscription

      +
      public Set<String> subscription()
      +
      Get the current subscription. Will return the same topics used in the most recent call to + subscribe(Collection), or an empty set if no such call has been made.
      +
      +
      Specified by:
      +
      subscription in interface ShareConsumer<K,V>
      +
      Returns:
      +
      The set of topics currently subscribed to
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      subscribe

      +
      public void subscribe(Collection<String> topics)
      +
      Subscribe to the given list of topics to get dynamically assigned partitions. + Topic subscriptions are not incremental. This list will replace the current + assignment, if there is one. If the given list of topics is empty, it is treated the same as unsubscribe(). + +

      + As part of group management, the coordinator will keep track of the list of consumers that belong to a particular + group and will trigger a rebalance operation if any one of the following events are triggered: +

        +
      • A member joins or leaves the share group +
      • An existing member of the share group is shut down or fails +
      • The number of partitions changes for any of the subscribed topics +
      • A subscribed topic is created or deleted +
      +
      +
      Specified by:
      +
      subscribe in interface ShareConsumer<K,V>
      +
      Parameters:
      +
      topics - The list of topics to subscribe to
      +
      Throws:
      +
      IllegalArgumentException - if topics is null or contains null or empty elements
      +
      KafkaException - for any other unrecoverable errors
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      unsubscribe

      +
      public void unsubscribe()
      +
      Unsubscribe from topics currently subscribed with subscribe(Collection).
      +
      +
      Specified by:
      +
      unsubscribe in interface ShareConsumer<K,V>
      +
      Throws:
      +
      KafkaException - for any other unrecoverable errors
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      poll

      +
      public ConsumerRecords<K,V> poll(Duration timeout)
      +
      Deliver records for the topics specified using subscribe(Collection). It is an error to not have + subscribed to any topics before polling for data. + +

      + This method returns immediately if there are records available. Otherwise, it will await the passed timeout. + If the timeout expires, an empty record set will be returned.

      +
      +
      Specified by:
      +
      poll in interface ShareConsumer<K,V>
      +
      Parameters:
      +
      timeout - The maximum time to block (must not be greater than Long.MAX_VALUE milliseconds)
      +
      Returns:
      +
      map of topic to records
      +
      Throws:
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if caller lacks Read access to any of the subscribed + topics or to the share group. See the exception for more details
      +
      IllegalArgumentException - if the timeout value is negative
      +
      IllegalStateException - if the consumer is not subscribed to any topics, or it is using + explicit acknowledgement and has not acknowledged all records previously delivered
      +
      ArithmeticException - if the timeout is greater than Long.MAX_VALUE milliseconds.
      +
      InvalidTopicException - if the current subscription contains any invalid + topic (per Topic.validate(String))
      +
      WakeupException - if wakeup() is called before or while this method is called
      +
      InterruptException - if the calling thread is interrupted before or while this method is called
      +
      KafkaException - for any other unrecoverable errors
      +
      See Also:
      +
      + +
      +
      +
      +
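+ For example, a minimal polling loop using the default implicit acknowledgement mode looks like this; the topic name and + doProcessing method are placeholders, as in the earlier examples. +
+     consumer.subscribe(Arrays.asList("foo"));
+     while (true) {
+         // With implicit acknowledgement, every record returned here is treated as successfully
+         // processed when the next commitSync(), commitAsync() or poll(Duration) call is made.
+         ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
+         for (ConsumerRecord<String, String> record : records) {
+             doProcessing(record);
+         }
+         consumer.commitSync();
+     }
+ 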
    • +
    • +
      +

      acknowledge

      +
      public void acknowledge(ConsumerRecord<K,V> record)
      +
      Acknowledge successful delivery of a record returned on the last poll(Duration) call. + The acknowledgement is committed on the next commitSync(), commitAsync() or + poll(Duration) call. +

      This method can only be used if the consumer is using explicit acknowledgement.

      +
      +
      Specified by:
      +
      acknowledge in interface ShareConsumer<K,V>
      +
      Parameters:
      +
      record - The record to acknowledge
      +
      Throws:
      +
      IllegalStateException - if the record is not waiting to be acknowledged, or the consumer is not using + explicit acknowledgement
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      acknowledge

      +
      public void acknowledge(ConsumerRecord<K,V> record, + AcknowledgeType type)
      +
      Acknowledge delivery of a record returned on the last poll(Duration) call indicating whether + it was processed successfully. The acknowledgement is committed on the next commitSync(), + commitAsync() or poll(Duration) call. +

      This method can only be used if the consumer is using explicit acknowledgement.

      +
      +
      Specified by:
      +
      acknowledge in interface ShareConsumer<K,V>
      +
      Parameters:
      +
      record - The record to acknowledge
      +
      type - The acknowledgement type which indicates whether it was processed successfully
      +
      Throws:
      +
      IllegalStateException - if the record is not waiting to be acknowledged, or the consumer is not using + explicit acknowledgement
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      commitSync

      + +
      Commit the acknowledgements for the records returned. If the consumer is using explicit acknowledgement, + the acknowledgements to commit have been indicated using acknowledge(ConsumerRecord) or + acknowledge(ConsumerRecord, AcknowledgeType). If the consumer is using implicit acknowledgement, + all the records returned by the latest call to poll(Duration) are acknowledged. + +

      + This is a synchronous commit and will block until either the commit succeeds, an unrecoverable error is + encountered (in which case it is thrown to the caller), or the timeout specified by default.api.timeout.ms + expires.

      +
      +
      Specified by:
      +
      commitSync in interface ShareConsumer<K,V>
      +
      Returns:
      +
      A map of the results for each topic-partition for which delivery was acknowledged. + If the acknowledgement failed for a topic-partition, an exception is present.
      +
      Throws:
      +
      WakeupException - if wakeup() is called before or while this method is called
      +
      InterruptException - if the thread is interrupted while blocked
      +
      KafkaException - for any other unrecoverable errors
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      commitSync

      +
      public Map<TopicIdPartition,Optional<KafkaException>> commitSync(Duration timeout)
      +
      Commit the acknowledgements for the records returned. If the consumer is using explicit acknowledgement, + the acknowledgements to commit have been indicated using acknowledge(ConsumerRecord) or + acknowledge(ConsumerRecord, AcknowledgeType). If the consumer is using implicit acknowledgement, + all the records returned by the latest call to poll(Duration) are acknowledged. + +

      + This is a synchronous commit and will block until either the commit succeeds, an unrecoverable error is + encountered (in which case it is thrown to the caller), or the timeout expires.

      +
      +
      Specified by:
      +
      commitSync in interface ShareConsumer<K,V>
      +
      Parameters:
      +
      timeout - The maximum amount of time to await completion of the acknowledgement
      +
      Returns:
      +
      A map of the results for each topic-partition for which delivery was acknowledged. + If the acknowledgement failed for a topic-partition, an exception is present.
      +
      Throws:
      +
      IllegalArgumentException - if the timeout is negative
      +
      WakeupException - if wakeup() is called before or while this method is called
      +
      InterruptException - if the thread is interrupted while blocked
      +
      KafkaException - for any other unrecoverable errors
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      commitAsync

      +
      public void commitAsync()
      +
      Commit the acknowledgements for the records returned. If the consumer is using explicit acknowledgement, + the acknowledgements to commit have been indicated using acknowledge(ConsumerRecord) or + acknowledge(ConsumerRecord, AcknowledgeType). If the consumer is using implicit acknowledgement, + all the records returned by the latest call to poll(Duration) are acknowledged.
      +
      +
      Specified by:
      +
      commitAsync in interface ShareConsumer<K,V>
      +
      Throws:
      +
      KafkaException - for any other unrecoverable errors
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      setAcknowledgementCommitCallback

      +
      public void setAcknowledgementCommitCallback(AcknowledgementCommitCallback callback)
      +
      Sets the acknowledgement commit callback which can be used to handle acknowledgement completion.
      +
      +
      Specified by:
      +
      setAcknowledgementCommitCallback in interface ShareConsumer<K,V>
      +
      Parameters:
      +
      callback - The acknowledgement commit callback
      +
      See Also:
      +
      + +
      +
      +
      +
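+ For example, a callback can be registered before polling to observe when acknowledgements are actually committed. + This sketch assumes the callback receives the acknowledged offsets and an exception that is null on success. +
+     consumer.setAcknowledgementCommitCallback((offsets, exception) -> {
+         if (exception != null) {
+             // The acknowledgements for these offsets were not committed successfully.
+             System.err.println("Acknowledgement commit failed for " + offsets + ": " + exception);
+         }
+     });
+ 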
    • +
    • +
      +

      clientInstanceId

      +
      public Uuid clientInstanceId(Duration timeout)
      +
      Determines the client's unique client instance ID used for telemetry. This ID is unique to + this specific client instance and will not change after it is initially generated. + The ID is useful for correlating client operations with telemetry sent to the broker and + to its eventual monitoring destinations. +

      + If telemetry is enabled, this will first require a connection to the cluster to generate + the unique client instance ID. This method waits up to timeout for the consumer + client to complete the request. +

      + Client telemetry is controlled by the ConsumerConfig.ENABLE_METRICS_PUSH_CONFIG + configuration property.

      +
      +
      Specified by:
      +
      clientInstanceId in interface ShareConsumer<K,V>
      +
      Parameters:
      +
      timeout - The maximum time to wait for consumer client to determine its client instance ID. + The value must be non-negative. Specifying a timeout of zero means do not + wait for the initial request to complete if it hasn't already.
      +
      Returns:
      +
      The client's assigned instance id used for metrics collection.
      +
      Throws:
      +
      IllegalArgumentException - if the timeout is negative
      +
      IllegalStateException - if telemetry is not enabled
      +
      WakeupException - if wakeup() is called before or while this method is called
      +
      InterruptException - if the thread is interrupted while blocked
      +
      KafkaException - if an unexpected error occurs while trying to determine the client + instance ID, though this error does not necessarily imply the + consumer client is otherwise unusable
      +
      See Also:
      +
      + +
      +
      +
      +
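+ For example, assuming telemetry is enabled via the enable.metrics.push configuration, the instance id could be logged at startup + for later correlation with broker-side telemetry (a sketch only). +
+     // Blocks for up to five seconds while the consumer obtains its client instance id.
+     Uuid instanceId = consumer.clientInstanceId(Duration.ofSeconds(5));
+     System.out.println("Share consumer client instance id: " + instanceId);
+ 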
    • +
    • +
      +

      metrics

      +
      public Map<MetricName,? extends Metric> metrics()
      +
      Get the metrics kept by the consumer
      +
      +
      Specified by:
      +
      metrics in interface ShareConsumer<K,V>
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      registerMetricForSubscription

      +
      public void registerMetricForSubscription(KafkaMetric metric)
      +
      Add the provided application metric for subscription. This metric will be added to this client's metrics + that are available for subscription and sent as telemetry data to the broker. + The provided metric must map to an OTLP metric data point type in the OpenTelemetry v1 metrics protobuf message types. + Specifically, the metric should be one of the following: +
        +
      • + Sum: Monotonic total count meter (Counter). Suitable for metrics like total number of X, e.g., total bytes sent. +
      • +
      • + Gauge: Non-monotonic current value meter (UpDownCounter). Suitable for metrics like current value of Y, e.g., current queue count. +
      • +
      + Metrics not matching these types are silently ignored. Executing this method for a previously registered metric + is a benign operation and results in updating that metric's entry.
      +
      +
      Specified by:
      +
      registerMetricForSubscription in interface ShareConsumer<K,V>
      +
      Parameters:
      +
      metric - The application metric to register
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      unregisterMetricFromSubscription

      +
      public void unregisterMetricFromSubscription(KafkaMetric metric)
      +
      Remove the provided application metric for subscription. This metric is removed from this client's metrics + and will not be available for subscription any longer. Executing this method with a metric that has not been registered is a + benign operation and does not result in any action taken (no-op).
      +
      +
      Specified by:
      +
      unregisterMetricFromSubscription in interface ShareConsumer<K,V>
      +
      Parameters:
      +
      metric - The application metric to remove
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      close

      +
      public void close()
      +
      Close the consumer, waiting for up to the default timeout of 30 seconds for any needed cleanup. + This will commit acknowledgements if possible within the default timeout. + See close(Duration) for details. Note that wakeup() cannot be used to interrupt close.
      +
      +
      Specified by:
      +
      close in interface AutoCloseable
      +
      Specified by:
      +
      close in interface Closeable
      +
      Specified by:
      +
      close in interface ShareConsumer<K,V>
      +
      Throws:
      +
      WakeupException - if wakeup() is called before or while this method is called
      +
      InterruptException - if the thread is interrupted before or while this method is called
      +
      KafkaException - for any other error during close
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      close

      +
      public void close(Duration timeout)
      +
      Tries to close the consumer cleanly within the specified timeout. This method waits up to + timeout for the consumer to complete acknowledgements and leave the group. + If the consumer is unable to complete acknowledgements and gracefully leave the group + before the timeout expires, the consumer is force closed. Note that wakeup() cannot be + used to interrupt close. +

+ The actual maximum wait time is bounded by the ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG setting, which + only applies to operations performed with the broker (coordinator-related requests). + Even if a larger timeout is specified, the consumer will not wait longer than + ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG for these requests to complete during the close operation. + Note that the execution time of callbacks (such as AcknowledgementCommitCallback) does not consume time from the close timeout.

      +
      +
      Specified by:
      +
      close in interface ShareConsumer<K,V>
      +
      Parameters:
      +
      timeout - The maximum time to wait for consumer to close gracefully. The value must be + non-negative. Specifying a timeout of zero means do not wait for pending requests to complete.
      +
      Throws:
      +
      IllegalArgumentException - if the timeout is negative
      +
      WakeupException - if wakeup() is called before or while this method is called
      +
      InterruptException - if the thread is interrupted before or while this method is called
      +
      KafkaException - for any other error during close
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      wakeup

      +
      public void wakeup()
      +
      Wake up the consumer. This method is thread-safe and is useful in particular to abort a long poll. + The thread which is blocking in an operation will throw WakeupException. + If no thread is blocking in a method which can throw WakeupException, + the next call to such a method will raise it instead.
      +
      +
      Specified by:
      +
      wakeup in interface ShareConsumer<K,V>
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/LogTruncationException.html b/static/41/javadoc/org/apache/kafka/clients/consumer/LogTruncationException.html new file mode 100644 index 000000000..bcba71014 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/LogTruncationException.html @@ -0,0 +1,217 @@ + + + + +LogTruncationException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class LogTruncationException

+
+ +
+
+
All Implemented Interfaces:
+
Serializable
+
+
+
public class LogTruncationException +extends OffsetOutOfRangeException
+
In the event of an unclean leader election, the log will be truncated, + previously committed data will be lost, and new data will be written + over these offsets. When this happens, the consumer will detect the + truncation and raise this exception (if no automatic reset policy + has been defined) with the first offset known to diverge from what the + consumer previously read.
+
+
See Also:
+
+ +
+
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/MockConsumer.html b/static/41/javadoc/org/apache/kafka/clients/consumer/MockConsumer.html new file mode 100644 index 000000000..b647349dc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/MockConsumer.html @@ -0,0 +1,1377 @@ + + + + +MockConsumer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class MockConsumer<K,V>

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.MockConsumer<K,V>
+
+
+
+
All Implemented Interfaces:
+
Closeable, AutoCloseable, Consumer<K,V>
+
+
+
public class MockConsumer<K,V> +extends Object +implements Consumer<K,V>
+
A mock of the Consumer interface you can use for testing code that uses Kafka. This class is not + thread-safe. However, you can use the schedulePollTask(Runnable) method to write multithreaded tests + where a driver thread waits for poll(Duration) to be called by a background thread and can then safely perform + operations during a callback.
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/MockShareConsumer.html b/static/41/javadoc/org/apache/kafka/clients/consumer/MockShareConsumer.html new file mode 100644 index 000000000..2bb37adcf --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/MockShareConsumer.html @@ -0,0 +1,503 @@ + + + + +MockShareConsumer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class MockShareConsumer<K,V>

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.MockShareConsumer<K,V>
+
+
+
+
All Implemented Interfaces:
+
Closeable, AutoCloseable, ShareConsumer<K,V>
+
+
+
public class MockShareConsumer<K,V> +extends Object +implements ShareConsumer<K,V>
+
A mock of the ShareConsumer interface you can use for testing code that uses Kafka. This class is not + thread-safe.
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/NoOffsetForPartitionException.html b/static/41/javadoc/org/apache/kafka/clients/consumer/NoOffsetForPartitionException.html new file mode 100644 index 000000000..80c8c5b79 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/NoOffsetForPartitionException.html @@ -0,0 +1,202 @@ + + + + +NoOffsetForPartitionException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class NoOffsetForPartitionException

+
+ +
+
+
All Implemented Interfaces:
+
Serializable
+
+
+
public class NoOffsetForPartitionException +extends InvalidOffsetException
+
Indicates that there is no stored offset for a partition and no defined offset + reset policy.
+
+
See Also:
+
+ +
+
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      NoOffsetForPartitionException

      +
      public NoOffsetForPartitionException(TopicPartition partition)
      +
      +
    • +
    • +
      +

      NoOffsetForPartitionException

      +
      public NoOffsetForPartitionException(Collection<TopicPartition> partitions)
      +
      +
    • +
    +
    +
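+ One possible way an application might handle this exception, sketched under the assumption that falling back to the earliest + available offset is acceptable and that auto.offset.reset is set to "none": +
+     try {
+         ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
+         process(records);   // hypothetical application processing
+     } catch (NoOffsetForPartitionException e) {
+         // No committed offset and no reset policy: choose a starting position explicitly.
+         consumer.seekToBeginning(e.partitions());
+     }
+ 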
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetAndMetadata.html b/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetAndMetadata.html new file mode 100644 index 000000000..bff1ea9f8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetAndMetadata.html @@ -0,0 +1,287 @@ + + + + +OffsetAndMetadata (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class OffsetAndMetadata

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.OffsetAndMetadata
+
+
+
+
All Implemented Interfaces:
+
Serializable
+
+
+
public class OffsetAndMetadata +extends Object +implements Serializable
+
The Kafka offset commit API allows users to provide additional metadata (in the form of a string) + when an offset is committed. This can be useful (for example) to store information about which + node made the commit, what time the commit was made, etc.
+
+
See Also:
+
+ +
+
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      OffsetAndMetadata

      +
      public OffsetAndMetadata(long offset, + Optional<Integer> leaderEpoch, + String metadata)
      +
      Construct a new OffsetAndMetadata object for committing through KafkaConsumer.
      +
      +
      Parameters:
      +
      offset - The offset to be committed
      +
      leaderEpoch - Optional leader epoch of the last consumed record
      +
      metadata - Non-null metadata
      +
      +
      +
    • +
    • +
      +

      OffsetAndMetadata

      +
      public OffsetAndMetadata(long offset, + String metadata)
      +
      Construct a new OffsetAndMetadata object for committing through KafkaConsumer.
      +
      +
      Parameters:
      +
      offset - The offset to be committed
      +
      metadata - Non-null metadata
      +
      +
      +
    • +
    • +
      +

      OffsetAndMetadata

      +
      public OffsetAndMetadata(long offset)
      +
      Construct a new OffsetAndMetadata object for committing through KafkaConsumer. The metadata + associated with the commit will be empty.
      +
      +
      Parameters:
      +
      offset - The offset to be committed
      +
      +
      +
    • +
    +
    +
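+ For example, a KafkaConsumer might commit an offset together with a short piece of metadata like this; the topic, partition, + leader epoch and metadata values are placeholders. +
+     TopicPartition partition = new TopicPartition("foo", 0);
+     // The offset of the next record to consume, the leader epoch of the last consumed record,
+     // and a free-form metadata string are stored together by the broker.
+     OffsetAndMetadata committed = new OffsetAndMetadata(43L, Optional.of(5), "committed-by=worker-1");
+     consumer.commitSync(Map.of(partition, committed));
+ 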
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      offset

      +
      public long offset()
      +
      +
    • +
    • +
      +

      metadata

      +
      public String metadata()
      +
      +
    • +
    • +
      +

      leaderEpoch

      +
      public Optional<Integer> leaderEpoch()
      +
      Get the leader epoch of the previously consumed record (if one is known). Log truncation is detected + if there exists a leader epoch which is larger than this epoch and begins at an offset earlier than + the committed offset.
      +
      +
      Returns:
      +
      the leader epoch or empty if not known
      +
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetAndTimestamp.html b/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetAndTimestamp.html new file mode 100644 index 000000000..260c06536 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetAndTimestamp.html @@ -0,0 +1,238 @@ + + + + +OffsetAndTimestamp (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class OffsetAndTimestamp

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.OffsetAndTimestamp
+
+
+
+
public final class OffsetAndTimestamp +extends Object
+
A container class for offset and timestamp.
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      OffsetAndTimestamp

      +
      public OffsetAndTimestamp(long offset, + long timestamp)
      +
      +
    • +
    • +
      +

      OffsetAndTimestamp

      +
      public OffsetAndTimestamp(long offset, + long timestamp, + Optional<Integer> leaderEpoch)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      timestamp

      +
      public long timestamp()
      +
      +
    • +
    • +
      +

      offset

      +
      public long offset()
      +
      +
    • +
    • +
      +

      leaderEpoch

      +
      public Optional<Integer> leaderEpoch()
      +
      Get the leader epoch corresponding to the offset that was found (if one exists). + This can be provided to seek() to ensure that the log hasn't been truncated prior to fetching.
      +
      +
      Returns:
      +
      The leader epoch or empty if it is not known
      +
      +
      +
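+ For example, an OffsetAndTimestamp is typically obtained from KafkaConsumer.offsetsForTimes(Map) and can then be used to + reposition the consumer; the topic name and timestamp below are placeholders. +
+     TopicPartition partition = new TopicPartition("foo", 0);
+     long oneHourAgo = System.currentTimeMillis() - Duration.ofHours(1).toMillis();
+
+     Map<TopicPartition, OffsetAndTimestamp> result = consumer.offsetsForTimes(Map.of(partition, oneHourAgo));
+     OffsetAndTimestamp offsetAndTimestamp = result.get(partition);
+     if (offsetAndTimestamp != null) {
+         // Seek to the earliest offset whose timestamp is greater than or equal to the target timestamp.
+         consumer.seek(partition, offsetAndTimestamp.offset());
+     }
+ 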
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetCommitCallback.html b/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetCommitCallback.html new file mode 100644 index 000000000..1a7f6f7fd --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetCommitCallback.html @@ -0,0 +1,157 @@ + + + + +OffsetCommitCallback (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Interface OffsetCommitCallback

+
+
+
+
Functional Interface:
+
This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
+
+
+
@FunctionalInterface +public interface OffsetCommitCallback
+
A callback interface that the user can implement to trigger custom actions when a commit request completes. The callback + may be executed in any thread calling poll().
+
+
+
    + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    +
    void
    + +
    +
    A callback method the user can implement to provide asynchronous handling of commit request completion.
    +
    +
    +
    +
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      onComplete

      +
      void onComplete(Map<TopicPartition,OffsetAndMetadata> offsets, + Exception exception)
      +
      A callback method the user can implement to provide asynchronous handling of commit request completion. + This method will be called when the commit request sent to the server has been acknowledged.
      +
      +
      Parameters:
      +
      offsets - A map of the offsets and associated metadata that this callback applies to
      +
      exception - The exception thrown during processing of the request, or null if the commit completed successfully
      +
      Throws:
      +
      CommitFailedException - if the commit failed and cannot be retried. + This can only occur if you are using automatic group management with KafkaConsumer.subscribe(Collection), + or if there is an active group with the same groupId which is using group management.
      +
      RebalanceInProgressException - if the commit failed because + it is in the middle of a rebalance. In such cases + commit could be retried after the rebalance is completed with the KafkaConsumer.poll(Duration) call.
      +
      WakeupException - if KafkaConsumer.wakeup() is called before or while this + function is called
      +
      InterruptException - if the calling thread is interrupted before or while + this function is called
      +
      AuthorizationException - if not authorized to the topic or to the + configured groupId. See the exception for more details
      +
      KafkaException - for any other unrecoverable errors (e.g. if offset metadata + is too large or if the committed offset is invalid).
      +
      +
      +
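+ For example, the callback is usually supplied to KafkaConsumer.commitAsync(OffsetCommitCallback) as a lambda (a sketch only): +
+     consumer.commitAsync((offsets, exception) -> {
+         if (exception != null) {
+             // The commit failed; offsets identifies exactly what this callback applies to.
+             System.err.println("Asynchronous commit failed for " + offsets + ": " + exception);
+         }
+     });
+ 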
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetOutOfRangeException.html b/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetOutOfRangeException.html new file mode 100644 index 000000000..75a2e0bf0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetOutOfRangeException.html @@ -0,0 +1,215 @@ + + + + +OffsetOutOfRangeException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class OffsetOutOfRangeException

+
+ +
+
+
All Implemented Interfaces:
+
Serializable
+
+
+
Direct Known Subclasses:
+
LogTruncationException
+
+
+
public class OffsetOutOfRangeException +extends InvalidOffsetException
+
No reset policy has been defined, and the offsets for these partitions are either larger or smaller + than the range of offsets the server has for the given partition.
+
+
See Also:
+
+ +
+
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      OffsetOutOfRangeException

      +
      public OffsetOutOfRangeException(Map<TopicPartition,Long> offsetOutOfRangePartitions)
      +
      +
    • +
    • +
      +

      OffsetOutOfRangeException

      +
      public OffsetOutOfRangeException(String message, + Map<TopicPartition,Long> offsetOutOfRangePartitions)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    + +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetResetStrategy.html b/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetResetStrategy.html new file mode 100644 index 000000000..630687c11 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/OffsetResetStrategy.html @@ -0,0 +1,258 @@ + + + + +OffsetResetStrategy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Enum Class OffsetResetStrategy

+
+
java.lang.Object +
java.lang.Enum<OffsetResetStrategy> +
org.apache.kafka.clients.consumer.OffsetResetStrategy
+
+
+
+
+
All Implemented Interfaces:
+
Serializable, Comparable<OffsetResetStrategy>, Constable
+
+
+
@Deprecated +public enum OffsetResetStrategy +extends Enum<OffsetResetStrategy>
+
Deprecated. +
Since 4.0. Use AutoOffsetResetStrategy instead.
+
+
+
+ +
+
+
    + +
  • +
    +

    Enum Constant Details

    + +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      values

      +
      public static OffsetResetStrategy[] values()
      +
      Deprecated.
      +
      Returns an array containing the constants of this enum class, in +the order they are declared.
      +
      +
      Returns:
      +
      an array containing the constants of this enum class, in the order they are declared
      +
      +
      +
    • +
    • +
      +

      valueOf

      +
      public static OffsetResetStrategy valueOf(String name)
      +
      Deprecated.
      +
      Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
      +
      +
      Parameters:
      +
      name - the name of the enum constant to be returned.
      +
      Returns:
      +
      the enum constant with the specified name
      +
      Throws:
      +
      IllegalArgumentException - if this enum class has no constant with the specified name
      +
      NullPointerException - if the argument is null
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      Deprecated.
      +
      +
      Overrides:
      +
      toString in class Enum<OffsetResetStrategy>
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/RangeAssignor.html b/static/41/javadoc/org/apache/kafka/clients/consumer/RangeAssignor.html new file mode 100644 index 000000000..75218d305 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/RangeAssignor.html @@ -0,0 +1,318 @@ + + + + +RangeAssignor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class RangeAssignor

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignor +
org.apache.kafka.clients.consumer.RangeAssignor
+
+
+
+
+
All Implemented Interfaces:
+
ConsumerPartitionAssignor
+
+
+
public class RangeAssignor +extends org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignor
+

The range assignor works on a per-topic basis. For each topic, we lay out the available partitions in numeric order + and the consumers in lexicographic order. We then divide the number of partitions by the total number of + consumers to determine the number of partitions to assign to each consumer. If it does not evenly + divide, then the first few consumers will have one extra partition. + +

For example, suppose there are two consumers C0 and C1, two topics t0 and + t1, and each topic has 3 partitions, resulting in partitions t0p0, t0p1, + t0p2, t1p0, t1p1, and t1p2. + +

The assignment will be: +

    +
  • C0: [t0p0, t0p1, t1p0, t1p1]
  • +
  • C1: [t0p2, t1p2]
  • +
+ + Since the introduction of static membership, we can leverage group.instance.id to make the assignment behavior more sticky. + For the above example, after one rolling bounce, the group coordinator will assign new member.id values to the consumers, + for example C0 -> C3, C1 -> C2. + +

The assignment could be completely shuffled to: +

    +
  • C3 (was C0): [t0p2, t1p2] (before was [t0p0, t0p1, t1p0, t1p1]) +
  • C2 (was C1): [t0p0, t0p1, t1p0, t1p1] (before was [t0p2, t1p2]) +
+ + The assignment change was caused by the change in the relative order of the member.id values, and + can be avoided by setting the group.instance.id. + Consumers will have individual instance ids I0, I1. The assignment remains stable as long as + 1. the number of members remains the same across generations, + 2. static members' identities persist across generations, and + 3. the subscription pattern doesn't change for any member. + +

The assignment will always be: +

    +
  • I0: [t0p0, t0p1, t1p0, t1p1] +
  • I1: [t0p2, t1p2] +
+

+ Rack-aware assignment is used if both consumer and partition replica racks are available and + some partitions have replicas only on a subset of racks. We attempt to match consumer racks with + partition replica racks on a best-effort basis, prioritizing balanced assignment over rack alignment. + Topics with equal partition count and same set of subscribers guarantee co-partitioning by prioritizing + co-partitioning over rack alignment. In this case, aligning partition replicas of these topics on the + same racks will improve locality for consumers. For example, if partitions 0 of all topics have a replica + on rack 'a', partition 1 on rack 'b' etc., partition 0 of all topics can be assigned to a consumer + on rack 'a', partition 1 to a consumer on rack 'b' and so on. +

+ Note that rack-aware assignment currently takes all replicas into account, including any offline replicas + and replicas that are not in the ISR. This is based on the assumption that these replicas are likely + to join the ISR relatively soon. Since consumers don't rebalance on ISR change, this avoids unnecessary + cross-rack traffic for long durations after replicas rejoin the ISR. In the future, we may consider + rebalancing when replicas are added or removed to improve consumer rack alignment. +
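+ For example, the range assignor can be selected explicitly through the consumer configuration; the broker address and group id + below are placeholders. +
+     Properties props = new Properties();
+     props.setProperty("bootstrap.servers", "localhost:9092");
+     props.setProperty("group.id", "my-group");
+     // Several assignor class names may be listed, in order of preference.
+     props.setProperty(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, RangeAssignor.class.getName());
+ 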

+
+
+ +
+
+
    + +
  • +
    +

    Field Details

    + +
    +
  • + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      RangeAssignor

      +
      public RangeAssignor()
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      name

      +
      public String name()
      +
      Description copied from interface: ConsumerPartitionAssignor
      +
      Unique name for this assignor (e.g. "range" or "roundrobin" or "sticky"). Note, this is not required + to be the same as the class name specified in ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG
      +
      +
      Returns:
      +
      non-null unique name
      +
      +
      +
    • +
    • +
      +

      assignPartitions

      +
      public Map<String,List<TopicPartition>> assignPartitions(Map<String,List<PartitionInfo>> partitionsPerTopic, + Map<String,ConsumerPartitionAssignor.Subscription> subscriptions)
      +
      Performs range assignment of the specified partitions for the consumers with the provided subscriptions. + If rack-awareness is enabled for one or more consumers, we perform rack-aware assignment first to assign + the subset of partitions that can be aligned on racks, while retaining the same co-partitioning and + per-topic balancing guarantees as non-rack-aware range assignment. The remaining partitions are assigned + using standard non-rack-aware range assignment logic, which may result in mis-aligned racks.
      +
      +
      Overrides:
      +
      assignPartitions in class org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignor
      +
      +
      +
    • +
    • +
      +

      assign

      +
      public Map<String,List<TopicPartition>> assign(Map<String,Integer> partitionsPerTopic, + Map<String,ConsumerPartitionAssignor.Subscription> subscriptions)
      +
      +
      Specified by:
      +
      assign in class org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignor
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/RetriableCommitFailedException.html b/static/41/javadoc/org/apache/kafka/clients/consumer/RetriableCommitFailedException.html new file mode 100644 index 000000000..a1b8db21a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/RetriableCommitFailedException.html @@ -0,0 +1,175 @@ + + + + +RetriableCommitFailedException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class RetriableCommitFailedException

+
+ +
+
+
All Implemented Interfaces:
+
Serializable
+
+
+
public class RetriableCommitFailedException +extends RetriableException
+
+
See Also:
+
+ +
+
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      RetriableCommitFailedException

      +
      public RetriableCommitFailedException(Throwable t)
      +
      +
    • +
    • +
      +

      RetriableCommitFailedException

      +
      public RetriableCommitFailedException(String message)
      +
      +
    • +
    • +
      +

      RetriableCommitFailedException

      +
      public RetriableCommitFailedException(String message, + Throwable t)
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/RoundRobinAssignor.html b/static/41/javadoc/org/apache/kafka/clients/consumer/RoundRobinAssignor.html new file mode 100644 index 000000000..46839fb3a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/RoundRobinAssignor.html @@ -0,0 +1,310 @@ + + + + +RoundRobinAssignor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class RoundRobinAssignor

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignor +
org.apache.kafka.clients.consumer.RoundRobinAssignor
+
+
+
+
+
All Implemented Interfaces:
+
ConsumerPartitionAssignor
+
+
+
public class RoundRobinAssignor +extends org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignor
+

The round robin assignor lays out all the available partitions and all the available consumers. It + then proceeds to do a round robin assignment from partition to consumer. If the subscriptions of all consumer + instances are identical, then the partitions will be uniformly distributed. (i.e., the partition ownership counts + will be within a delta of exactly one across all consumers.) + +

For example, suppose there are two consumers C0 and C1, two topics t0 and t1, + and each topic has 3 partitions, resulting in partitions t0p0, t0p1, t0p2, + t1p0, t1p1, and t1p2. + +

The assignment will be: +

    +
  • C0: [t0p0, t0p2, t1p1] +
  • C1: [t0p1, t1p0, t1p2] +
+ +

When subscriptions differ across consumer instances, the assignment process still considers each + consumer instance in round robin fashion but skips over an instance if it is not subscribed to + the topic. Unlike the case when subscriptions are identical, this can result in imbalanced + assignments. For example, we have three consumers C0, C1, C2, + and three topics t0, t1, t2, with 1, 2, and 3 partitions, respectively. + Therefore, the partitions are t0p0, t1p0, t1p1, t2p0, t2p1, t2p2. + C0 is subscribed to t0; + C1 is subscribed to t0, t1; + and C2 is subscribed to t0, t1, t2. + +

That assignment will be: +

    +
  • C0: [t0p0] +
  • C1: [t1p0] +
  • C2: [t1p1, t2p0, t2p1, t2p2] +
+ + Since the introduction of static membership, we could leverage group.instance.id to make the assignment behavior more sticky. + For example, we have three consumers with assigned member.id C0, C1, C2, + two topics t0 and t1, and each topic has 3 partitions, resulting in partitions t0p0, + t0p1, t0p2, t1p0, t1p1, and t1p2. We choose to honor + the sorted order based on ephemeral member.id. + +

The assignment will be: +

    +
  • C0: [t0p0, t1p0] +
  • C1: [t0p1, t1p1] +
  • C2: [t0p2, t1p2] +
+ + After one rolling bounce, the group coordinator will assign new member.id values to the consumers, + for example C0 -> C5, C1 -> C3, C2 -> C4. + +

The assignment could be completely shuffled to: +

    +
  • C3 (was C1): [t0p0, t1p0] (before was [t0p1, t1p1]) +
  • C4 (was C2): [t0p1, t1p1] (before was [t0p2, t1p2]) +
  • C5 (was C0): [t0p2, t1p2] (before was [t0p0, t1p0]) +
+ + This issue can be mitigated by static membership. Consumers will have individual instance ids + I0, I1, I2. The assignment remains stable as long as + 1. the number of members remains the same across generations, + 2. static members' identities persist across generations, and + 3. the subscription pattern doesn't change for any member. + +

The assignment will always be: +

    +
  • I0: [t0p0, t1p0] +
  • I1: [t0p1, t1p1] +
  • I2: [t0p2, t1p2] +
+
+
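+ For example, the round robin assignor can be selected through partition.assignment.strategy; it distributes partitions most + evenly when all members of the group share the same subscription. The values below are placeholders. +
+     Properties props = new Properties();
+     props.setProperty("bootstrap.servers", "localhost:9092");
+     props.setProperty("group.id", "my-group");
+     props.setProperty(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
+         RoundRobinAssignor.class.getName());
+ 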
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/ShareConsumer.html b/static/41/javadoc/org/apache/kafka/clients/consumer/ShareConsumer.html new file mode 100644 index 000000000..9f195acf5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/ShareConsumer.html @@ -0,0 +1,430 @@ + + + + +ShareConsumer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Interface ShareConsumer<K,V>

+
+
+
+
All Superinterfaces:
+
AutoCloseable, Closeable
+
+
+
All Known Implementing Classes:
+
KafkaShareConsumer, MockShareConsumer
+
+
+
@Evolving +public interface ShareConsumer<K,V> +extends Closeable
+
A client that consumes records from a Kafka cluster using a share group.
+
+
See Also:
+
+ +
+
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/StickyAssignor.html b/static/41/javadoc/org/apache/kafka/clients/consumer/StickyAssignor.html new file mode 100644 index 000000000..57bcd8c99 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/StickyAssignor.html @@ -0,0 +1,420 @@ + + + + +StickyAssignor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class StickyAssignor

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignor +
org.apache.kafka.clients.consumer.internals.AbstractStickyAssignor +
org.apache.kafka.clients.consumer.StickyAssignor
+
+
+
+
+
+
All Implemented Interfaces:
+
ConsumerPartitionAssignor
+
+
+
public class StickyAssignor +extends org.apache.kafka.clients.consumer.internals.AbstractStickyAssignor
+

The sticky assignor serves two purposes. First, it guarantees an assignment that is as balanced as possible, meaning either: +

    +
  • the numbers of topic partitions assigned to consumers differ by at most one; or
  • +
  • each consumer that has 2+ fewer topic partitions than some other consumer cannot get any of those topic partitions transferred to it.
  • +
+ Second, it preserves as many existing assignments as possible when a reassignment occurs. This helps to save some of the + processing overhead when topic partitions move from one consumer to another.

+ +

Starting fresh it would work by distributing the partitions over consumers as evenly as possible. Even though this may sound similar to + how round robin assignor works, the second example below shows that it is not. + During a reassignment it would perform the reassignment in such a way that in the new assignment +

    +
  1. topic partitions are still distributed as evenly as possible, and
  2. +
  3. topic partitions stay with their previously assigned consumers as much as possible.
  4. +
+ Of course, the first goal above takes precedence over the second one.

+ +

Example 1. Suppose there are three consumers C0, C1, C2, + four topics t0, t1, t2, t3, and each topic has 2 partitions, + resulting in partitions t0p0, t0p1, t1p0, t1p1, t2p0, + t2p1, t3p0, t3p1. Each consumer is subscribed to all three topics. + + The assignment with both sticky and round robin assignors will be: +

    +
  • C0: [t0p0, t1p1, t3p0]
  • +
  • C1: [t0p1, t2p0, t3p1]
  • +
  • C2: [t1p0, t2p1]
  • +
+ + Now, let's assume C1 is removed and a reassignment is about to happen. The round robin assignor would produce: +
    +
  • C0: [t0p0, t1p0, t2p0, t3p0]
  • +
  • C2: [t0p1, t1p1, t2p1, t3p1]
  • +
+ + while the sticky assignor would result in: +
    +
  • C0 [t0p0, t1p1, t3p0, t2p0]
  • +
  • C2 [t1p0, t2p1, t0p1, t3p1]
  • +
+ preserving all the previous assignments (unlike the round robin assignor). +

+

Example 2. There are three consumers C0, C1, C2, + and three topics t0, t1, t2, with 1, 2, and 3 partitions respectively. + Therefore, the partitions are t0p0, t1p0, t1p1, t2p0, + t2p1, t2p2. C0 is subscribed to t0; C1 is subscribed to + t0, t1; and C2 is subscribed to t0, t1, t2. + + The round robin assignor would come up with the following assignment: +

    +
  • C0 [t0p0]
  • +
  • C1 [t1p0]
  • +
  • C2 [t1p1, t2p0, t2p1, t2p2]
  • +
+ + which is not as balanced as the assignment suggested by sticky assignor: +
    +
  • C0 [t0p0]
  • +
  • C1 [t1p0, t1p1]
  • +
  • C2 [t2p0, t2p1, t2p2]
  • +
+ + Now, if consumer C0 is removed, these two assignors would produce the following assignments. + Round Robin (preserves 3 partition assignments): +
    +
  • C1 [t0p0, t1p1]
  • +
  • C2 [t1p0, t2p0, t2p1, t2p2]
  • +
+ + Sticky (preserves 5 partition assignments): +
    +
  • C1 [t1p0, t1p1, t0p0]
  • +
  • C2 [t2p0, t2p1, t2p2]
  • +
+

+

Impact on ConsumerRebalanceListener

+ The sticky assignment strategy can provide some optimization to those consumers that have some partition cleanup code + in their onPartitionsRevoked() callback listeners. The cleanup code is placed in that callback listener + because the consumer has no assumption or hope of preserving any of its assigned partitions after a rebalance when it + is using range or round robin assignor. The listener code would look like this: +
+ 
+ class TheOldRebalanceListener implements ConsumerRebalanceListener {
+
+   void onPartitionsRevoked(Collection<TopicPartition> partitions) {
+     for (TopicPartition partition: partitions) {
+       commitOffsets(partition);
+       cleanupState(partition);
+     }
+   }
+
+   void onPartitionsAssigned(Collection<TopicPartition> partitions) {
+     for (TopicPartition partition: partitions) {
+       initializeState(partition);
+       initializeOffset(partition);
+     }
+   }
+ }
+ 
+ 
+ + As mentioned above, one advantage of the sticky assignor is that, in general, it reduces the number of partitions that + actually move from one consumer to another during a reassignment. Therefore, it allows consumers to do their cleanup + more efficiently. Of course, they still can perform the partition cleanup in the onPartitionsRevoked() + listener, but they can be more efficient and make a note of their partitions before and after the rebalance, and do the + cleanup after the rebalance only on the partitions they have lost (which is normally not a lot). The code snippet below + clarifies this point: +
+ 
+ class TheNewRebalanceListener implements ConsumerRebalanceListener {
+   Collection<TopicPartition> lastAssignment = Collections.emptyList();
+
+   void onPartitionsRevoked(Collection<TopicPartition> partitions) {
+     for (TopicPartition partition: partitions)
+       commitOffsets(partition);
+   }
+
+   void onPartitionsAssigned(Collection<TopicPartition> assignment) {
+     for (TopicPartition partition: difference(lastAssignment, assignment))
+       cleanupState(partition);
+
+     for (TopicPartition partition: difference(assignment, lastAssignment))
+       initializeState(partition);
+
+     for (TopicPartition partition: assignment)
+       initializeOffset(partition);
+
+     this.lastAssignment = assignment;
+   }
+ }
+ 
+ 
+ + Any consumer that uses sticky assignment can leverage this listener like this: + consumer.subscribe(topics, new TheNewRebalanceListener()); + + Note that you can leverage the CooperativeStickyAssignor so that only partitions which are being + reassigned to another consumer will be revoked. That is the preferred assignor for newer clusters. See + ConsumerPartitionAssignor.RebalanceProtocol for a detailed explanation of cooperative rebalancing.
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/SubscriptionPattern.html b/static/41/javadoc/org/apache/kafka/clients/consumer/SubscriptionPattern.html new file mode 100644 index 000000000..3e03b87e9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/SubscriptionPattern.html @@ -0,0 +1,204 @@ + + + + +SubscriptionPattern (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class SubscriptionPattern

+
+
java.lang.Object +
org.apache.kafka.clients.consumer.SubscriptionPattern
+
+
+
+
public class SubscriptionPattern +extends Object
+
Represents a regular expression compatible with Google RE2/J, used to subscribe to topics. + This just keeps the String representation of the pattern, and all validations to ensure + it is RE2/J compatible are delegated to the broker.
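+ As a rough sketch, assuming the consumer exposes a subscribe overload that accepts a SubscriptionPattern, usage might look like the following; the topic prefix is purely illustrative:
+
+ // Subscribe to every topic whose name matches an RE2/J pattern; the broker validates the pattern.
+ SubscriptionPattern pattern = new SubscriptionPattern("orders-.*");
+ consumer.subscribe(pattern);                      // assumed overload
+ consumer.subscribe(pattern, rebalanceListener);   // assumed overload taking a ConsumerRebalanceListener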
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      SubscriptionPattern

      +
      public SubscriptionPattern(String pattern)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      pattern

      +
      public String pattern()
      +
      +
      Returns:
      +
      Regular expression pattern compatible with RE2/J.
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      +
      Overrides:
      +
      toString in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object obj)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/package-summary.html b/static/41/javadoc/org/apache/kafka/clients/consumer/package-summary.html new file mode 100644 index 000000000..682a12016 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/package-summary.html @@ -0,0 +1,236 @@ + + + + +org.apache.kafka.clients.consumer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+
+

Package org.apache.kafka.clients.consumer

+
+
+
package org.apache.kafka.clients.consumer
+
+
Provides a Kafka client for consuming records from topics and/or partitions in a Kafka cluster.
+
+
+ +
+
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/consumer/package-tree.html b/static/41/javadoc/org/apache/kafka/clients/consumer/package-tree.html new file mode 100644 index 000000000..d54e8e05c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/consumer/package-tree.html @@ -0,0 +1,181 @@ + + + + +org.apache.kafka.clients.consumer Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+
+

Hierarchy For Package org.apache.kafka.clients.consumer

+Package Hierarchies: + +
+
+

Class Hierarchy

+ +
+
+

Interface Hierarchy

+ +
+
+

Enum Class Hierarchy

+ +
+
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/producer/BufferExhaustedException.html b/static/41/javadoc/org/apache/kafka/clients/producer/BufferExhaustedException.html new file mode 100644 index 000000000..9f23660e5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/producer/BufferExhaustedException.html @@ -0,0 +1,164 @@ + + + + +BufferExhaustedException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class BufferExhaustedException

+
+ +
+
+
All Implemented Interfaces:
+
Serializable
+
+
+
public class BufferExhaustedException +extends TimeoutException
+
This exception is thrown if the producer cannot allocate memory for a record within max.block.ms due to the buffer + being too full. + + In earlier versions a TimeoutException was thrown instead of this. To keep existing catch-clauses working + this class extends TimeoutException.
+
+
See Also:
+
+ +
+
+
+
+ +
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      BufferExhaustedException

      +
      public BufferExhaustedException(String message)
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/producer/Callback.html b/static/41/javadoc/org/apache/kafka/clients/producer/Callback.html new file mode 100644 index 000000000..f06029316 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/producer/Callback.html @@ -0,0 +1,170 @@ + + + + +Callback (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Interface Callback

+
+
+
+
Functional Interface:
+
This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
+
+
+
@FunctionalInterface +public interface Callback
+
A callback interface that the user can implement to allow code to execute when the request is complete. This callback + will generally execute in the background I/O thread so it should be fast.
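+ Because Callback is a functional interface, it is typically supplied as a lambda; as a minimal sketch, where producer and record are assumed to exist:
+
+ producer.send(record, (metadata, exception) -> {
+     if (exception != null)
+         exception.printStackTrace();                                   // the send failed
+     else
+         System.out.println("Stored at offset " + metadata.offset());   // acknowledged by the broker
+ });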
+
+
+
    + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    +
    void
    +
    onCompletion(RecordMetadata metadata, + Exception exception)
    +
    +
    A callback method the user can implement to provide asynchronous handling of request completion.
    +
    +
    +
    +
    +
    +
  • +
+
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html b/static/41/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html new file mode 100644 index 000000000..295d02928 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html @@ -0,0 +1,1050 @@ + + + + +KafkaProducer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class KafkaProducer<K,V>

+
+
java.lang.Object +
org.apache.kafka.clients.producer.KafkaProducer<K,V>
+
+
+
+
All Implemented Interfaces:
+
Closeable, AutoCloseable, Producer<K,V>
+
+
+
public class KafkaProducer<K,V> +extends Object +implements Producer<K,V>
+
A Kafka client that publishes records to the Kafka cluster. +

+ The producer is thread safe and sharing a single producer instance across threads will generally be faster than + having multiple instances. +

+ Here is a simple example of using the producer to send records with strings containing sequential numbers as the key/value + pairs. +

+ 
+ Properties props = new Properties();
+ props.put("bootstrap.servers", "localhost:9092");
+ props.put("linger.ms", 1);
+ props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+ props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+
+ Producer<String, String> producer = new KafkaProducer<>(props);
+ for (int i = 0; i < 100; i++)
+     producer.send(new ProducerRecord<String, String>("my-topic", Integer.toString(i), Integer.toString(i)));
+
+ producer.close();
+ 
+

+ The producer consists of a pool of buffer space that holds records that haven't yet been transmitted to the server + as well as a background I/O thread that is responsible for turning these records into requests and transmitting them + to the cluster. Failure to close the producer after use will leak these resources. +

+ The send() method is asynchronous. When called, it adds the record to a buffer of pending record sends + and immediately returns. This allows the producer to batch together individual records for efficiency. +

+ The acks config controls the criteria under which requests are considered complete. The default setting "all" + will result in blocking on the full commit of the record, the slowest but most durable setting. +

+ If the request fails, the producer can automatically retry. The retries setting defaults to Integer.MAX_VALUE, and + it's recommended to use delivery.timeout.ms to control retry behavior, instead of retries. +

+ The producer maintains buffers of unsent records for each partition. These buffers are of a size specified by + the batch.size config. Making this larger can result in more batching, but requires more memory (since we will + generally have one of these buffers for each active partition). +

+ By default a buffer is available to send immediately even if there is additional unused space in the buffer. However if you + want to reduce the number of requests you can set linger.ms to something greater than 0. This will + instruct the producer to wait up to that number of milliseconds before sending a request in hope that more records will + arrive to fill up the same batch. This is analogous to Nagle's algorithm in TCP. For example, in the code snippet above, + likely all 100 records would be sent in a single request since we set our linger time to 1 millisecond. However this setting + would add 1 millisecond of latency to our request waiting for more records to arrive if we didn't fill up the buffer. Note that + records that arrive close together in time will generally batch together even with linger.ms=0. So, under heavy load, + batching will occur regardless of the linger configuration; however setting this to something larger than 0 can lead to fewer, more + efficient requests when not under maximal load at the cost of a small amount of latency. +

+ The buffer.memory controls the total amount of memory available to the producer for buffering. If records + are sent faster than they can be transmitted to the server then this buffer space will be exhausted. When the buffer space is + exhausted additional send calls will block. The threshold for time to block is determined by max.block.ms after which it returns + a failed future with BufferExhaustedException. +

+ The key.serializer and value.serializer instruct how to turn the key and value objects the user provides with + their ProducerRecord into bytes. You can use the included ByteArraySerializer or + StringSerializer for simple byte or string types. +

+ From Kafka 0.11, the KafkaProducer supports two additional modes: the idempotent producer and the transactional producer. + The idempotent producer strengthens Kafka's delivery semantics from at least once to exactly once delivery. In particular + producer retries will no longer introduce duplicates. The transactional producer allows an application to send messages + to multiple partitions (and topics!) atomically. +

+

+ From Kafka 3.0, the enable.idempotence configuration defaults to true. When enabling idempotence, + retries config will default to Integer.MAX_VALUE and the acks config will + default to all. There are no API changes for the idempotent producer, so existing applications will + not need to be modified to take advantage of this feature. +

+

+ To take advantage of the idempotent producer, it is imperative to avoid application level re-sends since these cannot + be de-duplicated. As such, if an application enables idempotence, it is recommended to leave the retries + config unset, as it will be defaulted to Integer.MAX_VALUE. Additionally, if a send(ProducerRecord) + returns an error even with infinite retries (for instance if the message expires in the buffer before being sent), + then it is recommended to shut down the producer and check the contents of the last produced message to ensure that + it is not duplicated. Finally, the producer can only guarantee idempotence for messages sent within a single session. +

+

To use the transactional producer and the attendant APIs, you must set the transactional.id + configuration property. If the transactional.id is set, idempotence is automatically enabled along with + the producer configs which idempotence depends on. Further, topics which are included in transactions should be configured + for durability. In particular, the replication.factor should be at least 3, and the + min.insync.replicas for these topics should be set to 2. Finally, in order for transactional guarantees + to be realized from end-to-end, the consumers must be configured to read only committed messages as well. +

+

+ The purpose of the transactional.id is to enable transaction recovery across multiple sessions of a + single producer instance. It would typically be derived from the shard identifier in a partitioned, stateful, application. + As such, it should be unique to each producer instance running within a partitioned application. +

+

All the new transactional APIs are blocking and will throw exceptions on failure. The example + below illustrates how the new APIs are meant to be used. It is similar to the example above, except that all + 100 messages are part of a single transaction. +

+

+

+ 
+ Properties props = new Properties();
+ props.put("bootstrap.servers", "localhost:9092");
+ props.put("transactional.id", "my-transactional-id");
+ Producer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer());
+
+ producer.initTransactions();
+
+ try {
+     producer.beginTransaction();
+     for (int i = 0; i < 100; i++)
+         producer.send(new ProducerRecord<>("my-topic", Integer.toString(i), Integer.toString(i)));
+     producer.commitTransaction();
+ } catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
+     // We can't recover from these exceptions, so our only option is to close the producer and exit.
+     producer.close();
+ } catch (KafkaException e) {
+     // For all other exceptions, just abort the transaction and try again.
+     producer.abortTransaction();
+ }
+ producer.close();
+  
+

+

+ As is hinted at in the example, there can be only one open transaction per producer. All messages sent between the + beginTransaction() and commitTransaction() calls will be part of a single transaction. When the + transactional.id is specified, all messages sent by the producer must be part of a transaction. +

+

+ The transactional producer uses exceptions to communicate error states. In particular, it is not required + to specify callbacks for producer.send() or to call .get() on the returned Future: a + KafkaException would be thrown if any of the + producer.send() or transactional calls hit an irrecoverable error during a transaction. See the send(ProducerRecord) + documentation for more details about detecting errors from a transactional send. +

+

By calling + producer.abortTransaction() upon receiving a KafkaException we can ensure that any + successful writes are marked as aborted, hence keeping the transactional guarantees. +

+

+ This client can communicate with brokers that are version 0.10.0 or newer. Older or newer brokers may not support + certain client features. For instance, the transactional APIs need broker versions 0.11.0 or later. You will receive an + UnsupportedVersionException when invoking an API that is not available in the running broker version. +

+
+
+ +
+
+
    + +
  • +
    +

    Field Details

    +
      +
    • +
      +

      NETWORK_THREAD_PREFIX

      +
      public static final String NETWORK_THREAD_PREFIX
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      PRODUCER_METRIC_GROUP_NAME

      +
      public static final String PRODUCER_METRIC_GROUP_NAME
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      KafkaProducer

      +
      public KafkaProducer(Map<String,Object> configs)
      +
      A producer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings + are documented here. Values can be + either strings or Objects of the appropriate type (for example a numeric configuration would accept either the + string "42" or the integer 42). +

      + Note: after creating a KafkaProducer you must always close() it to avoid resource leaks.

      +
      +
      Parameters:
      +
      configs - The producer configs
      +
      +
      +
    • +
    • +
      +

      KafkaProducer

      +
      public KafkaProducer(Map<String,Object> configs, + Serializer<K> keySerializer, + Serializer<V> valueSerializer)
      +
      A producer is instantiated by providing a set of key-value pairs as configuration, a key and a value Serializer. + Valid configuration strings are documented here. + Values can be either strings or Objects of the appropriate type (for example a numeric configuration would accept + either the string "42" or the integer 42). +

      + Note: after creating a KafkaProducer you must always close() it to avoid resource leaks.

      +
      +
      Parameters:
      +
      configs - The producer configs
      +
      keySerializer - The serializer for key that implements Serializer. The configure() method won't be + called in the producer when the serializer is passed in directly.
      +
      valueSerializer - The serializer for value that implements Serializer. The configure() method won't + be called in the producer when the serializer is passed in directly.
      +
      +
      +
    • +
    • +
      +

      KafkaProducer

      +
      public KafkaProducer(Properties properties)
      +
      A producer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings + are documented here. +

      + Note: after creating a KafkaProducer you must always close() it to avoid resource leaks.

      +
      +
      Parameters:
      +
      properties - The producer configs
      +
      +
      +
    • +
    • +
      +

      KafkaProducer

      +
      public KafkaProducer(Properties properties, + Serializer<K> keySerializer, + Serializer<V> valueSerializer)
      +
      A producer is instantiated by providing a set of key-value pairs as configuration, a key and a value Serializer. + Valid configuration strings are documented here. +

      + Note: after creating a KafkaProducer you must always close() it to avoid resource leaks.

      +
      +
      Parameters:
      +
      properties - The producer configs
      +
      keySerializer - The serializer for key that implements Serializer. The configure() method won't be + called in the producer when the serializer is passed in directly.
      +
      valueSerializer - The serializer for value that implements Serializer. The configure() method won't + be called in the producer when the serializer is passed in directly.
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      initTransactions

      +
      public void initTransactions()
      +
      Needs to be called before any other methods when the transactional.id is set in the configuration. + This method does the following: +
      1. Ensures any transactions initiated by previous instances of the producer with the same transactional.id are completed. If the previous instance had failed with a transaction in progress, it will be aborted. If the last transaction had begun completion, but not yet finished, this method awaits its completion.
      2. Gets the internal producer id and epoch, used in all future transactional messages issued by the producer.
      + Note that this method will raise TimeoutException if the transactional state cannot + be initialized before expiration of max.block.ms. Additionally, it will raise InterruptException + if interrupted. It is safe to retry in either case, but once the transactional state has been successfully + initialized, this method should no longer be used.
      +
      +
      Specified by:
      +
      initTransactions in interface Producer<K,V>
      +
      Throws:
      +
      IllegalStateException - if no transactional.id has been configured
      +
      UnsupportedVersionException - fatal error indicating the broker + does not support transactions (i.e. if its version is lower than 0.11.0.0)
      +
      AuthorizationException - error indicating that the configured + transactional.id is not authorized, or the idempotent producer id is unavailable. See the exception for + more details. User may retry this function call after fixing the permission.
      +
      KafkaException - if the producer has encountered a previous fatal error or for any other unexpected error
      +
      TimeoutException - if the time taken for initialize the transaction has surpassed max.block.ms.
      +
      InterruptException - if the thread is interrupted while blocked
      +
      +
      +
    • +
    • +
      +

      beginTransaction

      +
      public void beginTransaction() + throws ProducerFencedException
      +
      Should be called before the start of each new transaction. Note that prior to the first invocation + of this method, you must invoke initTransactions() exactly one time.
      +
      +
      Specified by:
      +
      beginTransaction in interface Producer<K,V>
      +
      Throws:
      +
      IllegalStateException - if no transactional.id has been configured or if initTransactions() + has not yet been invoked
      +
      ProducerFencedException - if another producer with the same transactional.id is active
      +
      InvalidProducerEpochException - if the producer has attempted to produce with an old epoch + to the partition leader. See the exception for more details
      +
      UnsupportedVersionException - fatal error indicating the broker + does not support transactions (i.e. if its version is lower than 0.11.0.0)
      +
      AuthorizationException - fatal error indicating that the configured + transactional.id is not authorized. See the exception for more details
      +
      KafkaException - if the producer has encountered a previous fatal error or for any other unexpected error
      +
      +
      +
    • +
    • +
      +

      sendOffsetsToTransaction

      +
      public void sendOffsetsToTransaction(Map<TopicPartition,OffsetAndMetadata> offsets, + ConsumerGroupMetadata groupMetadata) + throws ProducerFencedException
      +
      Sends a list of specified offsets to the consumer group coordinator, and also marks + those offsets as part of the current transaction. These offsets will be considered + committed only if the transaction is committed successfully. The committed offset should + be the next message your application will consume, i.e. nextRecordToBeProcessed.offset() + (or ConsumerRecords.nextOffsets()). You should also add the leader epoch as commit metadata, + which can be obtained from ConsumerRecord.leaderEpoch() or ConsumerRecords.nextOffsets(). +

      + This method should be used when you need to batch consumed and produced messages + together, typically in a consume-transform-produce pattern. Thus, the specified + groupMetadata should be extracted from the used consumer via + KafkaConsumer.groupMetadata() to leverage consumer group metadata. This will provide + stronger fencing than just supplying the consumerGroupId and passing in new ConsumerGroupMetadata(consumerGroupId), + however note that the full set of consumer group metadata returned by KafkaConsumer.groupMetadata() + requires the brokers to be on version 2.5 or newer to understand. + +

      + This method is a blocking call that waits until the request has been received and acknowledged by the consumer group + coordinator; but the offsets are not considered as committed until the transaction itself is successfully committed later (via + the commitTransaction() call). + +

      + Note, that the consumer should have enable.auto.commit=false and should + also not commit offsets manually (via sync or + async commits). + This method will raise TimeoutException if the producer cannot send offsets before expiration of max.block.ms. + Additionally, it will raise InterruptException if interrupted.

      +
      +
      Specified by:
      +
      sendOffsetsToTransaction in interface Producer<K,V>
      +
      Throws:
      +
      IllegalStateException - if no transactional.id has been configured or no transaction has been started.
      +
      ProducerFencedException - fatal error indicating another producer with the same transactional.id is active
      +
      UnsupportedVersionException - fatal error indicating the broker + does not support transactions (i.e. if its version is lower than 0.11.0.0) or + the broker doesn't support the latest version of transactional API with all consumer group metadata + (i.e. if its version is lower than 2.5.0).
      +
      UnsupportedForMessageFormatException - fatal error indicating the message + format used for the offsets topic on the broker does not support transactions
      +
      AuthorizationException - fatal error indicating that the configured + transactional.id is not authorized, or the consumer group id is not authorized.
      +
      CommitFailedException - if the commit failed and cannot be retried + (e.g. if the consumer has been kicked out of the group). Users should handle this by aborting the transaction.
      +
      FencedInstanceIdException - if this producer instance gets fenced by broker due to a + mis-configured consumer instance id within group metadata.
      +
      InvalidProducerEpochException - if the producer has attempted to produce with an old epoch + to the partition leader. See the exception for more details
      +
      KafkaException - if the producer has encountered a previous fatal or abortable error, or for any + other unexpected error
      +
      TimeoutException - if the time taken for sending the offsets has surpassed max.block.ms.
      +
      InterruptException - if the thread is interrupted while blocked
      +
      +
      +
    • +
    • +
      +

      commitTransaction

      +
      public void commitTransaction() + throws ProducerFencedException
      +
      Commits the ongoing transaction. This method will flush any unsent records before actually committing the transaction. +

      + Further, if any of the send(ProducerRecord) calls which were part of the transaction hit irrecoverable + errors, this method will throw the last received exception immediately and the transaction will not be committed. + So all send(ProducerRecord) calls in a transaction must succeed in order for this method to succeed. +

      + If the transaction is committed successfully and this method returns without throwing an exception, it is guaranteed + that all callbacks for records in the transaction will have been invoked and completed. + Note that exceptions thrown by callbacks are ignored; the producer proceeds to commit the transaction in any case. +

      + Note that this method will raise TimeoutException if the transaction cannot be committed before expiration + of max.block.ms, but this does not mean the request did not actually reach the broker. In fact, it only indicates + that we cannot get the acknowledgement response in time, so it's up to the application's logic + to decide how to handle timeouts. + Additionally, it will raise InterruptException if interrupted. + It is safe to retry in either case, but it is not possible to attempt a different operation (such as abortTransaction) + since the commit may already be in the progress of completing. If not retrying, the only option is to close the producer.

      +
      +
      Specified by:
      +
      commitTransaction in interface Producer<K,V>
      +
      Throws:
      +
      IllegalStateException - if no transactional.id has been configured or no transaction has been started
      +
      ProducerFencedException - fatal error indicating another producer with the same transactional.id is active
      +
      UnsupportedVersionException - fatal error indicating the broker + does not support transactions (i.e. if its version is lower than 0.11.0.0)
      +
      AuthorizationException - fatal error indicating that the configured + transactional.id is not authorized. See the exception for more details
      +
      InvalidProducerEpochException - if the producer has attempted to produce with an old epoch + to the partition leader. See the exception for more details
      +
      KafkaException - if the producer has encountered a previous fatal or abortable error, or for any + other unexpected error
      +
      TimeoutException - if the time taken for committing the transaction has surpassed max.block.ms.
      +
      InterruptException - if the thread is interrupted while blocked
      +
      +
      +
    • +
    • +
      +

      abortTransaction

      +
      public void abortTransaction() + throws ProducerFencedException
      +
      Aborts the ongoing transaction. Any unflushed produce messages will be aborted when this call is made. + This call will throw an exception immediately if any prior send(ProducerRecord) calls failed with a + ProducerFencedException or an instance of AuthorizationException. +

      + Note that this method will raise TimeoutException if the transaction cannot be aborted before expiration + of max.block.ms, but this does not mean the request did not actually reach the broker. In fact, it only indicates + that we cannot get the acknowledgement response in time, so it's up to the application's logic + to decide how to handle timeouts. Additionally, it will raise InterruptException if interrupted. + It is safe to retry in either case, but it is not possible to attempt a different operation (such as commitTransaction()) + since the abort may already be in the progress of completing. If not retrying, the only option is to close the producer.

      +
      +
      Specified by:
      +
      abortTransaction in interface Producer<K,V>
      +
      Throws:
      +
      IllegalStateException - if no transactional.id has been configured or no transaction has been started
      +
      ProducerFencedException - fatal error indicating another producer with the same transactional.id is active
      +
      InvalidProducerEpochException - if the producer has attempted to produce with an old epoch + to the partition leader. See the exception for more details
      +
      UnsupportedVersionException - fatal error indicating the broker + does not support transactions (i.e. if its version is lower than 0.11.0.0)
      +
      AuthorizationException - fatal error indicating that the configured + transactional.id is not authorized. See the exception for more details
      +
      KafkaException - if the producer has encountered a previous fatal error or for any other unexpected error
      +
      TimeoutException - if the time taken for aborting the transaction has surpassed max.block.ms.
      +
      InterruptException - if the thread is interrupted while blocked
      +
      +
      +
    • +
    • +
      +

      send

      +
      public Future<RecordMetadata> send(ProducerRecord<K,V> record)
      +
      Asynchronously send a record to a topic. Equivalent to send(record, null). + See send(ProducerRecord, Callback) for details.
      +
      +
      Specified by:
      +
      send in interface Producer<K,V>
      +
      +
      +
    • +
    • +
      +

      send

      +
      public Future<RecordMetadata> send(ProducerRecord<K,V> record, + Callback callback)
      +
      Asynchronously send a record to a topic and invoke the provided callback when the send has been acknowledged. +

+ The send is asynchronous and this method will return immediately (except for rare cases described below) once the record has been stored in the buffer of records waiting to be sent. This allows sending many records in parallel without blocking to wait for the response after each one. It can block in the following cases: 1) for the first record being sent to the cluster by this client for the given topic, in which case it will block for up to max.block.ms milliseconds if the Kafka cluster is unreachable; 2) when allocating a buffer if the buffer pool doesn't have any free buffers.

      + The result of the send is a RecordMetadata specifying the partition the record was sent to, the offset + it was assigned and the timestamp of the record. If the producer is configured with acks = 0, the RecordMetadata + will have offset = -1 because the producer does not wait for the acknowledgement from the broker. + If CreateTime is used by the topic, the timestamp + will be the user provided timestamp or the record send time if the user did not specify a timestamp for the + record. If LogAppendTime is used for the + topic, the timestamp will be the Kafka broker local time when the message is appended. +

      + Since the send call is asynchronous it returns a Future for the + RecordMetadata that will be assigned to this record. Invoking get() on this future will block until the associated request completes and then return the metadata for the record + or throw any exception that occurred while sending the record. +

      + If you want to simulate a simple blocking call you can call the get() method immediately: + +

      + 
      + byte[] key = "key".getBytes();
      + byte[] value = "value".getBytes();
      + ProducerRecord<byte[],byte[]> record = new ProducerRecord<byte[],byte[]>("my-topic", key, value)
      + producer.send(record).get();
      + 
      +

      + Fully non-blocking usage can make use of the Callback parameter to provide a callback that + will be invoked when the request is complete. + +

      + 
      + ProducerRecord<byte[],byte[]> record = new ProducerRecord<byte[],byte[]>("the-topic", key, value);
      + producer.send(myRecord,
      +               new Callback() {
      +                   public void onCompletion(RecordMetadata metadata, Exception e) {
      +                       if(e != null) {
      +                          e.printStackTrace();
      +                       } else {
      +                          System.out.println("The offset of the record we just sent is: " + metadata.offset());
      +                       }
      +                   }
      +               });
      + 
      + 
      + + Callbacks for records being sent to the same partition are guaranteed to execute in order. That is, in the + following example callback1 is guaranteed to execute before callback2: + +
      + 
      + producer.send(new ProducerRecord<byte[],byte[]>(topic, partition, key1, value1), callback1);
      + producer.send(new ProducerRecord<byte[],byte[]>(topic, partition, key2, value2), callback2);
      + 
      + 
      +

      + When used as part of a transaction, it is not necessary to define a callback or check the result of the future + in order to detect errors from send. If any of the send calls failed with an irrecoverable error, + the final commitTransaction() call will fail and throw the exception from the last failed send. When + this happens, your application should call abortTransaction() to reset the state and continue to send + data. +

      +

+ Some transactional send errors cannot be resolved with a call to abortTransaction(). In particular, if a transactional send finishes with a ProducerFencedException, an OutOfOrderSequenceException, an UnsupportedVersionException, or an AuthorizationException, then the only option left is to call close(). Fatal errors cause the producer to enter a defunct state in which future API calls will continue to raise the same underlying error wrapped in a new KafkaException.

      +

      + It is a similar picture when idempotence is enabled, but no transactional.id has been configured. + In this case, UnsupportedVersionException and + AuthorizationException are considered fatal errors. However, + ProducerFencedException does not need to be handled. Additionally, it is possible to continue + sending after receiving an OutOfOrderSequenceException, but doing so + can result in out of order delivery of pending messages. To ensure proper ordering, you should close the + producer and create a new instance. +

      +

      + If the message format of the destination topic is not upgraded to 0.11.0.0, idempotent and transactional + produce requests will fail with an UnsupportedForMessageFormatException + error. If this is encountered during a transaction, it is possible to abort and continue. But note that future + sends to the same topic will continue receiving the same exception until the topic is upgraded. +

      +

      + Note that callbacks will generally execute in the I/O thread of the producer and so should be reasonably fast or + they will delay the sending of messages from other threads. If you want to execute blocking or computationally + expensive callbacks it is recommended to use your own Executor in the callback body + to parallelize processing.
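+ As one possible pattern, the expensive work can be handed to an application-owned executor so that only the hand-off runs on the I/O thread; the callbackExecutor and handleResult() names are illustrative:
+
+ ExecutorService callbackExecutor = Executors.newFixedThreadPool(4);   // owned and closed by the application
+ producer.send(record, (metadata, exception) ->
+     // The lambda runs on the producer I/O thread; only the hand-off happens here.
+     callbackExecutor.submit(() -> handleResult(metadata, exception)));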

      +
      +
      Specified by:
      +
      send in interface Producer<K,V>
      +
      Parameters:
      +
      record - The record to send
      +
      callback - A user-supplied callback to execute when the record has been acknowledged by the server (null + indicates no callback)
      +
      Throws:
      +
      IllegalStateException - if a transactional.id has been configured and no transaction has been started, or + when send is invoked after producer has been closed.
      +
      InterruptException - If the thread is interrupted while blocked
      +
      SerializationException - If the key or value are not valid objects given the configured serializers
      +
      KafkaException - If a Kafka related error occurs that does not belong to the public API exceptions.
      +
      +
      +
    • +
    • +
      +

      flush

      +
      public void flush()
      +
      Invoking this method makes all buffered records immediately available to send (even if linger.ms is + greater than 0) and blocks on the completion of the requests associated with these records. The post-condition + of flush() is that any previously sent record will have completed (e.g. Future.isDone() == true + and callbacks passed to send(ProducerRecord,Callback) have been called). + A request is considered completed when it is successfully acknowledged + according to the acks configuration you have specified or else it results in an error. +

      + Other threads can continue sending records while one thread is blocked waiting for a flush call to complete, + however no guarantee is made about the completion of records sent after the flush call begins. +

      + This method can be useful when consuming from some input system and producing into Kafka. The flush() call + gives a convenient way to ensure all previously sent messages have actually completed. +

      + This example shows how to consume from one Kafka topic and produce to another Kafka topic: +

      + 
      + for(ConsumerRecord<String, String> record: consumer.poll(100))
+     producer.send(new ProducerRecord("my-topic", record.key(), record.value()));
      + producer.flush();
      + consumer.commitSync();
      + 
      + 
      + + Note that the above example may drop records if the produce request fails. If we want to ensure that this does not occur + we need to set retries=<large_number> in our config. +

      +

      + Applications don't need to call this method for transactional producers, since the commitTransaction() will + flush all buffered records before performing the commit. This ensures that all the send(ProducerRecord) + calls made since the previous beginTransaction() are completed before the commit. +

      +

      + Important: This method must not be called from within the callback provided to + send(ProducerRecord, Callback). Invoking flush() in this context will result in a + KafkaException being thrown, as it will cause a deadlock. +

      +
      +
      Specified by:
      +
      flush in interface Producer<K,V>
      +
      Throws:
      +
      InterruptException - If the thread is interrupted while blocked
      +
      KafkaException - If the method is invoked inside a send(ProducerRecord, Callback) callback
      +
      +
      +
    • +
    • +
      +

      partitionsFor

      +
      public List<PartitionInfo> partitionsFor(String topic)
      +
      Get the partition metadata for the given topic. This can be used for custom partitioning.
      +
      +
      Specified by:
      +
      partitionsFor in interface Producer<K,V>
      +
      Throws:
      +
      AuthenticationException - if authentication fails. See the exception for more details
      +
      AuthorizationException - if not authorized to the specified topic. See the exception for more details
      +
      InterruptException - if the thread is interrupted while blocked
      +
      TimeoutException - if metadata could not be refreshed within max.block.ms
      +
      KafkaException - for all Kafka-related exceptions, including the case where this method is called after producer close
      +
      +
      +
    • +
    • +
      +

      metrics

      +
      public Map<MetricName,? extends Metric> metrics()
      +
      Get the full set of internal metrics maintained by the producer.
      +
      +
      Specified by:
      +
      metrics in interface Producer<K,V>
      +
      +
      +
    • +
    • +
      +

      registerMetricForSubscription

      +
      public void registerMetricForSubscription(KafkaMetric metric)
      +
      Add the provided application metric for subscription. + This metric will be added to this client's metrics + that are available for subscription and sent as + telemetry data to the broker. + The provided metric must map to an OTLP metric data point + type in the OpenTelemetry v1 metrics protobuf message types. + Specifically, the metric should be one of the following: +
      • `Sum`: Monotonic total count meter (Counter). Suitable for metrics like total number of X, e.g., total bytes sent.
      • `Gauge`: Non-monotonic current value meter (UpDownCounter). Suitable for metrics like current value of Y, e.g., current queue count.
      + Metrics not matching these types are silently ignored. + Executing this method for a previously registered metric is a benign operation and results in updating that metrics entry.
      +
      +
      Specified by:
      +
      registerMetricForSubscription in interface Producer<K,V>
      +
      Parameters:
      +
      metric - The application metric to register
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      unregisterMetricFromSubscription

      +
      public void unregisterMetricFromSubscription(KafkaMetric metric)
      +
      Remove the provided application metric for subscription. + This metric is removed from this client's metrics + and will not be available for subscription any longer. + Executing this method with a metric that has not been registered is a + benign operation and does not result in any action taken (no-op).
      +
      +
      Specified by:
      +
      unregisterMetricFromSubscription in interface Producer<K,V>
      +
      Parameters:
      +
      metric - The application metric to remove
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      clientInstanceId

      +
      public Uuid clientInstanceId(Duration timeout)
      +
      Determines the client's unique client instance ID used for telemetry. This ID is unique to + this specific client instance and will not change after it is initially generated. + The ID is useful for correlating client operations with telemetry sent to the broker and + to its eventual monitoring destinations. +

      + If telemetry is enabled, this will first require a connection to the cluster to generate + the unique client instance ID. This method waits up to timeout for the producer + client to complete the request. +

      + Client telemetry is controlled by the ProducerConfig.ENABLE_METRICS_PUSH_CONFIG + configuration option.

      +
      +
      Specified by:
      +
      clientInstanceId in interface Producer<K,V>
      +
      Parameters:
      +
      timeout - The maximum time to wait for producer client to determine its client instance ID. + The value must be non-negative. Specifying a timeout of zero means do not + wait for the initial request to complete if it hasn't already.
      +
      Returns:
      +
      The client's assigned instance id used for metrics collection.
      +
      Throws:
      +
      InterruptException - If the thread is interrupted while blocked.
      +
      KafkaException - If an unexpected error occurs while trying to determine the client + instance ID, though this error does not necessarily imply the + producer client is otherwise unusable.
      +
      IllegalArgumentException - If the timeout is negative.
      +
      IllegalStateException - If telemetry is not enabled, i.e., config `enable.metrics.push` is set to `false`.
      +
      +
      +
    • +
    • +
      +

      close

      +
      public void close()
      +
      Close this producer. This method blocks until all previously sent requests complete. + This method is equivalent to close(Long.MAX_VALUE, TimeUnit.MILLISECONDS). +

      + If close() is called from Callback, a warning message will be logged and close(0, TimeUnit.MILLISECONDS) + will be called instead. We do this because the sender thread would otherwise try to join itself and + block forever. +

      +
      +
      Specified by:
      +
      close in interface AutoCloseable
      +
      Specified by:
      +
      close in interface Closeable
      +
      Specified by:
      +
      close in interface Producer<K,V>
      +
      Throws:
      +
      InterruptException - If the thread is interrupted while blocked.
      +
      KafkaException - If an unexpected error occurs while trying to close the client, this error should be treated + as fatal and indicate the client is no longer usable.
      +
      +
      +
    • +
    • +
      +

      close

      +
      public void close(Duration timeout)
      +
      This method waits up to timeout for the producer to complete the sending of all incomplete requests. +

      + If the producer is unable to complete all requests before the timeout expires, this method will fail + any unsent and unacknowledged records immediately. It will also abort the ongoing transaction if it's not + already completing. +

      + If invoked from within a Callback this method will not block and will be equivalent to + close(Duration.ofMillis(0)). This is done since no further sending will happen while + blocking the I/O thread of the producer.

      +
      +
      Specified by:
      +
      close in interface Producer<K,V>
      +
      Parameters:
      +
      timeout - The maximum time to wait for producer to complete any pending requests. The value should be + non-negative. Specifying a timeout of zero means do not wait for pending send requests to complete.
      +
      Throws:
      +
      InterruptException - If the thread is interrupted while blocked.
      +
      KafkaException - If an unexpected error occurs while trying to close the client, this error should be treated + as fatal and indicate the client is no longer usable.
      +
      IllegalArgumentException - If the timeout is negative.
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/producer/MockProducer.html b/static/41/javadoc/org/apache/kafka/clients/producer/MockProducer.html new file mode 100644 index 000000000..aa436dbf8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/producer/MockProducer.html @@ -0,0 +1,822 @@ + + + + +MockProducer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class MockProducer<K,V>

+
+
java.lang.Object +
org.apache.kafka.clients.producer.MockProducer<K,V>
+
+
+
+
All Implemented Interfaces:
+
Closeable, AutoCloseable, Producer<K,V>
+
+
+
public class MockProducer<K,V> +extends Object +implements Producer<K,V>
+
A mock of the producer interface you can use for testing code that uses Kafka. +

+ By default this mock will synchronously complete each send call successfully. However it can be configured to allow + the user to control the completion of the call and supply an optional error for the producer to throw.
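+ As a small test sketch, assuming the constructor that takes an autoComplete flag plus key and value serializers, and the history() accessor for captured records:
+
+ MockProducer<String, String> producer =
+     new MockProducer<>(true, new StringSerializer(), new StringSerializer());   // autoComplete = true (assumed constructor)
+
+ producer.send(new ProducerRecord<>("my-topic", "key", "value"));
+
+ // Records are captured in memory for assertions instead of being sent to a broker.
+ assert producer.history().size() == 1;
+ assert "value".equals(producer.history().get(0).value());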

+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/producer/Partitioner.html b/static/41/javadoc/org/apache/kafka/clients/producer/Partitioner.html new file mode 100644 index 000000000..954e219df --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/producer/Partitioner.html @@ -0,0 +1,180 @@ + + + + +Partitioner (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Interface Partitioner

+
+
+
+
All Superinterfaces:
+
AutoCloseable, Closeable, Configurable
+
+
+
All Known Implementing Classes:
+
RoundRobinPartitioner
+
+
+
public interface Partitioner +extends Configurable, Closeable
+
Partitioner Interface +
+ Implement Monitorable to enable the partitioner to register metrics. The following tags are automatically added to + all metrics registered: config set to partitioner.class, and class set to the Partitioner class name.
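+ As an illustrative sketch, a custom partitioner might route keyed records by a hash of the serialized key and send unkeyed records to partition 0; the class name and hashing choice are arbitrary:
+
+ public class KeyHashPartitioner implements Partitioner {
+
+     @Override
+     public void configure(Map<String, ?> configs) { }   // from Configurable; nothing to configure here
+
+     @Override
+     public int partition(String topic, Object key, byte[] keyBytes,
+                          Object value, byte[] valueBytes, Cluster cluster) {
+         int numPartitions = cluster.partitionsForTopic(topic).size();
+         if (keyBytes == null)
+             return 0;                                    // arbitrary choice for unkeyed records
+         return (Arrays.hashCode(keyBytes) & 0x7fffffff) % numPartitions;
+     }
+
+     @Override
+     public void close() { }                              // no resources to release
+ }
+
+ Such a class would be wired in through the partitioner.class producer configuration.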
+
+
+
    + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    +
    void
    + +
    +
    This is called when partitioner is closed.
    +
    +
    int
    +
    partition(String topic, + Object key, + byte[] keyBytes, + Object value, + byte[] valueBytes, + Cluster cluster)
    +
    +
    Compute the partition for the given record.
    +
    +
    +
    +
    +
    +

    Methods inherited from interface org.apache.kafka.common.Configurable

    +configure
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      partition

      +
      int partition(String topic, + Object key, + byte[] keyBytes, + Object value, + byte[] valueBytes, + Cluster cluster)
      +
      Compute the partition for the given record.
      +
      +
      Parameters:
      +
      topic - The topic name
      +
      key - The key to partition on (or null if no key)
      +
      keyBytes - The serialized key to partition on( or null if no key)
      +
      value - The value to partition on or null
      +
      valueBytes - The serialized value to partition on or null
      +
      cluster - The current cluster metadata
      +
      +
      +
    • +
    • +
      +

      close

      +
      void close()
      +
      This is called when partitioner is closed.
      +
      +
      Specified by:
      +
      close in interface AutoCloseable
      +
      Specified by:
      +
      close in interface Closeable
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/producer/PreparedTxnState.html b/static/41/javadoc/org/apache/kafka/clients/producer/PreparedTxnState.html new file mode 100644 index 000000000..b22a100d5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/producer/PreparedTxnState.html @@ -0,0 +1,253 @@ + + + + +PreparedTxnState (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class PreparedTxnState

+
+
java.lang.Object +
org.apache.kafka.clients.producer.PreparedTxnState
+
+
+
+
public class PreparedTxnState +extends Object
+
Class containing the state of a transaction after it has been prepared for a two-phase commit. + This state includes the producer ID and epoch, which are needed to commit or abort the transaction.
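+ Based only on the constructor and methods documented below, the state can be round-tripped through its string form, e.g. to persist it in an external transaction log; the preparedState variable and the surrounding recovery flow are assumed rather than shown here:
+
+ // Serialize the prepared state (format "producerId:epoch") so it can be stored durably.
+ String serialized = preparedState.toString();
+
+ // Later, rebuild the state from the stored string and check that it refers to a real transaction.
+ PreparedTxnState restored = new PreparedTxnState(serialized);
+ if (restored.hasTransaction()) {
+     long producerId = restored.producerId();
+     short epoch = restored.epoch();
+     // hand these to the recovery path that decides whether to commit or abort the transaction
+ }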
+
+
+
    + +
  • +
    +

    Constructor Summary

    +
    Constructors
    +
    +
    Constructor
    +
    Description
    + +
    +
    Creates a new empty PreparedTxnState
    +
    +
    PreparedTxnState(String serializedState)
    +
    +
    Creates a new PreparedTxnState from a serialized string representation
    +
    +
    +
    +
  • + +
  • +
    +

    Method Summary

    +
    +
    +
    +
    +
    Modifier and Type
    +
    Method
    +
    Description
    +
    short
    + +
     
    +
    boolean
    + +
     
    +
    int
    + +
     
    +
    boolean
    + +
    +
    Checks if this preparedTxnState represents an initialized transaction with a valid producer ID + that is not -1 (the uninitialized value).
    +
    +
    long
    + +
     
    + + +
    +
    Returns a serialized string representation of this transaction state.
    +
    +
    +
    +
    +
    +

    Methods inherited from class java.lang.Object

    +getClass, notify, notifyAll, wait, wait, wait
    +
    +
  • +
+
+
+
    + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      PreparedTxnState

      +
      public PreparedTxnState()
      +
      Creates a new empty PreparedTxnState
      +
      +
    • +
    • +
      +

      PreparedTxnState

      +
      public PreparedTxnState(String serializedState)
      +
      Creates a new PreparedTxnState from a serialized string representation
      +
      +
      Parameters:
      +
      serializedState - The serialized string to deserialize.
      +
      Throws:
      +
      IllegalArgumentException - if the serialized string is not in the expected format
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      producerId

      +
      public long producerId()
      +
      +
    • +
    • +
      +

      epoch

      +
      public short epoch()
      +
      +
    • +
    • +
      +

      hasTransaction

      +
      public boolean hasTransaction()
      +
      Checks if this preparedTxnState represents an initialized transaction with a valid producer ID + that is not -1 (the uninitialized value).
      +
      +
      Returns:
      +
      true if the state has an initialized transaction, false otherwise.
      +
      +
      +
    • +
    • +
      +

      toString

      +
      public String toString()
      +
      Returns a serialized string representation of this transaction state. + The format is "producerId:epoch" for an initialized state, or an empty string + for an uninitialized state (where producerId and epoch are both -1).
      +
      +
      Overrides:
      +
      toString in class Object
      +
      Returns:
      +
      a serialized string representation
      +
      +
      +
    • +
    • +
      +

      equals

      +
      public boolean equals(Object o)
      +
      +
      Overrides:
      +
      equals in class Object
      +
      +
      +
    • +
    • +
      +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class Object
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/producer/Producer.html b/static/41/javadoc/org/apache/kafka/clients/producer/Producer.html new file mode 100644 index 000000000..d214d859a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/producer/Producer.html @@ -0,0 +1,354 @@ + + + + +Producer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Interface Producer<K,V>

+
+
+
+
All Superinterfaces:
+
AutoCloseable, Closeable
+
+
+
All Known Implementing Classes:
+
KafkaProducer, MockProducer
+
+
+
public interface Producer<K,V> +extends Closeable
+
The interface for the KafkaProducer
+
+
See Also:
+
+ +
+
+
+
+ +
+
+ +
+ +
+
+
+ + diff --git a/static/41/javadoc/org/apache/kafka/clients/producer/ProducerConfig.html b/static/41/javadoc/org/apache/kafka/clients/producer/ProducerConfig.html new file mode 100644 index 000000000..9cb3fdea8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/producer/ProducerConfig.html @@ -0,0 +1,1232 @@ + + + + +ProducerConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
+ +
+
+ +
+ +

Class ProducerConfig

+
+
java.lang.Object +
org.apache.kafka.common.config.AbstractConfig +
org.apache.kafka.clients.producer.ProducerConfig
+
+
+
+
+
public class ProducerConfig +extends AbstractConfig
+
Configuration for the Kafka Producer. Documentation for these configurations can be found in the Kafka documentation
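+ As a short sketch, a producer can be configured with the constants defined on this class instead of raw strings; the bootstrap address is a placeholder:
+
+ Properties props = new Properties();
+ props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+ props.put(ProducerConfig.ACKS_CONFIG, "all");
+ props.put(ProducerConfig.LINGER_MS_CONFIG, 5);
+ props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+ props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+
+ Producer<String, String> producer = new KafkaProducer<>(props);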
+
+
+ +
+
+
    + +
  • +
    +

    Field Details

    +
      +
    • +
      +

      BOOTSTRAP_SERVERS_CONFIG

      +
      public static final String BOOTSTRAP_SERVERS_CONFIG
      +
      bootstrap.servers
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      CLIENT_DNS_LOOKUP_CONFIG

      +
      public static final String CLIENT_DNS_LOOKUP_CONFIG
      +
      client.dns.lookup
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METADATA_MAX_AGE_CONFIG

      +
      public static final String METADATA_MAX_AGE_CONFIG
      +
      metadata.max.age.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METADATA_MAX_IDLE_CONFIG

      +
      public static final String METADATA_MAX_IDLE_CONFIG
      +
      metadata.max.idle.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      BATCH_SIZE_CONFIG

      +
      public static final String BATCH_SIZE_CONFIG
      +
      batch.size
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG

      +
      public static final String PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG
      +
      partitioner.adaptive.partitioning.enable
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG

      +
      public static final String PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG
      +
      partitioner.availability.timeout.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      PARTITIONER_IGNORE_KEYS_CONFIG

      +
      public static final String PARTITIONER_IGNORE_KEYS_CONFIG
      +
      partitioner.ignore.keys
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      ACKS_CONFIG

      +
      public static final String ACKS_CONFIG
      +
      acks
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      LINGER_MS_CONFIG

      +
      public static final String LINGER_MS_CONFIG
      +
      linger.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      REQUEST_TIMEOUT_MS_CONFIG

      +
      public static final String REQUEST_TIMEOUT_MS_CONFIG
      +
      request.timeout.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      DELIVERY_TIMEOUT_MS_CONFIG

      +
      public static final String DELIVERY_TIMEOUT_MS_CONFIG
      +
      delivery.timeout.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      CLIENT_ID_CONFIG

      +
      public static final String CLIENT_ID_CONFIG
      +
      client.id
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SEND_BUFFER_CONFIG

      +
      public static final String SEND_BUFFER_CONFIG
      +
      send.buffer.bytes
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RECEIVE_BUFFER_CONFIG

      +
      public static final String RECEIVE_BUFFER_CONFIG
      +
      receive.buffer.bytes
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      MAX_REQUEST_SIZE_CONFIG

      +
      public static final String MAX_REQUEST_SIZE_CONFIG
      +
      max.request.size
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RECONNECT_BACKOFF_MS_CONFIG

      +
      public static final String RECONNECT_BACKOFF_MS_CONFIG
      +
      reconnect.backoff.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RECONNECT_BACKOFF_MAX_MS_CONFIG

      +
      public static final String RECONNECT_BACKOFF_MAX_MS_CONFIG
      +
      reconnect.backoff.max.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      MAX_BLOCK_MS_CONFIG

      +
      public static final String MAX_BLOCK_MS_CONFIG
      +
      max.block.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      BUFFER_MEMORY_CONFIG

      +
      public static final String BUFFER_MEMORY_CONFIG
      +
      buffer.memory
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RETRY_BACKOFF_MS_CONFIG

      +
      public static final String RETRY_BACKOFF_MS_CONFIG
      +
      retry.backoff.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RETRY_BACKOFF_MAX_MS_CONFIG

      +
      public static final String RETRY_BACKOFF_MAX_MS_CONFIG
      +
      retry.backoff.max.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      ENABLE_METRICS_PUSH_CONFIG

      +
      public static final String ENABLE_METRICS_PUSH_CONFIG
      +
      enable.metrics.push
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      ENABLE_METRICS_PUSH_DOC

      +
      public static final String ENABLE_METRICS_PUSH_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      COMPRESSION_TYPE_CONFIG

      +
      public static final String COMPRESSION_TYPE_CONFIG
      +
      compression.type
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      COMPRESSION_GZIP_LEVEL_CONFIG

      +
      public static final String COMPRESSION_GZIP_LEVEL_CONFIG
      +
      compression.gzip.level
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      COMPRESSION_LZ4_LEVEL_CONFIG

      +
      public static final String COMPRESSION_LZ4_LEVEL_CONFIG
      +
      compression.lz4.level
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      COMPRESSION_ZSTD_LEVEL_CONFIG

      +
      public static final String COMPRESSION_ZSTD_LEVEL_CONFIG
      +
      compression.zstd.level
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METRICS_SAMPLE_WINDOW_MS_CONFIG

      +
      public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG
      +
      metrics.sample.window.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METRICS_NUM_SAMPLES_CONFIG

      +
      public static final String METRICS_NUM_SAMPLES_CONFIG
      +
      metrics.num.samples
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METRICS_RECORDING_LEVEL_CONFIG

      +
      public static final String METRICS_RECORDING_LEVEL_CONFIG
      +
      metrics.recording.level
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      METRIC_REPORTER_CLASSES_CONFIG

      +
      public static final String METRIC_REPORTER_CLASSES_CONFIG
      +
      metric.reporters
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION

      +
      public static final String MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION
      +
      max.in.flight.requests.per.connection
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      RETRIES_CONFIG

      +
      public static final String RETRIES_CONFIG
      +
      retries
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      KEY_SERIALIZER_CLASS_CONFIG

      +
      public static final String KEY_SERIALIZER_CLASS_CONFIG
      +
      key.serializer
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      KEY_SERIALIZER_CLASS_DOC

      +
      public static final String KEY_SERIALIZER_CLASS_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      VALUE_SERIALIZER_CLASS_CONFIG

      +
      public static final String VALUE_SERIALIZER_CLASS_CONFIG
      +
      value.serializer
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      VALUE_SERIALIZER_CLASS_DOC

      +
      public static final String VALUE_SERIALIZER_CLASS_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG

      +
      public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG
      +
      socket.connection.setup.timeout.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG

      +
      public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG
      +
      socket.connection.setup.timeout.max.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      CONNECTIONS_MAX_IDLE_MS_CONFIG

      +
      public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG
      +
      connections.max.idle.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      PARTITIONER_CLASS_CONFIG

      +
      public static final String PARTITIONER_CLASS_CONFIG
      +
      partitioner.class
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      INTERCEPTOR_CLASSES_CONFIG

      +
      public static final String INTERCEPTOR_CLASSES_CONFIG
      +
      interceptor.classes
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      INTERCEPTOR_CLASSES_DOC

      +
      public static final String INTERCEPTOR_CLASSES_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      ENABLE_IDEMPOTENCE_CONFIG

      +
      public static final String ENABLE_IDEMPOTENCE_CONFIG
      +
      enable.idempotence
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      ENABLE_IDEMPOTENCE_DOC

      +
      public static final String ENABLE_IDEMPOTENCE_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      TRANSACTION_TIMEOUT_CONFIG

      +
      public static final String TRANSACTION_TIMEOUT_CONFIG
      +
      transaction.timeout.ms
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      TRANSACTION_TIMEOUT_DOC

      +
      public static final String TRANSACTION_TIMEOUT_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      TRANSACTIONAL_ID_CONFIG

      +
      public static final String TRANSACTIONAL_ID_CONFIG
      +
      transactional.id
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      TRANSACTIONAL_ID_DOC

      +
      public static final String TRANSACTIONAL_ID_DOC
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG

      +
      public static final String TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG
      +
      transaction.two.phase.commit.enable
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    • +
      +

      SECURITY_PROVIDERS_CONFIG

      +
      public static final String SECURITY_PROVIDERS_CONFIG
      +
      security.providers
      +
      +
      See Also:
      +
      + +
      +
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Constructor Details

    +
      +
    • +
      +

      ProducerConfig

      +
      public ProducerConfig(Properties props)
      +
      +
    • +
    • +
      +

      ProducerConfig

      +
      public ProducerConfig(Map<String,Object> props)
      +
      +
    • +
    +
    +
  • + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      configNames

      +
      public static Set<String> configNames()
      +
      +
    • +
    • +
      +

      configDef

      +
      public static ConfigDef configDef()
      +
      +
    • +
    • +
      +

      main

      +
      public static void main(String[] args)
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
diff --git a/static/41/javadoc/org/apache/kafka/clients/producer/ProducerInterceptor.html b/static/41/javadoc/org/apache/kafka/clients/producer/ProducerInterceptor.html new file mode 100644 index 000000000..d8771da23 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/producer/ProducerInterceptor.html @@ -0,0 +1,269 @@
ProducerInterceptor (kafka 4.1.0 API)
+ +
+
+ +
+ +

Interface ProducerInterceptor<K,V>

+
+
+
+
All Superinterfaces:
+
AutoCloseable, Configurable
+
+
+
public interface ProducerInterceptor<K,V> +extends Configurable, AutoCloseable
+
A plugin interface that allows you to intercept (and possibly mutate) the records received by the producer before they are published to the Kafka cluster.

This class will get producer config properties via configure() method, including clientId assigned by KafkaProducer if not specified in the producer config. The interceptor implementation needs to be aware that it will be sharing producer config namespace with other interceptors and serializers, and ensure that there are no conflicts.

Exceptions thrown by ProducerInterceptor methods will be caught, logged, but not propagated further. As a result, if the user configures the interceptor with the wrong key and value type parameters, the producer will not throw an exception, just log the errors.

ProducerInterceptor callbacks may be called from multiple threads. Interceptor implementation must ensure thread-safety, if needed.

Implement ClusterResourceListener to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information. Implement Monitorable to enable the interceptor to register metrics. The following tags are automatically added to all metrics registered: config set to interceptor.classes, and class set to the ProducerInterceptor class name.
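Illustrative sketch (not part of the generated Javadoc): a minimal interceptor that adds a header in onSend() and counts acknowledgements. The header name and the counter are arbitrary choices; such a class would be registered through ProducerConfig.INTERCEPTOR_CLASSES_CONFIG.

import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class AuditProducerInterceptor implements ProducerInterceptor<String, String> {
    // onAcknowledgement() may run on the producer I/O thread, so keep state thread-safe and work cheap.
    private final AtomicLong acked = new AtomicLong();

    @Override
    public void configure(Map<String, ?> configs) {
        // Receives the full producer config, including the assigned client.id.
    }

    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        // Headers can be added in place; to change key/value, return a new ProducerRecord instead.
        record.headers().add("audit-source", "interceptor-example".getBytes(StandardCharsets.UTF_8)); // hypothetical header
        return record;
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        if (exception == null) {
            acked.incrementAndGet();
        }
    }

    @Override
    public void close() {
        // Release any resources acquired in configure().
    }
}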

+
+
+ +
+
+
    + +
  • +
    +

    Method Details

    +
      +
    • +
      +

      onSend

      +
      ProducerRecord<K,V> onSend(ProducerRecord<K,V> record)
      +
This is called from KafkaProducer.send(ProducerRecord) and KafkaProducer.send(ProducerRecord, Callback) methods, before key and value get serialized and partition is assigned (if partition is not specified in ProducerRecord).

This method is allowed to modify the record, in which case, the new record will be returned. The implication of modifying key/value is that partition assignment (if not specified in ProducerRecord) will be done based on modified key/value, not key/value from the client. Consequently, key and value transformation done in onSend() needs to be consistent: same key and value should mutate to the same (modified) key and value. Otherwise, log compaction would not work as expected.

Similarly, it is up to interceptor implementation to ensure that correct topic/partition is returned in ProducerRecord. Most often, it should be the same topic/partition from 'record'.

Any exception thrown by this method will be caught by the caller and logged, but not propagated further.

Since the producer may run multiple interceptors, a particular interceptor's onSend() callback will be called in the order specified by ProducerConfig.INTERCEPTOR_CLASSES_CONFIG. The first interceptor in the list gets the record passed from the client, the following interceptor will be passed the record returned by the previous interceptor, and so on. Since interceptors are allowed to modify records, interceptors may potentially get the record already modified by other interceptors. However, building a pipeline of mutable interceptors that depend on the output of the previous interceptor is discouraged, because of potential side-effects caused by interceptors potentially failing to modify the record and throwing an exception. If one of the interceptors in the list throws an exception from onSend(), the exception is caught, logged, and the next interceptor is called with the record returned by the last successful interceptor in the list, or otherwise the client.

      +
      +
      Parameters:
      +
      record - the record from client or the record returned by the previous interceptor in the chain of interceptors.
      +
      Returns:
      +
      producer record to send to topic/partition
      +
      +
      +
    • +
    • +
      +

      onAcknowledgement

      +
      default void onAcknowledgement(RecordMetadata metadata, + Exception exception)
      +
This method is called when the record sent to the server has been acknowledged, or when sending the record fails before it gets sent to the server.

This method is generally called just before the user callback is called, and in additional cases when KafkaProducer.send() throws an exception.

Any exception thrown by this method will be ignored by the caller.

This method will generally execute in the background I/O thread, so the implementation should be reasonably fast. Otherwise, sending of messages from other threads could be delayed.

      +
      +
      Parameters:
      +
      metadata - The metadata for the record that was sent (i.e. the partition and offset). + If an error occurred, metadata will contain only valid topic and maybe + partition. If partition is not given in ProducerRecord and an error occurs + before partition gets assigned, then partition will be set to RecordMetadata.UNKNOWN_PARTITION. + The metadata may be null if the client passed null record to + KafkaProducer.send(ProducerRecord).
      +
      exception - The exception thrown during processing of this record. Null if no error occurred.
      +
      +
      +
    • +
    • +
      +

      onAcknowledgement

      +
      default void onAcknowledgement(RecordMetadata metadata, + Exception exception, + Headers headers)
      +
This method is called when the record sent to the server has been acknowledged, or when sending the record fails before it gets sent to the server.

This method is generally called just before the user callback is called, and in additional cases when KafkaProducer.send() throws an exception.

Any exception thrown by this method will be ignored by the caller.

This method will generally execute in the background I/O thread, so the implementation should be reasonably fast. Otherwise, sending of messages from other threads could be delayed.

      +
      +
      Parameters:
      +
      metadata - The metadata for the record that was sent (i.e. the partition and offset). + If an error occurred, metadata will contain only valid topic and maybe + partition. If partition is not given in ProducerRecord and an error occurs + before partition gets assigned, then partition will be set to RecordMetadata.UNKNOWN_PARTITION. + The metadata may be null if the client passed null record to + KafkaProducer.send(ProducerRecord).
      +
      exception - The exception thrown during processing of this record. Null if no error occurred.
      +
      headers - The headers for the record that was sent. It is read-only.
      +
      +
      +
    • +
    • +
      +

      close

      +
      void close()
      +
      This is called when interceptor is closed
      +
      +
      Specified by:
      +
      close in interface AutoCloseable
      +
      +
      +
    • +
    +
    +
  • +
+
+ +
+
+
diff --git a/static/41/javadoc/org/apache/kafka/clients/producer/ProducerRecord.html b/static/41/javadoc/org/apache/kafka/clients/producer/ProducerRecord.html new file mode 100644 index 000000000..e2b6f8261 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/producer/ProducerRecord.html @@ -0,0 +1,429 @@
ProducerRecord (kafka 4.1.0 API)
+ +
+
+ +
+ +

Class ProducerRecord<K,V>

+
+
java.lang.Object +
org.apache.kafka.clients.producer.ProducerRecord<K,V>
+
+
+
+
public class ProducerRecord<K,V> +extends Object
+
A key/value pair to be sent to Kafka. This consists of a topic name to which the record is being sent, an optional partition number, and an optional key and value.

If a valid partition number is specified that partition will be used when sending the record. If no partition is specified but a key is present a partition will be chosen using a hash of the key. If neither key nor partition is present a partition will be assigned in a round-robin fashion. Note that partition numbers are 0-indexed.

The record also has an associated timestamp. If the user did not provide a timestamp, the producer will stamp the record with its current time. The timestamp eventually used by Kafka depends on the timestamp type configured for the topic.

• If the topic is configured to use CreateTime, the timestamp in the producer record will be used by the broker.
• If the topic is configured to use LogAppendTime, the timestamp in the producer record will be overwritten by the broker with the broker local time when it appends the message to its log.

In either of the cases above, the timestamp that has actually been used will be returned to user in RecordMetadata
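Illustrative sketch (not part of the generated Javadoc) of the constructors described above; the topic name and keys are placeholders. Partition and timestamp are usually left null so the producer assigns them.

import org.apache.kafka.clients.producer.ProducerRecord;

public class ProducerRecordSketch {
    public static void main(String[] args) {
        // Value only: no key, so the partitioner spreads records across partitions.
        ProducerRecord<String, String> unkeyed = new ProducerRecord<>("events", "payload-1");

        // Keyed: records with the same key hash to the same partition.
        ProducerRecord<String, String> keyed = new ProducerRecord<>("events", "user-42", "payload-2");

        // Explicit partition and timestamp; normally both are left for the producer/broker to fill in.
        ProducerRecord<String, String> explicit =
                new ProducerRecord<>("events", 0, System.currentTimeMillis(), "user-42", "payload-3");

        System.out.println(unkeyed.partition()); // null: not yet assigned
        System.out.println(keyed.key());
        System.out.println(explicit.timestamp());
    }
}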

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ProducerRecord

        +
        public ProducerRecord(String topic, + Integer partition, + Long timestamp, + K key, + V value, + Iterable<Header> headers)
        +
        Creates a record with a specified timestamp to be sent to a specified topic and partition
        +
        +
        Parameters:
        +
        topic - The topic the record will be appended to
        +
        partition - The partition to which the record should be sent
        +
        timestamp - The timestamp of the record, in milliseconds since epoch. If null, the producer will assign + the timestamp using System.currentTimeMillis().
        +
        key - The key that will be included in the record
        +
        value - The record contents
        +
        headers - the headers that will be included in the record
        +
        +
        +
      • +
      • +
        +

        ProducerRecord

        +
        public ProducerRecord(String topic, + Integer partition, + Long timestamp, + K key, + V value)
        +
        Creates a record with a specified timestamp to be sent to a specified topic and partition
        +
        +
        Parameters:
        +
        topic - The topic the record will be appended to
        +
        partition - The partition to which the record should be sent
        +
        timestamp - The timestamp of the record, in milliseconds since epoch. If null, the producer will assign the + timestamp using System.currentTimeMillis().
        +
        key - The key that will be included in the record
        +
        value - The record contents
        +
        +
        +
      • +
      • +
        +

        ProducerRecord

        +
        public ProducerRecord(String topic, + Integer partition, + K key, + V value, + Iterable<Header> headers)
        +
        Creates a record to be sent to a specified topic and partition
        +
        +
        Parameters:
        +
        topic - The topic the record will be appended to
        +
        partition - The partition to which the record should be sent
        +
        key - The key that will be included in the record
        +
        value - The record contents
        +
        headers - The headers that will be included in the record
        +
        +
        +
      • +
      • +
        +

        ProducerRecord

        +
        public ProducerRecord(String topic, + Integer partition, + K key, + V value)
        +
        Creates a record to be sent to a specified topic and partition
        +
        +
        Parameters:
        +
        topic - The topic the record will be appended to
        +
        partition - The partition to which the record should be sent
        +
        key - The key that will be included in the record
        +
        value - The record contents
        +
        +
        +
      • +
      • +
        +

        ProducerRecord

        +
        public ProducerRecord(String topic, + K key, + V value)
        +
        Create a record to be sent to Kafka
        +
        +
        Parameters:
        +
        topic - The topic the record will be appended to
        +
        key - The key that will be included in the record
        +
        value - The record contents
        +
        +
        +
      • +
      • +
        +

        ProducerRecord

        +
        public ProducerRecord(String topic, + V value)
        +
        Create a record with no key
        +
        +
        Parameters:
        +
        topic - The topic this record should be sent to
        +
        value - The record contents
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        topic

        +
        public String topic()
        +
        +
        Returns:
        +
        The topic this record is being sent to
        +
        +
        +
      • +
      • +
        +

        headers

        +
        public Headers headers()
        +
        +
        Returns:
        +
        The headers
        +
        +
        +
      • +
      • +
        +

        key

        +
        public K key()
        +
        +
        Returns:
        +
        The key (or null if no key is specified)
        +
        +
        +
      • +
      • +
        +

        value

        +
        public V value()
        +
        +
        Returns:
        +
        The value
        +
        +
        +
      • +
      • +
        +

        timestamp

        +
        public Long timestamp()
        +
        +
        Returns:
        +
        The timestamp, which is in milliseconds since epoch.
        +
        +
        +
      • +
      • +
        +

        partition

        +
        public Integer partition()
        +
        +
        Returns:
        +
        The partition to which the record will be sent (or null if no partition was specified)
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
diff --git a/static/41/javadoc/org/apache/kafka/clients/producer/RecordMetadata.html b/static/41/javadoc/org/apache/kafka/clients/producer/RecordMetadata.html new file mode 100644 index 000000000..f7dcfca32 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/producer/RecordMetadata.html @@ -0,0 +1,330 @@
RecordMetadata (kafka 4.1.0 API)
    + +
    +
    + +
    + +

    Class RecordMetadata

    +
    +
    java.lang.Object +
    org.apache.kafka.clients.producer.RecordMetadata
    +
    +
    +
    +
    public final class RecordMetadata +extends Object
    +
    The metadata for a record that has been acknowledged by the server
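Illustrative sketch (not part of the generated Javadoc) of reading RecordMetadata both from the Future returned by send() and from a send callback; the broker address and topic are placeholders.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class RecordMetadataSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            // Blocking style: the Future completes with the acknowledged metadata.
            RecordMetadata meta = producer.send(new ProducerRecord<>("demo-topic", "k", "v")).get();
            System.out.printf("topic=%s partition=%d offset=%d%n",
                    meta.topic(), meta.partition(), meta.hasOffset() ? meta.offset() : -1L);

            // Callback style: metadata (or an exception) is delivered asynchronously.
            producer.send(new ProducerRecord<>("demo-topic", "k", "v"), (metadata, exception) -> {
                if (exception == null && metadata.hasTimestamp()) {
                    System.out.println("acked at timestamp " + metadata.timestamp());
                }
            });
        }
    }
}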
    +
    +
    +
      + +
    • +
      +

      Field Summary

      +
      Fields
      +
      +
      Modifier and Type
      +
      Field
      +
      Description
      +
      static final int
      + +
      +
      Partition value for record without partition assigned
      +
      +
      +
      +
    • + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      +
      RecordMetadata(TopicPartition topicPartition, + long baseOffset, + int batchIndex, + long timestamp, + int serializedKeySize, + int serializedValueSize)
      +
      +
      Creates a new instance with the provided parameters.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      + +
      +
      Indicates whether the record metadata includes the offset.
      +
      +
      boolean
      + +
      +
      Indicates whether the record metadata includes the timestamp.
      +
      +
      long
      + +
      +
      The offset of the record in the topic/partition.
      +
      +
      int
      + +
      +
      The partition the record was sent to
      +
      +
      int
      + +
      +
      The size of the serialized, uncompressed key in bytes.
      +
      +
      int
      + +
      +
      The size of the serialized, uncompressed value in bytes.
      +
      +
      long
      + +
      +
      The timestamp of the record in the topic/partition.
      +
      + + +
      +
      The topic the record was appended to
      +
      + + +
       
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        UNKNOWN_PARTITION

        +
        public static final int UNKNOWN_PARTITION
        +
        Partition value for record without partition assigned
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        RecordMetadata

        +
        public RecordMetadata(TopicPartition topicPartition, + long baseOffset, + int batchIndex, + long timestamp, + int serializedKeySize, + int serializedValueSize)
        +
        Creates a new instance with the provided parameters.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        hasOffset

        +
        public boolean hasOffset()
        +
        Indicates whether the record metadata includes the offset.
        +
        +
        Returns:
        +
        true if the offset is included in the metadata, false otherwise.
        +
        +
        +
      • +
      • +
        +

        offset

        +
        public long offset()
        +
        The offset of the record in the topic/partition.
        +
        +
        Returns:
        +
        the offset of the record, or -1 if {hasOffset()} returns false.
        +
        +
        +
      • +
      • +
        +

        hasTimestamp

        +
        public boolean hasTimestamp()
        +
        Indicates whether the record metadata includes the timestamp.
        +
        +
        Returns:
        +
        true if a valid timestamp exists, false otherwise.
        +
        +
        +
      • +
      • +
        +

        timestamp

        +
        public long timestamp()
        +
        The timestamp of the record in the topic/partition.
        +
        +
        Returns:
        +
        the timestamp of the record, or -1 if the {hasTimestamp()} returns false.
        +
        +
        +
      • +
      • +
        +

        serializedKeySize

        +
        public int serializedKeySize()
        +
The size of the serialized, uncompressed key in bytes. If key is null, the returned size is -1.
        +
        +
      • +
      • +
        +

        serializedValueSize

        +
        public int serializedValueSize()
        +
The size of the serialized, uncompressed value in bytes. If value is null, the returned size is -1.
        +
        +
      • +
      • +
        +

        topic

        +
        public String topic()
        +
        The topic the record was appended to
        +
        +
      • +
      • +
        +

        partition

        +
        public int partition()
        +
        The partition the record was sent to
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
diff --git a/static/41/javadoc/org/apache/kafka/clients/producer/RoundRobinPartitioner.html b/static/41/javadoc/org/apache/kafka/clients/producer/RoundRobinPartitioner.html new file mode 100644 index 000000000..9168819df --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/producer/RoundRobinPartitioner.html @@ -0,0 +1,230 @@
RoundRobinPartitioner (kafka 4.1.0 API)
    + +
    +
    + +
    + +

    Class RoundRobinPartitioner

    +
    +
    java.lang.Object +
    org.apache.kafka.clients.producer.RoundRobinPartitioner
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Partitioner, Configurable
    +
    +
    +
    public class RoundRobinPartitioner +extends Object +implements Partitioner
    +
    The "Round-Robin" partitioner + + This partitioning strategy can be used when user wants + to distribute the writes to all partitions equally. This + is the behaviour regardless of record key hash.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        RoundRobinPartitioner

        +
        public RoundRobinPartitioner()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        public void configure(Map<String,?> configs)
        +
        Description copied from interface: Configurable
        +
        Configure this class with the given key-value pairs
        +
        +
        Specified by:
        +
        configure in interface Configurable
        +
        +
        +
      • +
      • +
        +

        partition

        +
        public int partition(String topic, + Object key, + byte[] keyBytes, + Object value, + byte[] valueBytes, + Cluster cluster)
        +
        Compute the partition for the given record.
        +
        +
        Specified by:
        +
        partition in interface Partitioner
        +
        Parameters:
        +
        topic - The topic name
        +
        key - The key to partition on (or null if no key)
        +
        keyBytes - serialized key to partition on (or null if no key)
        +
        value - The value to partition on or null
        +
        valueBytes - serialized value to partition on or null
        +
        cluster - The current cluster metadata
        +
        +
        +
      • +
      • +
        +

        close

        +
        public void close()
        +
        Description copied from interface: Partitioner
        +
        This is called when partitioner is closed.
        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        Specified by:
        +
        close in interface Closeable
        +
        Specified by:
        +
        close in interface Partitioner
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
diff --git a/static/41/javadoc/org/apache/kafka/clients/producer/package-summary.html b/static/41/javadoc/org/apache/kafka/clients/producer/package-summary.html new file mode 100644 index 000000000..2260a30dd --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/producer/package-summary.html @@ -0,0 +1,144 @@
org.apache.kafka.clients.producer (kafka 4.1.0 API)
    + +
    +
    +
    +

    Package org.apache.kafka.clients.producer

    +
    +
    +
    package org.apache.kafka.clients.producer
    +
    +
    Provides a Kafka client for producing records to topics and/or partitions in a Kafka cluster.
    +
    +
    +
      +
    • +
      +
      +
      +
      +
      Class
      +
      Description
      + +
      +
      This exception is thrown if the producer cannot allocate memory for a record within max.block.ms due to the buffer + being too full.
      +
      + +
      +
      A callback interface that the user can implement to allow code to execute when the request is complete.
      +
      + +
      +
      A Kafka client that publishes records to the Kafka cluster.
      +
      + +
      +
      A mock of the producer interface you can use for testing code that uses Kafka.
      +
      + +
      +
      Partitioner Interface +
      + Implement Monitorable to enable the partitioner to register metrics.
      +
      + +
      +
      Class containing the state of a transaction after it has been prepared for a two-phase commit.
      +
      + +
      +
      The interface for the KafkaProducer
      +
      + +
      +
      Configuration for the Kafka Producer.
      +
      + +
      +
      A plugin interface that allows you to intercept (and possibly mutate) the records received by the producer before + they are published to the Kafka cluster.
      +
      + +
      +
      A key/value pair to be sent to Kafka.
      +
      + +
      +
      The metadata for a record that has been acknowledged by the server
      +
      + +
      +
      The "Round-Robin" partitioner + + This partitioning strategy can be used when user wants + to distribute the writes to all partitions equally.
      +
      +
      +
      +
      +
    • +
    +
    +
    +
    +
diff --git a/static/41/javadoc/org/apache/kafka/clients/producer/package-tree.html b/static/41/javadoc/org/apache/kafka/clients/producer/package-tree.html new file mode 100644 index 000000000..8d76fcdcd --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/clients/producer/package-tree.html @@ -0,0 +1,133 @@
org.apache.kafka.clients.producer Class Hierarchy (kafka 4.1.0 API)
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.clients.producer

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +
    +
diff --git a/static/41/javadoc/org/apache/kafka/common/ClassicGroupState.html b/static/41/javadoc/org/apache/kafka/common/ClassicGroupState.html new file mode 100644 index 000000000..9add4d4e7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/ClassicGroupState.html @@ -0,0 +1,275 @@
ClassicGroupState (kafka 4.1.0 API)
    + +
    +
    + +
    + +

    Enum Class ClassicGroupState

    +
    +
    java.lang.Object +
    java.lang.Enum<ClassicGroupState> +
    org.apache.kafka.common.ClassicGroupState
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<ClassicGroupState>, Constable
    +
    +
    +
    public enum ClassicGroupState +extends Enum<ClassicGroupState>
    +
    The classic group state.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static ClassicGroupState[] values()
        +
Returns an array containing the constants of this enum class, in the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static ClassicGroupState valueOf(String name)
        +
Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        parse

        +
        public static ClassicGroupState parse(String name)
        +
        Case-insensitive classic group state lookup by string name.
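A small illustrative sketch (not part of the generated Javadoc). It assumes the enum defines a STABLE constant, as in the classic consumer group protocol; the constant list is collapsed above, so treat the specific names as an assumption.

import org.apache.kafka.common.ClassicGroupState;

public class ClassicGroupStateSketch {
    public static void main(String[] args) {
        // parse() is case-insensitive, unlike Enum.valueOf().
        ClassicGroupState state = ClassicGroupState.parse("stable"); // assumed to resolve to a STABLE constant
        System.out.println(state + " / " + ClassicGroupState.parse(state.toString()));
    }
}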
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Enum<ClassicGroupState>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
diff --git a/static/41/javadoc/org/apache/kafka/common/Cluster.html b/static/41/javadoc/org/apache/kafka/common/Cluster.html new file mode 100644 index 000000000..5946fd8d9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/Cluster.html @@ -0,0 +1,588 @@
Cluster (kafka 4.1.0 API)
    + +
    +
    + +
    + +

    Class Cluster

    +
    +
    java.lang.Object +
    org.apache.kafka.common.Cluster
    +
    +
    +
    +
    public final class Cluster +extends Object
    +
    An immutable representation of a subset of the nodes, topics, and partitions in the Kafka cluster.
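Illustrative sketch (not part of the generated Javadoc) of the read-only query methods, here inside a hypothetical custom Partitioner where the producer passes the current Cluster snapshot in.

import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;

public class FirstAvailablePartitioner implements Partitioner {
    @Override
    public void configure(Map<String, ?> configs) {}

    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        // Query the immutable metadata snapshot the producer hands in.
        List<PartitionInfo> available = cluster.availablePartitionsForTopic(topic);
        if (!available.isEmpty()) {
            return available.get(0).partition();
        }
        // Fall back to a hash over the partition count when no leader is currently known.
        Integer count = cluster.partitionCountForTopic(topic);
        return count == null ? 0 : (topic.hashCode() & 0x7fffffff) % count;
    }

    @Override
    public void close() {}
}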
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Cluster

        +
        public Cluster(String clusterId, + Collection<Node> nodes, + Collection<PartitionInfo> partitions, + Set<String> unauthorizedTopics, + Set<String> internalTopics)
        +
        Create a new cluster with the given id, nodes and partitions
        +
        +
        Parameters:
        +
        nodes - The nodes in the cluster
        +
        partitions - Information about a subset of the topic-partitions this cluster hosts
        +
        +
        +
      • +
      • +
        +

        Cluster

        +
        public Cluster(String clusterId, + Collection<Node> nodes, + Collection<PartitionInfo> partitions, + Set<String> unauthorizedTopics, + Set<String> internalTopics, + Node controller)
        +
        Create a new cluster with the given id, nodes and partitions
        +
        +
        Parameters:
        +
        nodes - The nodes in the cluster
        +
        partitions - Information about a subset of the topic-partitions this cluster hosts
        +
        +
        +
      • +
      • +
        +

        Cluster

        +
        public Cluster(String clusterId, + Collection<Node> nodes, + Collection<PartitionInfo> partitions, + Set<String> unauthorizedTopics, + Set<String> invalidTopics, + Set<String> internalTopics, + Node controller)
        +
        Create a new cluster with the given id, nodes and partitions
        +
        +
        Parameters:
        +
        nodes - The nodes in the cluster
        +
        partitions - Information about a subset of the topic-partitions this cluster hosts
        +
        +
        +
      • +
      • +
        +

        Cluster

        +
        public Cluster(String clusterId, + Collection<Node> nodes, + Collection<PartitionInfo> partitions, + Set<String> unauthorizedTopics, + Set<String> invalidTopics, + Set<String> internalTopics, + Node controller, + Map<String,Uuid> topicIds)
        +
        Create a new cluster with the given id, nodes, partitions and topicIds
        +
        +
        Parameters:
        +
        nodes - The nodes in the cluster
        +
        partitions - Information about a subset of the topic-partitions this cluster hosts
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        empty

        +
        public static Cluster empty()
        +
        Create an empty cluster instance with no nodes and no topic-partitions.
        +
        +
      • +
      • +
        +

        bootstrap

        +
        public static Cluster bootstrap(List<InetSocketAddress> addresses)
        +
        Create a "bootstrap" cluster using the given list of host/ports
        +
        +
        Parameters:
        +
        addresses - The addresses
        +
        Returns:
        +
        A cluster for these hosts/ports
        +
        +
        +
      • +
      • +
        +

        withPartitions

        +
        public Cluster withPartitions(Map<TopicPartition,PartitionInfo> partitions)
        +
        Return a copy of this cluster combined with `partitions`.
        +
        +
      • +
      • +
        +

        nodes

        +
        public List<Node> nodes()
        +
        +
        Returns:
        +
        The known set of nodes
        +
        +
        +
      • +
      • +
        +

        nodeById

        +
        public Node nodeById(int id)
        +
        Get the node by the node id (or null if the node is not online or does not exist)
        +
        +
        Parameters:
        +
        id - The id of the node
        +
        Returns:
        +
        The node, or null if the node is not online or does not exist
        +
        +
        +
      • +
      • +
        +

        nodeIfOnline

        +
        public Optional<Node> nodeIfOnline(TopicPartition partition, + int id)
        +
        Get the node by node id if the replica for the given partition is online
        +
        +
        Parameters:
        +
        partition - The TopicPartition
        +
        id - The node id
        +
        Returns:
        +
        the node
        +
        +
        +
      • +
      • +
        +

        leaderFor

        +
        public Node leaderFor(TopicPartition topicPartition)
        +
        Get the current leader for the given topic-partition
        +
        +
        Parameters:
        +
        topicPartition - The topic and partition we want to know the leader for
        +
        Returns:
        +
        The node that is the leader for this topic-partition, or null if there is currently no leader
        +
        +
        +
      • +
      • +
        +

        partition

        +
        public PartitionInfo partition(TopicPartition topicPartition)
        +
        Get the metadata for the specified partition
        +
        +
        Parameters:
        +
        topicPartition - The topic and partition to fetch info for
        +
        Returns:
        +
        The metadata about the given topic and partition, or null if none is found
        +
        +
        +
      • +
      • +
        +

        partitionsForTopic

        +
        public List<PartitionInfo> partitionsForTopic(String topic)
        +
        Get the list of partitions for this topic
        +
        +
        Parameters:
        +
        topic - The topic name
        +
        Returns:
        +
        A list of partitions
        +
        +
        +
      • +
      • +
        +

        partitionCountForTopic

        +
        public Integer partitionCountForTopic(String topic)
        +
        Get the number of partitions for the given topic.
        +
        +
        Parameters:
        +
        topic - The topic to get the number of partitions for
        +
        Returns:
        +
        The number of partitions or null if there is no corresponding metadata
        +
        +
        +
      • +
      • +
        +

        availablePartitionsForTopic

        +
        public List<PartitionInfo> availablePartitionsForTopic(String topic)
        +
        Get the list of available partitions for this topic
        +
        +
        Parameters:
        +
        topic - The topic name
        +
        Returns:
        +
        A list of partitions
        +
        +
        +
      • +
      • +
        +

        partitionsForNode

        +
        public List<PartitionInfo> partitionsForNode(int nodeId)
        +
        Get the list of partitions whose leader is this node
        +
        +
        Parameters:
        +
        nodeId - The node id
        +
        Returns:
        +
        A list of partitions
        +
        +
        +
      • +
      • +
        +

        topics

        +
        public Set<String> topics()
        +
        Get all topics.
        +
        +
        Returns:
        +
        a set of all topics
        +
        +
        +
      • +
      • +
        +

        unauthorizedTopics

        +
        public Set<String> unauthorizedTopics()
        +
        +
      • +
      • +
        +

        invalidTopics

        +
        public Set<String> invalidTopics()
        +
        +
      • +
      • +
        +

        internalTopics

        +
        public Set<String> internalTopics()
        +
        +
      • +
      • +
        +

        isBootstrapConfigured

        +
        public boolean isBootstrapConfigured()
        +
        +
      • +
      • +
        +

        clusterResource

        +
        public ClusterResource clusterResource()
        +
        +
      • +
      • +
        +

        controller

        +
        public Node controller()
        +
        +
      • +
      • +
        +

        topicIds

        +
        public Collection<Uuid> topicIds()
        +
        +
      • +
      • +
        +

        topicId

        +
        public Uuid topicId(String topic)
        +
        +
      • +
      • +
        +

        topicName

        +
        public String topicName(Uuid topicId)
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
diff --git a/static/41/javadoc/org/apache/kafka/common/ClusterResource.html b/static/41/javadoc/org/apache/kafka/common/ClusterResource.html new file mode 100644 index 000000000..83689d54a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/ClusterResource.html @@ -0,0 +1,210 @@
ClusterResource (kafka 4.1.0 API)
    + +
    +
    + +
    + +

    Class ClusterResource

    +
    +
    java.lang.Object +
    org.apache.kafka.common.ClusterResource
    +
    +
    +
    +
    public class ClusterResource +extends Object
    +
    The ClusterResource class encapsulates metadata for a Kafka cluster.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ClusterResource

        +
        public ClusterResource(String clusterId)
        +
Create ClusterResource with a cluster id. Note that cluster id may be null if the metadata request was sent to a broker without support for cluster ids.
        +
        +
        Parameters:
        +
        clusterId - The cluster id
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        clusterId

        +
        public String clusterId()
        +
Return the cluster id. Note that it may be null if the metadata request was sent to a broker without support for cluster ids.
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
diff --git a/static/41/javadoc/org/apache/kafka/common/ClusterResourceListener.html b/static/41/javadoc/org/apache/kafka/common/ClusterResourceListener.html new file mode 100644 index 000000000..f9dce4abd --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/ClusterResourceListener.html @@ -0,0 +1,157 @@
ClusterResourceListener (kafka 4.1.0 API)
    + +
    +
    + +
    + +

    Interface ClusterResourceListener

    +
    +
    +
    +
    public interface ClusterResourceListener
    +
A callback interface that users can implement when they wish to get notified about changes in the Cluster metadata.

Users who need access to cluster metadata in interceptors, metric reporters, serializers and deserializers can implement this interface. The order of method calls for each of these types is described below.

Clients

There will be one invocation of onUpdate(ClusterResource) after each metadata response.

ProducerInterceptor : The onUpdate(ClusterResource) method will be invoked after ProducerInterceptor.onSend(org.apache.kafka.clients.producer.ProducerRecord) but before ProducerInterceptor.onAcknowledgement(org.apache.kafka.clients.producer.RecordMetadata, Exception).

ConsumerInterceptor : The onUpdate(ClusterResource) method will be invoked before ConsumerInterceptor.onConsume(org.apache.kafka.clients.consumer.ConsumerRecords)

Serializer : The onUpdate(ClusterResource) method will be invoked before Serializer.serialize(String, Object)

Deserializer : The onUpdate(ClusterResource) method will be invoked before Deserializer.deserialize(String, byte[])

MetricsReporter : The onUpdate(ClusterResource) method will be invoked after first KafkaProducer.send(org.apache.kafka.clients.producer.ProducerRecord) invocation for Producer metrics reporter and after first KafkaConsumer.poll(java.time.Duration) invocation for Consumer metrics reporters. The reporter may receive metric events from the network layer before this method is invoked.

Broker

There is a single invocation onUpdate(ClusterResource) on broker start-up and the cluster metadata will never change.

KafkaMetricsReporter : The onUpdate(ClusterResource) method will be invoked during the bootup of the Kafka broker. The reporter may receive metric events from the network layer before this method is invoked.

MetricsReporter : The onUpdate(ClusterResource) method will be invoked during the bootup of the Kafka broker. The reporter may receive metric events from the network layer before this method is invoked.
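Illustrative sketch (not part of the generated Javadoc): a serializer that also implements this interface so it can capture the cluster id; per the ordering above, onUpdate() runs before the first serialize() call.

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.ClusterResource;
import org.apache.kafka.common.ClusterResourceListener;
import org.apache.kafka.common.serialization.Serializer;

public class ClusterAwareSerializer implements Serializer<String>, ClusterResourceListener {
    private volatile String clusterId;

    @Override
    public void onUpdate(ClusterResource clusterResource) {
        // May be null when the broker does not support cluster ids.
        clusterId = clusterResource.clusterId();
    }

    @Override
    public byte[] serialize(String topic, String data) {
        // The captured cluster id could be used for tagging or metrics; this sketch just serializes the value.
        return data == null ? null : data.getBytes(StandardCharsets.UTF_8);
    }
}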

    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      +
      onUpdate(ClusterResource clusterResource)
      +
      +
      A callback method that a user can implement to get updates for ClusterResource.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        onUpdate

        +
        void onUpdate(ClusterResource clusterResource)
        +
        A callback method that a user can implement to get updates for ClusterResource.
        +
        +
        Parameters:
        +
        clusterResource - cluster metadata
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
diff --git a/static/41/javadoc/org/apache/kafka/common/Configurable.html b/static/41/javadoc/org/apache/kafka/common/Configurable.html new file mode 100644 index 000000000..42c354a47 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/Configurable.html @@ -0,0 +1,136 @@
Configurable (kafka 4.1.0 API)
    + +
    +
    + +
    + +

    Interface Configurable

    +
    +
    +
    +
    All Known Subinterfaces:
    +
    AlterConfigPolicy, Authorizer, ClientQuotaCallback, ConfigProvider, ConnectorClientConfigOverridePolicy, ConnectRestExtension, ConsumerInterceptor<K,V>, CreateTopicPolicy, DeserializationExceptionHandler, DslStoreSuppliers, HeaderConverter, MessageFormatter, MetricsReporter, Partitioner, Predicate<R>, ProcessingExceptionHandler, ProcessorWrapper, ProducerInterceptor<K,V>, ProductionExceptionHandler, Reconfigurable, RecordReader, RemoteLogMetadataManager, RemoteStorageManager, SecurityProviderCreator, SslEngineFactory, TaskAssignor, Transformation<R>
    +
    +
    +
    All Known Implementing Classes:
    +
    BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers, BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers, DefaultProductionExceptionHandler, DefaultReplicationPolicy, DirectoryConfigProvider, EnvVarConfigProvider, FileConfigProvider, IdentityReplicationPolicy, JmxReporter, LogAndContinueExceptionHandler, LogAndContinueProcessingExceptionHandler, LogAndFailExceptionHandler, LogAndFailProcessingExceptionHandler, Materialized.StoreType, RoundRobinPartitioner, SimpleHeaderConverter, StickyTaskAssignor, StringConverter
    +
    +
    +
    public interface Configurable
    +
    A mix-in style interface for classes that are instantiated by reflection and need to take configuration parameters.
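    A minimal sketch of an implementation (illustrative only; the class name and the "greeting.text" key are made up, while the configure signature is the one documented below):

    import java.util.Map;
    import org.apache.kafka.common.Configurable;

    public class GreetingPlugin implements Configurable {
        private String greeting = "hello";   // default used when the key is absent

        @Override
        public void configure(Map<String, ?> configs) {
            Object value = configs.get("greeting.text");   // hypothetical config key
            if (value != null) {
                greeting = value.toString();
            }
        }

        public String greeting() {
            return greeting;
        }
    }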
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      +
      configure(Map<String,?> configs)
      +
      +
      Configure this class with the given key-value pairs
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        void configure(Map<String,?> configs)
        +
        Configure this class with the given key-value pairs
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/ConsumerGroupState.html b/static/41/javadoc/org/apache/kafka/common/ConsumerGroupState.html new file mode 100644 index 000000000..ba4f776d0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/ConsumerGroupState.html @@ -0,0 +1,328 @@ + + + + +ConsumerGroupState (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class ConsumerGroupState

    +
    +
    java.lang.Object +
    java.lang.Enum<ConsumerGroupState> +
    org.apache.kafka.common.ConsumerGroupState
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<ConsumerGroupState>, Constable
    +
    +
    +
    @Deprecated +public enum ConsumerGroupState +extends Enum<ConsumerGroupState>
    +
    Deprecated. +
    Since 4.0. Use GroupState instead.
    +
    +
    The consumer group state.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static ConsumerGroupState[] values()
        +
        Deprecated.
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static ConsumerGroupState valueOf(String name)
        +
        Deprecated.
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        parse

        +
        public static ConsumerGroupState parse(String name)
        +
        Deprecated.
        +
        Case-insensitive consumer group state lookup by string name.
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        Deprecated.
        +
        +
        Overrides:
        +
        toString in class Enum<ConsumerGroupState>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/ElectionType.html b/static/41/javadoc/org/apache/kafka/common/ElectionType.html new file mode 100644 index 000000000..3e5f23338 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/ElectionType.html @@ -0,0 +1,271 @@ + + + + +ElectionType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class ElectionType

    +
    +
    java.lang.Object +
    java.lang.Enum<ElectionType> +
    org.apache.kafka.common.ElectionType
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<ElectionType>, Constable
    +
    +
    +
    public enum ElectionType +extends Enum<ElectionType>
    + +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      +
        +
      • +
        +

        PREFERRED

        +
        public static final ElectionType PREFERRED
        +
        +
      • +
      • +
        +

        UNCLEAN

        +
        public static final ElectionType UNCLEAN
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        value

        +
        public final byte value
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static ElectionType[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static ElectionType valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static ElectionType valueOf(byte value)
        +
        Returns the enum constant of this class with the specified byte value. The value must match the value field of one of the declared constants.
        +
        +
        Parameters:
        +
        value - the byte value of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified value
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified value
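        A short usage sketch (illustrative): both lookups below resolve to the same constant, using the public value field documented above.

        import org.apache.kafka.common.ElectionType;

        public class ElectionTypeSketch {
            public static void main(String[] args) {
                ElectionType byName = ElectionType.valueOf("PREFERRED");    // lookup by constant name
                ElectionType byValue = ElectionType.valueOf(byName.value);  // lookup by byte value
                System.out.println(byName == byValue);                      // true
            }
        }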
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/Endpoint.html b/static/41/javadoc/org/apache/kafka/common/Endpoint.html new file mode 100644 index 000000000..5322ef59d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/Endpoint.html @@ -0,0 +1,263 @@ + + + + +Endpoint (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Endpoint

    +
    +
    java.lang.Object +
    org.apache.kafka.common.Endpoint
    +
    +
    +
    +
    public class Endpoint +extends Object
    +
    Represents a broker endpoint.
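    A hedged sketch of constructing and inspecting an endpoint. It assumes a public constructor taking (listener name, security protocol, host, port); that constructor is not visible in the extracted constructor summary above, so treat it as an assumption. The accessors are the ones documented below.

    import org.apache.kafka.common.Endpoint;
    import org.apache.kafka.common.security.auth.SecurityProtocol;

    public class EndpointSketch {
        public static void main(String[] args) {
            // Assumed constructor: listener name, security protocol, advertised host, port.
            Endpoint endpoint = new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "broker-0.example.com", 9092);
            System.out.println(endpoint.listener() + " -> "
                    + endpoint.host() + ":" + endpoint.port()
                    + " (" + endpoint.securityProtocol() + ")");
        }
    }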
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        listener

        +
        public String listener()
        +
        Returns the listener name of this endpoint.
        +
        +
      • +
      • +
        +

        listenerName

        +
        @Deprecated(since="4.1", + forRemoval=true) +public Optional<String> listenerName()
        +
        Deprecated, for removal: This API element is subject to removal in a future version. +
        Since 4.1. Use listener() instead. This function will be removed in 5.0.
        +
        +
        Returns the listener name of this endpoint. This is non-empty for endpoints provided + to broker plugins, but may be empty when used in clients.
        +
        +
      • +
      • +
        +

        securityProtocol

        +
        public SecurityProtocol securityProtocol()
        +
        Returns the security protocol of this endpoint.
        +
        +
      • +
      • +
        +

        host

        +
        public String host()
        +
        Returns advertised host name of this endpoint.
        +
        +
      • +
      • +
        +

        port

        +
        public int port()
        +
        Returns the port to which the listener is bound.
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/GroupState.html b/static/41/javadoc/org/apache/kafka/common/GroupState.html new file mode 100644 index 000000000..3338b1394 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/GroupState.html @@ -0,0 +1,327 @@ + + + + +GroupState (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class GroupState

    +
    +
    java.lang.Object +
    java.lang.Enum<GroupState> +
    org.apache.kafka.common.GroupState
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<GroupState>, Constable
    +
    +
    +
    @Evolving +public enum GroupState +extends Enum<GroupState>
    +
    The group state. +

    + The following table shows the correspondence between the group states and types. + + + + + + + + + + + + + + + +
    StateClassic groupConsumer groupShare groupStreams group
    UNKNOWNYesYesYesYes
    PREPARING_REBALANCEYesYes
    COMPLETING_REBALANCEYesYes
    STABLEYesYesYesYes
    DEADYesYesYesYes
    EMPTYYesYesYesYes
    ASSIGNINGYesYes
    RECONCILINGYesYes
    NOT_READYYes

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      +
        +
      • +
        +

        UNKNOWN

        +
        public static final GroupState UNKNOWN
        +
        +
      • +
      • +
        +

        PREPARING_REBALANCE

        +
        public static final GroupState PREPARING_REBALANCE
        +
        +
      • +
      • +
        +

        COMPLETING_REBALANCE

        +
        public static final GroupState COMPLETING_REBALANCE
        +
        +
      • +
      • +
        +

        STABLE

        +
        public static final GroupState STABLE
        +
        +
      • +
      • +
        +

        DEAD

        +
        public static final GroupState DEAD
        +
        +
      • +
      • +
        +

        EMPTY

        +
        public static final GroupState EMPTY
        +
        +
      • +
      • +
        +

        ASSIGNING

        +
        public static final GroupState ASSIGNING
        +
        +
      • +
      • +
        +

        RECONCILING

        +
        public static final GroupState RECONCILING
        +
        +
      • +
      • +
        +

        NOT_READY

        +
        public static final GroupState NOT_READY
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static GroupState[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static GroupState valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        parse

        +
        public static GroupState parse(String name)
        +
        Case-insensitive group state lookup by string name.
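        A brief usage sketch (illustrative): parse is case-insensitive, and groupStatesForType, documented just below, narrows the set of states to those valid for a given group type.

        import java.util.Set;
        import org.apache.kafka.common.GroupState;
        import org.apache.kafka.common.GroupType;

        public class GroupStateSketch {
            public static void main(String[] args) {
                GroupState state = GroupState.parse("stable");   // case-insensitive -> GroupState.STABLE
                Set<GroupState> consumerStates = GroupState.groupStatesForType(GroupType.CONSUMER);
                System.out.println(state + " valid for consumer groups? " + consumerStates.contains(state));
            }
        }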
        +
        +
      • +
      • +
        +

        groupStatesForType

        +
        public static Set<GroupState> groupStatesForType(GroupType type)
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Enum<GroupState>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/GroupType.html b/static/41/javadoc/org/apache/kafka/common/GroupType.html new file mode 100644 index 000000000..eea8bc2a9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/GroupType.html @@ -0,0 +1,266 @@ + + + + +GroupType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class GroupType

    +
    +
    java.lang.Object +
    java.lang.Enum<GroupType> +
    org.apache.kafka.common.GroupType
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<GroupType>, Constable
    +
    +
    +
    public enum GroupType +extends Enum<GroupType>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      +
        +
      • +
        +

        UNKNOWN

        +
        public static final GroupType UNKNOWN
        +
        +
      • +
      • +
        +

        CONSUMER

        +
        public static final GroupType CONSUMER
        +
        +
      • +
      • +
        +

        CLASSIC

        +
        public static final GroupType CLASSIC
        +
        +
      • +
      • +
        +

        SHARE

        +
        public static final GroupType SHARE
        +
        +
      • +
      • +
        +

        STREAMS

        +
        public static final GroupType STREAMS
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static GroupType[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static GroupType valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        parse

        +
        public static GroupType parse(String name)
        +
        Parse a string into a consumer group type, in a case-insensitive manner.
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Enum<GroupType>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/InvalidRecordException.html b/static/41/javadoc/org/apache/kafka/common/InvalidRecordException.html new file mode 100644 index 000000000..bee1ace51 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/InvalidRecordException.html @@ -0,0 +1,167 @@ + + + + +InvalidRecordException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class InvalidRecordException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class InvalidRecordException +extends InvalidConfigurationException
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        InvalidRecordException

        +
        public InvalidRecordException(String s)
        +
        +
      • +
      • +
        +

        InvalidRecordException

        +
        public InvalidRecordException(String message, + Throwable cause)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/IsolationLevel.html b/static/41/javadoc/org/apache/kafka/common/IsolationLevel.html new file mode 100644 index 000000000..c11222efc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/IsolationLevel.html @@ -0,0 +1,248 @@ + + + + +IsolationLevel (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class IsolationLevel

    +
    +
    java.lang.Object +
    java.lang.Enum<IsolationLevel> +
    org.apache.kafka.common.IsolationLevel
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<IsolationLevel>, Constable
    +
    +
    +
    public enum IsolationLevel +extends Enum<IsolationLevel>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      +
        +
      • +
        +

        READ_UNCOMMITTED

        +
        public static final IsolationLevel READ_UNCOMMITTED
        +
        +
      • +
      • +
        +

        READ_COMMITTED

        +
        public static final IsolationLevel READ_COMMITTED
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static IsolationLevel[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static IsolationLevel valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        id

        +
        public byte id()
        +
        +
      • +
      • +
        +

        forId

        +
        public static IsolationLevel forId(byte id)
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Enum<IsolationLevel>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/KafkaException.html b/static/41/javadoc/org/apache/kafka/common/KafkaException.html new file mode 100644 index 000000000..85838fbff --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/KafkaException.html @@ -0,0 +1,179 @@ + + + + +KafkaException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class KafkaException

    +
    +
    java.lang.Object +
    java.lang.Throwable +
    java.lang.Exception +
    java.lang.RuntimeException +
    org.apache.kafka.common.KafkaException
    +
    +
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    Direct Known Subclasses:
    +
    ApiException, CommitFailedException, ConfigException, ConnectException, InterruptException, InvalidOffsetException, JwtRetrieverException, JwtValidatorException, QuotaViolationException, SerializationException, StreamsException, WakeupException
    +
    +
    +
    public class KafkaException +extends RuntimeException
    +
    The base class of all other Kafka exceptions
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        KafkaException

        +
        public KafkaException(String message, + Throwable cause)
        +
        +
      • +
      • +
        +

        KafkaException

        +
        public KafkaException(String message)
        +
        +
      • +
      • +
        +

        KafkaException

        +
        public KafkaException(Throwable cause)
        +
        +
      • +
      • +
        +

        KafkaException

        +
        public KafkaException()
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/KafkaFuture.BaseFunction.html b/static/41/javadoc/org/apache/kafka/common/KafkaFuture.BaseFunction.html new file mode 100644 index 000000000..4bc865963 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/KafkaFuture.BaseFunction.html @@ -0,0 +1,134 @@ + + + + +KafkaFuture.BaseFunction (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface KafkaFuture.BaseFunction<A,B>

    +
    +
    +
    +
    Enclosing class:
    +
    KafkaFuture<T>
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public static interface KafkaFuture.BaseFunction<A,B>
    +
    A function which takes objects of type A and returns objects of type B.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      apply(A a)
      +
       
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        apply

        +
        B apply(A a)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/KafkaFuture.BiConsumer.html b/static/41/javadoc/org/apache/kafka/common/KafkaFuture.BiConsumer.html new file mode 100644 index 000000000..052ecb2b7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/KafkaFuture.BiConsumer.html @@ -0,0 +1,136 @@ + + + + +KafkaFuture.BiConsumer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface KafkaFuture.BiConsumer<A,B>

    +
    +
    +
    +
    Enclosing class:
    +
    KafkaFuture<T>
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public static interface KafkaFuture.BiConsumer<A,B>
    +
    A consumer of two different types of object.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      +
      accept(A a, + B b)
      +
       
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        accept

        +
        void accept(A a, + B b)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/KafkaFuture.html b/static/41/javadoc/org/apache/kafka/common/KafkaFuture.html new file mode 100644 index 000000000..b9dc0f7dc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/KafkaFuture.html @@ -0,0 +1,422 @@ + + + + +KafkaFuture (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class KafkaFuture<T>

    +
    +
    java.lang.Object +
    org.apache.kafka.common.KafkaFuture<T>
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Future<T>
    +
    +
    +
    public abstract class KafkaFuture<T> +extends Object +implements Future<T>
    +
    A flexible future which supports call chaining and other asynchronous programming patterns. + +

    Relation to CompletionStage

    +

    It is possible to obtain a CompletionStage from a KafkaFuture instance by calling toCompletionStage(). When converting whenComplete(BiConsumer) or thenApply(BaseFunction) to CompletableFuture.whenComplete(java.util.function.BiConsumer) or CompletableFuture.thenApply(java.util.function.Function), be aware that the returned KafkaFuture will fail with an ExecutionException, whereas a CompletionStage fails with a CompletionException.
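    A minimal, self-contained sketch (illustrative; the "abc123" value is made up) showing thenApply, whenComplete, and a blocking get() on an already-completed future:

    import org.apache.kafka.common.KafkaFuture;

    public class KafkaFutureSketch {
        public static void main(String[] args) throws Exception {
            // An already-completed future stands in for a result from an asynchronous client call.
            KafkaFuture<String> clusterId = KafkaFuture.completedFuture("abc123");
            KafkaFuture<Integer> length = clusterId
                    .thenApply(String::length)              // KafkaFuture.BaseFunction
                    .whenComplete((len, error) -> {         // KafkaFuture.BiConsumer
                        if (error != null) {
                            System.err.println("lookup failed: " + error);
                        }
                    });
            System.out.println(length.get());               // prints 6; get() can throw ExecutionException
        }
    }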

    +
    +
    +
      + +
    • +
      +

      Nested Class Summary

      +
      Nested Classes
      +
      +
      Modifier and Type
      +
      Class
      +
      Description
      +
      static interface 
      + +
      +
      A function which takes objects of type A and returns objects of type B.
      +
      +
      static interface 
      + +
      +
      A consumer of two different types of object.
      +
      +
      +
      +
    • + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      + +
       
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      allOf(KafkaFuture<?>... futures)
      +
      +
      Returns a new KafkaFuture that is completed when all the given futures have completed.
      +
      +
      abstract boolean
      +
      cancel(boolean mayInterruptIfRunning)
      +
      +
      If not already completed, completes this future with a CancellationException.
      +
      +
      static <U> KafkaFuture<U>
      +
      completedFuture(U value)
      +
      +
      Returns a new KafkaFuture that is already completed with the given value.
      +
      +
      abstract T
      +
      get()
      +
      +
      Waits if necessary for this future to complete, and then returns its result.
      +
      +
      abstract T
      +
      get(long timeout, + TimeUnit unit)
      +
      +
      Waits if necessary for at most the given time for this future to complete, and then returns + its result, if available.
      +
      +
      abstract T
      +
      getNow(T valueIfAbsent)
      +
      +
      Returns the result value (or throws any encountered exception) if completed, else returns + the given valueIfAbsent.
      +
      +
      abstract boolean
      + +
      +
      Returns true if this CompletableFuture was cancelled before it completed normally.
      +
      +
      abstract boolean
      + +
      +
      Returns true if this CompletableFuture completed exceptionally, in any way.
      +
      +
      abstract boolean
      + +
      +
      Returns true if completed in any fashion: normally, exceptionally, or via cancellation.
      +
      +
      abstract <R> KafkaFuture<R>
      + +
      +
      Returns a new KafkaFuture that, when this future completes normally, is executed with this future's result as the argument to the supplied function.
      +
      +
      abstract CompletionStage<T>
      + +
      +
      Gets a CompletionStage with the same completion properties as this KafkaFuture.
      +
      +
      abstract KafkaFuture<T>
      + +
      +
      Returns a new KafkaFuture with the same result or exception as this future, that executes the given action + when this future completes.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        KafkaFuture

        +
        public KafkaFuture()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        completedFuture

        +
        public static <U> KafkaFuture<U> completedFuture(U value)
        +
        Returns a new KafkaFuture that is already completed with the given value.
        +
        +
      • +
      • +
        +

        allOf

        +
        public static KafkaFuture<Void> allOf(KafkaFuture<?>... futures)
        +
        Returns a new KafkaFuture that is completed when all the given futures have completed. If any of the futures fails, the returned future completes exceptionally with that exception. If multiple futures fail, which exception is propagated is arbitrarily chosen.
        +
        +
      • +
      • +
        +

        toCompletionStage

        +
        public abstract CompletionStage<T> toCompletionStage()
        +
        Gets a CompletionStage with the same completion properties as this KafkaFuture. + The returned instance will complete when this future completes and in the same way + (with the same result or exception). + +

        Calling toCompletableFuture() on the returned instance will yield a CompletableFuture, + but invocation of the completion methods (complete() and other methods in the complete*() + and obtrude*() families) on that CompletableFuture instance will result in + UnsupportedOperationException being thrown. Unlike a "minimal" CompletableFuture, + the get*() and other methods of CompletableFuture that are not inherited from + CompletionStage will work normally. + +

        If you want to block on the completion of a KafkaFuture you should use + get(), get(long, TimeUnit) or getNow(Object), rather than calling + .toCompletionStage().toCompletableFuture().get() etc.

        +
        +
        Since:
        +
        Kafka 3.0
        +
        +
        +
      • +
      • +
        +

        thenApply

        +
        public abstract <R> KafkaFuture<R> thenApply(KafkaFuture.BaseFunction<T,R> function)
        +
        Returns a new KafkaFuture that, when this future completes normally, is executed with this future's result as the argument to the supplied function. The function may be invoked by the thread that calls thenApply or by the thread that completes the future.
        +
        +
      • +
      • +
        +

        whenComplete

        +
        public abstract KafkaFuture<T> whenComplete(KafkaFuture.BiConsumer<? super T,? super Throwable> action)
        +
        Returns a new KafkaFuture with the same result or exception as this future, that executes the given action + when this future completes. + + When this future is done, the given action is invoked with the result (or null if none) and the exception + (or null if none) of this future as arguments. + + The returned future is completed when the action returns. + The supplied action should not throw an exception. However, if it does, the following rules apply: + if this future completed normally but the supplied action throws an exception, then the returned future completes + exceptionally with the supplied action's exception. + Or, if this future completed exceptionally and the supplied action throws an exception, then the returned future + completes exceptionally with this future's exception. + + The action may be invoked by the thread that calls whenComplete or it may be invoked by the thread that + completes the future.
        +
        +
        Parameters:
        +
        action - the action to perform
        +
        Returns:
        +
        the new future
        +
        +
        +
      • +
      • +
        +

        cancel

        +
        public abstract boolean cancel(boolean mayInterruptIfRunning)
        +
        If not already completed, completes this future with a CancellationException. Dependent + futures that have not already completed will also complete exceptionally, with a + CompletionException caused by this CancellationException.
        +
        +
        Specified by:
        +
        cancel in interface Future<T>
        +
        +
        +
      • +
      • +
        +

        get

        +
        public abstract T get() + throws InterruptedException, +ExecutionException
        +
        Waits if necessary for this future to complete, and then returns its result.
        +
        +
        Specified by:
        +
        get in interface Future<T>
        +
        Throws:
        +
        InterruptedException
        +
        ExecutionException
        +
        +
        +
      • +
      • +
        +

        get

        +
        public abstract T get(long timeout, + TimeUnit unit) + throws InterruptedException, +ExecutionException, +TimeoutException
        +
        Waits if necessary for at most the given time for this future to complete, and then returns + its result, if available.
        +
        +
        Specified by:
        +
        get in interface Future<T>
        +
        Throws:
        +
        InterruptedException
        +
        ExecutionException
        +
        TimeoutException
        +
        +
        +
      • +
      • +
        +

        getNow

        +
        public abstract T getNow(T valueIfAbsent) + throws InterruptedException, +ExecutionException
        +
        Returns the result value (or throws any encountered exception) if completed, else returns + the given valueIfAbsent.
        +
        +
        Throws:
        +
        InterruptedException
        +
        ExecutionException
        +
        +
        +
      • +
      • +
        +

        isCancelled

        +
        public abstract boolean isCancelled()
        +
        Returns true if this CompletableFuture was cancelled before it completed normally.
        +
        +
        Specified by:
        +
        isCancelled in interface Future<T>
        +
        +
        +
      • +
      • +
        +

        isCompletedExceptionally

        +
        public abstract boolean isCompletedExceptionally()
        +
        Returns true if this CompletableFuture completed exceptionally, in any way.
        +
        +
      • +
      • +
        +

        isDone

        +
        public abstract boolean isDone()
        +
        Returns true if completed in any fashion: normally, exceptionally, or via cancellation.
        +
        +
        Specified by:
        +
        isDone in interface Future<T>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/MessageFormatter.html b/static/41/javadoc/org/apache/kafka/common/MessageFormatter.html new file mode 100644 index 000000000..d3378f932 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/MessageFormatter.html @@ -0,0 +1,181 @@ + + + + +MessageFormatter (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface MessageFormatter

    +
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Closeable, Configurable
    +
    +
    +
    public interface MessageFormatter +extends Configurable, Closeable
    +
    This interface allows defining formatters that can be used to parse and format records read by a Consumer instance for display. The kafka-console-consumer has built-in support for MessageFormatter via the --formatter flag. Kafka provides a few implementations to display records of internal topics such as __consumer_offsets, __transaction_state and the MirrorMaker2 topics.
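    An illustrative implementation (the class name and output layout are made up) that prints each record's topic, partition, offset, and UTF-8 decoded value; only writeTo needs to be overridden because configure and close have defaults:

    import java.io.PrintStream;
    import java.nio.charset.StandardCharsets;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.common.MessageFormatter;

    public class OffsetPrefixFormatter implements MessageFormatter {
        @Override
        public void writeTo(ConsumerRecord<byte[], byte[]> consumerRecord, PrintStream output) {
            String value = consumerRecord.value() == null
                    ? "null"
                    : new String(consumerRecord.value(), StandardCharsets.UTF_8);
            output.printf("%s-%d@%d: %s%n",
                    consumerRecord.topic(), consumerRecord.partition(), consumerRecord.offset(), value);
        }
    }

    Such a class would then be passed to kafka-console-consumer through the --formatter flag mentioned above.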
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      default void
      + +
      +
      Closes the formatter
      +
      +
      default void
      +
      configure(Map<String,?> configs)
      +
      +
      Configures the MessageFormatter
      +
      +
      void
      +
      writeTo(ConsumerRecord<byte[],byte[]> consumerRecord, + PrintStream output)
      +
      +
      Parses and formats a record for display
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        default void configure(Map<String,?> configs)
        +
        Configures the MessageFormatter
        +
        +
        Specified by:
        +
        configure in interface Configurable
        +
        Parameters:
        +
        configs - Map to configure the formatter
        +
        +
        +
      • +
      • +
        +

        writeTo

        +
        void writeTo(ConsumerRecord<byte[],byte[]> consumerRecord, + PrintStream output)
        +
        Parses and formats a record for display
        +
        +
        Parameters:
        +
        consumerRecord - the record to format
        +
        output - the print stream used to output the record
        +
        +
        +
      • +
      • +
        +

        close

        +
        default void close()
        +
        Closes the formatter
        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        Specified by:
        +
        close in interface Closeable
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/Metric.html b/static/41/javadoc/org/apache/kafka/common/Metric.html new file mode 100644 index 000000000..b5d7223d9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/Metric.html @@ -0,0 +1,144 @@ + + + + +Metric (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Metric

    +
    +
    +
    +
    All Known Implementing Classes:
    +
    KafkaMetric
    +
    +
    +
    public interface Metric
    +
    A metric tracked for monitoring purposes.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
      A name for this metric
      +
      + + +
      +
      The value of the metric, which may be measurable or a non-measurable gauge
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        metricName

        +
        MetricName metricName()
        +
        A name for this metric
        +
        +
      • +
      • +
        +

        metricValue

        +
        Object metricValue()
        +
        The value of the metric, which may be measurable or a non-measurable gauge
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/MetricName.html b/static/41/javadoc/org/apache/kafka/common/MetricName.html new file mode 100644 index 000000000..9d1658942 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/MetricName.html @@ -0,0 +1,279 @@ + + + + +MetricName (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MetricName

    +
    +
    java.lang.Object +
    org.apache.kafka.common.MetricName
    +
    +
    +
    +
    public final class MetricName +extends Object
    +
    The MetricName class encapsulates a metric's name, logical group and its related attributes. It should be constructed using metrics.MetricName(...). +

    + This class captures the following parameters: +

    +  name The name of the metric
    +  group logical group name of the metrics to which this metric belongs.
    +  description A human-readable description to include in the metric. This is optional.
    +  tags additional key/value attributes of the metric. This is optional.
    + 
    + group, tags parameters can be used to create unique metric names while reporting in JMX or any custom reporting. +

    + Ex: standard JMX MBean can be constructed like domainName:type=group,key1=val1,key2=val2 +

    + + Usage looks something like this: +

    
    + // set up metrics:
    +
    + Map<String, String> metricTags = new LinkedHashMap<String, String>();
    + metricTags.put("client-id", "producer-1");
    + metricTags.put("topic", "topic");
    +
    + MetricConfig metricConfig = new MetricConfig().tags(metricTags);
    + Metrics metrics = new Metrics(metricConfig); // this is the global repository of metrics and sensors
    +
    + Sensor sensor = metrics.sensor("message-sizes");
    +
    + MetricName metricName = metrics.metricName("message-size-avg", "producer-metrics", "average message size");
    + sensor.add(metricName, new Avg());
    +
    + metricName = metrics.metricName("message-size-max", "producer-metrics");
    + sensor.add(metricName, new Max());
    +
    + metricName = metrics.metricName("message-size-min", "producer-metrics", "message minimum size", "client-id", "my-client", "topic", "my-topic");
    + sensor.add(metricName, new Min());
    +
    + // as messages are sent we record the sizes
    + sensor.record(messageSize);
    + 
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        MetricName

        +
        public MetricName(String name, + String group, + String description, + Map<String,String> tags)
        +
        Please create MetricName by method Metrics.metricName(String, String, String, Map)
        +
        +
        Parameters:
        +
        name - The name of the metric
        +
        group - logical group name of the metrics to which this metric belongs
        +
        description - A human-readable description to include in the metric
        +
        tags - additional key/value attributes of the metric
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        name

        +
        public String name()
        +
        +
      • +
      • +
        +

        group

        +
        public String group()
        +
        +
      • +
      • +
        +

        tags

        +
        public Map<String,String> tags()
        +
        +
      • +
      • +
        +

        description

        +
        public String description()
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object obj)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/MetricNameTemplate.html b/static/41/javadoc/org/apache/kafka/common/MetricNameTemplate.html new file mode 100644 index 000000000..e85579aea --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/MetricNameTemplate.html @@ -0,0 +1,298 @@ + + + + +MetricNameTemplate (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MetricNameTemplate

    +
    +
    java.lang.Object +
    org.apache.kafka.common.MetricNameTemplate
    +
    +
    +
    +
    public class MetricNameTemplate +extends Object
    +
    A template for a MetricName. It contains a name, group, and description, as + well as all the tags that will be used to create the mBean name. Tag values + are omitted from the template, but are filled in at runtime with their + specified values. The order of the tags is maintained, if an ordered set + is provided, so that the mBean names can be compared and sorted lexicographically.
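    A small sketch (the metric, group, and tag names below are illustrative) using the varargs constructor described under Constructor Details:

    import org.apache.kafka.common.MetricNameTemplate;

    public class TemplateSketch {
        // Tag values are omitted here; they are supplied later when concrete MetricNames are created.
        static final MetricNameTemplate RECORD_SEND_RATE = new MetricNameTemplate(
                "record-send-rate",                                 // name
                "producer-topic-metrics",                           // group
                "The average number of records sent per second.",   // description
                "client-id", "topic");                              // ordered tag names
    }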
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        MetricNameTemplate

        +
        public MetricNameTemplate(String name, + String group, + String description, + Set<String> tagsNames)
        +
        Create a new template. Note that the order of the tags will be preserved if the supplied + tagsNames set has an order.
        +
        +
        Parameters:
        +
        name - the name of the metric; may not be null
        +
        group - the name of the group; may not be null
        +
        description - the description of the metric; may not be null
        +
        tagsNames - the set of metric tag names, which can/should be a set that maintains order; may not be null
        +
        +
        +
      • +
      • +
        +

        MetricNameTemplate

        +
        public MetricNameTemplate(String name, + String group, + String description, + String... tagsNames)
        +
        Create a new template. Note that the order of the tags will be preserved.
        +
        +
        Parameters:
        +
        name - the name of the metric; may not be null
        +
        group - the name of the group; may not be null
        +
        description - the description of the metric; may not be null
        +
        tagsNames - the names of the metric tags in the preferred order; none of the tag names should be null
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        name

        +
        public String name()
        +
        Get the name of the metric.
        +
        +
        Returns:
        +
        the metric name; never null
        +
        +
        +
      • +
      • +
        +

        group

        +
        public String group()
        +
        Get the name of the group.
        +
        +
        Returns:
        +
        the group name; never null
        +
        +
        +
      • +
      • +
        +

        description

        +
        public String description()
        +
        Get the description of the metric.
        +
        +
        Returns:
        +
        the metric description; never null
        +
        +
        +
      • +
      • +
        +

        tags

        +
        public Set<String> tags()
        +
        Get the set of tag names for the metric.
        +
        +
        Returns:
        +
        the ordered set of tag names; never null but possibly empty
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/Node.html b/static/41/javadoc/org/apache/kafka/common/Node.html new file mode 100644 index 000000000..78180daa8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/Node.html @@ -0,0 +1,335 @@ + + + + +Node (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Node

    +
    +
    java.lang.Object +
    org.apache.kafka.common.Node
    +
    +
    +
    +
    public class Node +extends Object
    +
    Information about a Kafka node
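    A short sketch (the host names are made up) exercising the constructors and accessors listed below:

    import org.apache.kafka.common.Node;

    public class NodeSketch {
        public static void main(String[] args) {
            // Constructors shown below take id, host, port, and optionally rack and isFenced.
            Node broker = new Node(0, "broker-0.example.com", 9092, "rack-a");
            System.out.println(broker.idString() + " -> " + broker.host() + ":" + broker.port()
                    + (broker.hasRack() ? " rack=" + broker.rack() : ""));
            System.out.println(Node.noNode().isEmpty());   // true: noNode() is a placeholder
        }
    }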
    +
    +
    +
      + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      +
      Node(int id, + String host, + int port)
      +
       
      +
      Node(int id, + String host, + int port, + String rack)
      +
       
      +
      Node(int id, + String host, + int port, + String rack, + boolean isFenced)
      +
       
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      + +
       
      +
      int
      + +
       
      +
      boolean
      + +
      +
      True if this node has a defined rack
      +
      + + +
      +
      The host name for this node
      +
      +
      int
      +
      id()
      +
      +
      The node id of this node
      +
      + + +
      +
      String representation of the node id.
      +
      +
      boolean
      + +
      +
      Check whether this node is empty, which may be the case if noNode() is used as a placeholder + in a response payload with an error.
      +
      +
      boolean
      + +
      +
      Whether this node is fenced
      +
      +
      static Node
      + +
       
      +
      int
      + +
      +
      The port for this node
      +
      + + +
      +
      The rack for this node
      +
      + + +
       
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Node

        +
        public Node(int id, + String host, + int port)
        +
        +
      • +
      • +
        +

        Node

        +
        public Node(int id, + String host, + int port, + String rack)
        +
        +
      • +
      • +
        +

        Node

        +
        public Node(int id, + String host, + int port, + String rack, + boolean isFenced)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        noNode

        +
        public static Node noNode()
        +
        +
      • +
      • +
        +

        isEmpty

        +
        public boolean isEmpty()
        +
        Check whether this node is empty, which may be the case if noNode() is used as a placeholder + in a response payload with an error.
        +
        +
        Returns:
        +
        true if it is, false otherwise
        +
        +
        +
      • +
      • +
        +

        id

        +
        public int id()
        +
        The node id of this node
        +
        +
      • +
      • +
        +

        idString

        +
        public String idString()
        +
        String representation of the node id. Typically the integer id is used to serialize over the wire, while the string representation is used as an identifier in NetworkClient code.
        +
        +
      • +
      • +
        +

        host

        +
        public String host()
        +
        The host name for this node
        +
        +
      • +
      • +
        +

        port

        +
        public int port()
        +
        The port for this node
        +
        +
      • +
      • +
        +

        hasRack

        +
        public boolean hasRack()
        +
        True if this node has a defined rack
        +
        +
      • +
      • +
        +

        rack

        +
        public String rack()
        +
        The rack for this node
        +
        +
      • +
      • +
        +

        isFenced

        +
        public boolean isFenced()
        +
        Whether this node is fenced
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object obj)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/PartitionInfo.html b/static/41/javadoc/org/apache/kafka/common/PartitionInfo.html new file mode 100644 index 000000000..8d74844c0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/PartitionInfo.html @@ -0,0 +1,289 @@ + + + + +PartitionInfo (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class PartitionInfo

    +
    +
    java.lang.Object +
    org.apache.kafka.common.PartitionInfo
    +
    +
    +
    +
    public class PartitionInfo +extends Object
    +
    This is used to describe per-partition state in the MetadataResponse.
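    A brief sketch (topic and hosts are made up) constructing a PartitionInfo with the five-argument constructor and reading it back:

    import org.apache.kafka.common.Node;
    import org.apache.kafka.common.PartitionInfo;

    public class PartitionInfoSketch {
        public static void main(String[] args) {
            Node leader = new Node(0, "broker-0.example.com", 9092);
            Node follower = new Node(1, "broker-1.example.com", 9092);
            Node[] replicas = {leader, follower};

            // Constructor shown below: topic, partition, leader, replicas, in-sync replicas.
            PartitionInfo info = new PartitionInfo("orders", 0, leader, replicas, replicas);
            System.out.println(info.topic() + "-" + info.partition()
                    + " leader=" + info.leader().idString()
                    + " isr=" + info.inSyncReplicas().length);
        }
    }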
    +
    +
    +
      + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      +
      PartitionInfo(String topic, + int partition, + Node leader, + Node[] replicas, + Node[] inSyncReplicas)
      +
       
      +
      PartitionInfo(String topic, + int partition, + Node leader, + Node[] replicas, + Node[] inSyncReplicas, + Node[] offlineReplicas)
      +
       
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      + +
       
      +
      int
      + +
       
      + + +
      +
      The subset of the replicas that are in sync, that is caught-up to the leader and ready to take over as leader if + the leader should fail
      +
      + + +
      +
      The node id of the node currently acting as a leader for this partition or null if there is no leader
      +
      + + +
      +
      The subset of the replicas that are offline
      +
      +
      int
      + +
      +
      The partition id
      +
      + + +
      +
      The complete set of replicas for this partition regardless of whether they are alive or up-to-date
      +
      + + +
      +
      The topic name
      +
      + + +
       
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        PartitionInfo

        +
        public PartitionInfo(String topic, + int partition, + Node leader, + Node[] replicas, + Node[] inSyncReplicas)
        +
        +
      • +
      • +
        +

        PartitionInfo

        +
        public PartitionInfo(String topic, + int partition, + Node leader, + Node[] replicas, + Node[] inSyncReplicas, + Node[] offlineReplicas)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        topic

        +
        public String topic()
        +
        The topic name
        +
        +
      • +
      • +
        +

        partition

        +
        public int partition()
        +
        The partition id
        +
        +
      • +
      • +
        +

        leader

        +
        public Node leader()
        +
        The node id of the node currently acting as a leader for this partition or null if there is no leader
        +
        +
      • +
      • +
        +

        replicas

        +
        public Node[] replicas()
        +
        The complete set of replicas for this partition regardless of whether they are alive or up-to-date
        +
        +
      • +
      • +
        +

        inSyncReplicas

        +
        public Node[] inSyncReplicas()
        +
        The subset of the replicas that are in sync, that is caught-up to the leader and ready to take over as leader if + the leader should fail
        +
        +
      • +
      • +
        +

        offlineReplicas

        +
        public Node[] offlineReplicas()
        +
        The subset of the replicas that are offline
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object obj)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
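A hypothetical sketch of the five-argument constructor above; the topic name and broker nodes are illustrative only.

```java
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class PartitionInfoExample {
    public static void main(String[] args) {
        Node n0 = new Node(0, "broker-0.example.com", 9092);
        Node n1 = new Node(1, "broker-1.example.com", 9092);

        // Partition 0 of "orders": led by broker 0, replicated on brokers 0 and 1, both in sync.
        PartitionInfo info = new PartitionInfo(
                "orders", 0, n0, new Node[]{n0, n1}, new Node[]{n0, n1});

        System.out.printf("%s-%d led by node %d with %d in-sync replicas%n",
                info.topic(), info.partition(), info.leader().id(), info.inSyncReplicas().length);
    }
}
```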
diff --git a/static/41/javadoc/org/apache/kafka/common/Reconfigurable.html b/static/41/javadoc/org/apache/kafka/common/Reconfigurable.html
new file mode 100644
index 000000000..7dedbfaf9

Reconfigurable (kafka 4.1.0 API)

Interface Reconfigurable

All Superinterfaces: Configurable
All Known Subinterfaces: MetricsReporter
All Known Implementing Classes: JmxReporter

public interface Reconfigurable extends Configurable

Interface for reconfigurable classes that support dynamic configuration.

Method Details

  reconfigurableConfigs
  Set<String> reconfigurableConfigs()
  Returns the names of configs that may be reconfigured.

  validateReconfiguration
  void validateReconfiguration(Map<String,?> configs) throws ConfigException
  Validates the provided configuration. The provided map contains all configs including any reconfigurable configs that may be different from the initial configuration. Reconfiguration will not be performed if this method throws any exception.
  Throws: ConfigException - if the provided configs are not valid. The exception message from ConfigException will be returned to the client in the AlterConfigs response.

  reconfigure
  void reconfigure(Map<String,?> configs)
  Reconfigures this instance with the given key-value pairs. The provided map contains all configs including any reconfigurable configs that may have changed since the object was initially configured using Configurable.configure(Map). This method will only be invoked if the configs have passed validation using validateReconfiguration(Map).
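The contract above (validate first, then reconfigure) can be illustrated with a minimal, hypothetical plugin; the class name and the "sample.rate" config key are invented for the example and are not part of the Kafka API.

```java
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.Reconfigurable;
import org.apache.kafka.common.config.ConfigException;

// Hypothetical plugin whose "sample.rate" setting can change at runtime.
public class SampleRatePlugin implements Reconfigurable {
    private volatile double sampleRate = 1.0;

    @Override
    public void configure(Map<String, ?> configs) {
        sampleRate = parseRate(configs);
    }

    @Override
    public Set<String> reconfigurableConfigs() {
        return Set.of("sample.rate");
    }

    @Override
    public void validateReconfiguration(Map<String, ?> configs) throws ConfigException {
        double rate = parseRate(configs);
        if (rate < 0.0 || rate > 1.0) {
            throw new ConfigException("sample.rate must be between 0.0 and 1.0");
        }
    }

    @Override
    public void reconfigure(Map<String, ?> configs) {
        // Only called after validateReconfiguration(Map) has passed.
        sampleRate = parseRate(configs);
    }

    private static double parseRate(Map<String, ?> configs) {
        Object value = configs.get("sample.rate");
        return value == null ? 1.0 : Double.parseDouble(value.toString());
    }
}
```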
diff --git a/static/41/javadoc/org/apache/kafka/common/TopicCollection.TopicIdCollection.html b/static/41/javadoc/org/apache/kafka/common/TopicCollection.TopicIdCollection.html
new file mode 100644
index 000000000..908fa2f06

TopicCollection.TopicIdCollection (kafka 4.1.0 API)

Class TopicCollection.TopicIdCollection

java.lang.Object
  org.apache.kafka.common.TopicCollection
    org.apache.kafka.common.TopicCollection.TopicIdCollection

Enclosing class: TopicCollection

public static class TopicCollection.TopicIdCollection extends TopicCollection

A class used to represent a collection of topics defined by their topic ID. Subclassing this class beyond the classes provided here is not supported.

Method Details

  topicIds
  public Collection<Uuid> topicIds()
  Returns: A collection of topic IDs
diff --git a/static/41/javadoc/org/apache/kafka/common/TopicCollection.TopicNameCollection.html b/static/41/javadoc/org/apache/kafka/common/TopicCollection.TopicNameCollection.html
new file mode 100644
index 000000000..94be39d16

TopicCollection.TopicNameCollection (kafka 4.1.0 API)

Class TopicCollection.TopicNameCollection

java.lang.Object
  org.apache.kafka.common.TopicCollection
    org.apache.kafka.common.TopicCollection.TopicNameCollection

Enclosing class: TopicCollection

public static class TopicCollection.TopicNameCollection extends TopicCollection

A class used to represent a collection of topics defined by their topic name. Subclassing this class beyond the classes provided here is not supported.

Method Details

  topicNames
  public Collection<String> topicNames()
  Returns: A collection of topic names
diff --git a/static/41/javadoc/org/apache/kafka/common/TopicCollection.html b/static/41/javadoc/org/apache/kafka/common/TopicCollection.html
new file mode 100644
index 000000000..9d7a2f9a5

TopicCollection (kafka 4.1.0 API)

Class TopicCollection

java.lang.Object
  org.apache.kafka.common.TopicCollection

Direct Known Subclasses: TopicCollection.TopicIdCollection, TopicCollection.TopicNameCollection

public abstract class TopicCollection extends Object

A class used to represent a collection of topics. This collection may define topics by name or ID.
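A short usage sketch. It assumes the static factories TopicCollection.ofTopicNames(Collection<String>) and TopicCollection.ofTopicIds(Collection<Uuid>), which exist in the Kafka clients API but are not reproduced in the extract above; the topic names are illustrative.

```java
import java.util.List;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.Uuid;

public class TopicCollectionExample {
    public static void main(String[] args) {
        // Build a collection keyed by topic name.
        TopicCollection.TopicNameCollection byName =
                TopicCollection.ofTopicNames(List.of("orders", "payments"));
        System.out.println(byName.topicNames());

        // Build a collection keyed by topic ID.
        TopicCollection.TopicIdCollection byId =
                TopicCollection.ofTopicIds(List.of(Uuid.randomUuid()));
        System.out.println(byId.topicIds());
    }
}
```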
diff --git a/static/41/javadoc/org/apache/kafka/common/TopicIdPartition.html b/static/41/javadoc/org/apache/kafka/common/TopicIdPartition.html
new file mode 100644
index 000000000..f77989b1b

TopicIdPartition (kafka 4.1.0 API)

Class TopicIdPartition

java.lang.Object
  org.apache.kafka.common.TopicIdPartition

public class TopicIdPartition extends Object

This represents a universally unique identifier with topic id for a topic partition. It makes sure that topics recreated with the same name will always have unique topic identifiers.

Constructor Details

  TopicIdPartition
  public TopicIdPartition(Uuid topicId, TopicPartition topicPartition)
  Create an instance with the provided parameters.
  Parameters: topicId - the topic id; topicPartition - the topic partition

  TopicIdPartition
  public TopicIdPartition(Uuid topicId, int partition, String topic)
  Create an instance with the provided parameters.
  Parameters: topicId - the topic id; partition - the partition id; topic - the topic name or null

Method Details

  topicId
  public Uuid topicId()
  Returns: Universally unique id representing this topic partition.

  topic
  public String topic()
  Returns: the topic name or null if it is unknown.

  partition
  public int partition()
  Returns: the partition id.

  topicPartition
  public TopicPartition topicPartition()
  Returns: Topic partition representing this instance.

  equals
  public boolean equals(Object o)
  Overrides: equals in class Object

  hashCode
  public int hashCode()
  Overrides: hashCode in class Object

  toString
  public String toString()
  Overrides: toString in class Object
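A minimal sketch showing that the two constructors above describe the same topic-id/partition pair; the topic name is illustrative.

```java
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;

public class TopicIdPartitionExample {
    public static void main(String[] args) {
        Uuid topicId = Uuid.randomUuid();

        // Two equivalent ways to build the same topic-id/partition pair.
        TopicIdPartition fromParts = new TopicIdPartition(topicId, 0, "orders");
        TopicIdPartition fromTp = new TopicIdPartition(topicId, new TopicPartition("orders", 0));

        System.out.println(fromParts.equals(fromTp));   // true
        System.out.println(fromParts.topicPartition()); // orders-0
    }
}
```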
diff --git a/static/41/javadoc/org/apache/kafka/common/TopicPartition.html b/static/41/javadoc/org/apache/kafka/common/TopicPartition.html
new file mode 100644
index 000000000..e52ee89e7

TopicPartition (kafka 4.1.0 API)

Class TopicPartition

java.lang.Object
  org.apache.kafka.common.TopicPartition

All Implemented Interfaces: Serializable

public final class TopicPartition extends Object implements Serializable

A topic name and partition number

Constructor Details

  TopicPartition
  public TopicPartition(String topic, int partition)

Method Details

  partition
  public int partition()

  topic
  public String topic()

  hashCode
  public int hashCode()
  Overrides: hashCode in class Object

  equals
  public boolean equals(Object obj)
  Overrides: equals in class Object

  toString
  public String toString()
  Overrides: toString in class Object
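Because equals() and hashCode() are value-based, TopicPartition is commonly used as a map key; a small illustrative sketch (the topic name and offset are invented):

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;

public class TopicPartitionExample {
    public static void main(String[] args) {
        TopicPartition tp = new TopicPartition("orders", 3);

        // Value-based equality makes TopicPartition a convenient map key.
        Map<TopicPartition, Long> offsets = new HashMap<>();
        offsets.put(tp, 42L);

        System.out.println(offsets.get(new TopicPartition("orders", 3))); // 42
        System.out.println(tp); // orders-3
    }
}
```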
diff --git a/static/41/javadoc/org/apache/kafka/common/TopicPartitionInfo.html b/static/41/javadoc/org/apache/kafka/common/TopicPartitionInfo.html
new file mode 100644
index 000000000..8ecd0f427

TopicPartitionInfo (kafka 4.1.0 API)

Class TopicPartitionInfo

java.lang.Object
  org.apache.kafka.common.TopicPartitionInfo

public class TopicPartitionInfo extends Object

A class containing leadership, replicas and ISR information for a topic partition.

Constructor Details

  TopicPartitionInfo
  public TopicPartitionInfo(int partition, Node leader, List<Node> replicas, List<Node> isr, List<Node> elr, List<Node> lastKnownElr)
  Create an instance of this class with the provided parameters.
  Parameters:
    partition - the partition id
    leader - the leader of the partition or Node.noNode() if there is none
    replicas - the replicas of the partition in the same order as the replica assignment (the preferred replica is the head of the list)
    isr - the in-sync replicas
    elr - the eligible leader replicas
    lastKnownElr - the last known eligible leader replicas

  TopicPartitionInfo
  public TopicPartitionInfo(int partition, Node leader, List<Node> replicas, List<Node> isr)

Method Details

  partition
  public int partition()
  Return the partition id.

  leader
  public Node leader()
  Return the leader of the partition or null if there is none.

  replicas
  public List<Node> replicas()
  Return the replicas of the partition in the same order as the replica assignment. The preferred replica is the head of the list. Brokers with version lower than 0.11.0.0 return the replicas in unspecified order due to a bug.

  isr
  public List<Node> isr()
  Return the in-sync replicas of the partition. Note that the ordering of the result is unspecified.

  elr
  public List<Node> elr()
  Return the eligible leader replicas of the partition. Note that the ordering of the result is unspecified.

  lastKnownElr
  public List<Node> lastKnownElr()
  Return the last known eligible leader replicas of the partition. Note that the ordering of the result is unspecified.

  toString
  public String toString()
  Overrides: toString in class Object

  equals
  public boolean equals(Object o)
  Overrides: equals in class Object

  hashCode
  public int hashCode()
  Overrides: hashCode in class Object
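A hypothetical sketch using the four-argument constructor above; the broker nodes are invented for the example.

```java
import java.util.List;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartitionInfo;

public class TopicPartitionInfoExample {
    public static void main(String[] args) {
        Node n0 = new Node(0, "broker-0.example.com", 9092);
        Node n1 = new Node(1, "broker-1.example.com", 9092);

        // Partition 0 led by broker 0, replicated on brokers 0 and 1, both in sync.
        TopicPartitionInfo info =
                new TopicPartitionInfo(0, n0, List.of(n0, n1), List.of(n0, n1));

        System.out.println(info.leader().id());      // 0
        System.out.println(info.replicas().size());  // 2
        System.out.println(info.isr().contains(n1)); // true
    }
}
```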
diff --git a/static/41/javadoc/org/apache/kafka/common/TopicPartitionReplica.html b/static/41/javadoc/org/apache/kafka/common/TopicPartitionReplica.html
new file mode 100644
index 000000000..b4bde2b77

TopicPartitionReplica (kafka 4.1.0 API)

Class TopicPartitionReplica

java.lang.Object
  org.apache.kafka.common.TopicPartitionReplica

All Implemented Interfaces: Serializable

public final class TopicPartitionReplica extends Object implements Serializable

The topic name, partition number and the brokerId of the replica

Constructor Details

  TopicPartitionReplica
  public TopicPartitionReplica(String topic, int partition, int brokerId)

Method Details

  topic
  public String topic()

  partition
  public int partition()

  brokerId
  public int brokerId()

  hashCode
  public int hashCode()
  Overrides: hashCode in class Object

  equals
  public boolean equals(Object obj)
  Overrides: equals in class Object

  toString
  public String toString()
  Overrides: toString in class Object
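A one-line construction sketch; the topic, partition, and broker id are illustrative.

```java
import org.apache.kafka.common.TopicPartitionReplica;

public class TopicPartitionReplicaExample {
    public static void main(String[] args) {
        // Replica of partition 2 of "orders" hosted on broker 5.
        TopicPartitionReplica replica = new TopicPartitionReplica("orders", 2, 5);
        System.out.println(replica.topic() + "-" + replica.partition() + " on broker " + replica.brokerId());
    }
}
```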
diff --git a/static/41/javadoc/org/apache/kafka/common/Uuid.html b/static/41/javadoc/org/apache/kafka/common/Uuid.html
new file mode 100644
index 000000000..a8bfd6f9b

Uuid (kafka 4.1.0 API)

Class Uuid

java.lang.Object
  org.apache.kafka.common.Uuid

All Implemented Interfaces: Comparable<Uuid>

public class Uuid extends Object implements Comparable<Uuid>

This class defines an immutable universally unique identifier (UUID). It represents a 128-bit value. More specifically, the random UUIDs generated by this class are variant 2 (Leach-Salz) version 4 UUIDs. This is the same type of UUID as the ones generated by java.util.UUID. The toString() method prints using the base64 string encoding. Likewise, the fromString method expects a base64 string encoding.

Field Details

  ONE_UUID
  public static final Uuid ONE_UUID
  A reserved UUID. Will never be returned by the randomUuid method.

  METADATA_TOPIC_ID
  public static final Uuid METADATA_TOPIC_ID
  A UUID for the metadata topic in KRaft mode. Will never be returned by the randomUuid method.

  ZERO_UUID
  public static final Uuid ZERO_UUID
  A UUID that represents a null or empty UUID. Will never be returned by the randomUuid method.

  RESERVED
  public static final Set<Uuid> RESERVED
  The set of reserved UUIDs that will never be returned by the randomUuid method.

Constructor Details

  Uuid
  public Uuid(long mostSigBits, long leastSigBits)
  Constructs a 128-bit type 4 UUID where the first long represents the most significant 64 bits and the second long represents the least significant 64 bits.

Method Details

  randomUuid
  public static Uuid randomUuid()
  Static factory to retrieve a type 4 (pseudo randomly generated) UUID. This will not generate a UUID equal to 0, 1, or one whose string representation starts with a dash ("-").

  getMostSignificantBits
  public long getMostSignificantBits()
  Returns the most significant bits of the UUID's 128-bit value.

  getLeastSignificantBits
  public long getLeastSignificantBits()
  Returns the least significant bits of the UUID's 128-bit value.

  equals
  public boolean equals(Object obj)
  Returns true iff obj is another Uuid represented by the same two long values.
  Overrides: equals in class Object

  hashCode
  public int hashCode()
  Returns a hash code for this UUID.
  Overrides: hashCode in class Object

  toString
  public String toString()
  Returns a base64 string encoding of the UUID.
  Overrides: toString in class Object

  fromString
  public static Uuid fromString(String str)
  Creates a UUID based on a base64 string encoding used in the toString() method.

  compareTo
  public int compareTo(Uuid other)
  Specified by: compareTo in interface Comparable<Uuid>

  toArray
  public static Uuid[] toArray(List<Uuid> list)
  Convert a list of Uuid to an array of Uuid.
  Parameters: list - The input list
  Returns: The output array

  toList
  public static List<Uuid> toList(Uuid[] array)
  Convert an array of Uuids to a list of Uuid.
  Parameters: array - The input array
  Returns: The output list
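A small sketch of the base64 round trip described above (toString() followed by fromString()); the printed encoding is random by construction.

```java
import org.apache.kafka.common.Uuid;

public class UuidExample {
    public static void main(String[] args) {
        // Generate a random UUID and round-trip it through its base64 form.
        Uuid id = Uuid.randomUuid();
        String encoded = id.toString();          // base64 string encoding
        Uuid decoded = Uuid.fromString(encoded); // parses the base64 form back

        System.out.println(encoded);
        System.out.println(id.equals(decoded));        // true
        System.out.println(Uuid.ZERO_UUID.equals(id)); // false: randomUuid() never returns a reserved UUID
    }
}
```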
diff --git a/static/41/javadoc/org/apache/kafka/common/acl/AccessControlEntry.html b/static/41/javadoc/org/apache/kafka/common/acl/AccessControlEntry.html
new file mode 100644
index 000000000..5ae932a44

AccessControlEntry (kafka 4.1.0 API)

Class AccessControlEntry

java.lang.Object
  org.apache.kafka.common.acl.AccessControlEntry

public class AccessControlEntry extends Object

Represents an access control entry. ACEs are a tuple of principal, host, operation, and permissionType.

Constructor Details

  AccessControlEntry
  public AccessControlEntry(String principal, String host, AclOperation operation, AclPermissionType permissionType)
  Create an instance of an access control entry with the provided parameters.
  Parameters:
    principal - non-null principal
    host - non-null host
    operation - non-null operation, ANY is not an allowed operation
    permissionType - non-null permission type, ANY is not an allowed type

Method Details

  principal
  public String principal()
  Return the principal for this entry.

  host
  public String host()
  Return the host or `*` for all hosts.

  operation
  public AclOperation operation()
  Return the AclOperation. This method will never return AclOperation.ANY.

  permissionType
  public AclPermissionType permissionType()
  Return the AclPermissionType. This method will never return AclPermissionType.ANY.

  toFilter
  public AccessControlEntryFilter toFilter()
  Create a filter which matches only this AccessControlEntry.

  toString
  public String toString()
  Overrides: toString in class Object

  isUnknown
  public boolean isUnknown()
  Return true if this AccessControlEntry has any UNKNOWN components.

  equals
  public boolean equals(Object o)
  Overrides: equals in class Object

  hashCode
  public int hashCode()
  Overrides: hashCode in class Object
diff --git a/static/41/javadoc/org/apache/kafka/common/acl/AccessControlEntryFilter.html b/static/41/javadoc/org/apache/kafka/common/acl/AccessControlEntryFilter.html
new file mode 100644
index 000000000..3124127da

AccessControlEntryFilter (kafka 4.1.0 API)

Class AccessControlEntryFilter

java.lang.Object
  org.apache.kafka.common.acl.AccessControlEntryFilter

public class AccessControlEntryFilter extends Object

Represents a filter which matches access control entries.

Constructor Details

  AccessControlEntryFilter
  public AccessControlEntryFilter(String principal, String host, AclOperation operation, AclPermissionType permissionType)
  Create an instance of an access control entry filter with the provided parameters.
  Parameters:
    principal - the principal or null
    host - the host or null
    operation - non-null operation
    permissionType - non-null permission type

Method Details

  principal
  public String principal()
  Return the principal or null.

  host
  public String host()
  Return the host or null. The value `*` means any host.

  operation
  public AclOperation operation()
  Return the AclOperation.

  permissionType
  public AclPermissionType permissionType()
  Return the AclPermissionType.

  toString
  public String toString()
  Overrides: toString in class Object

  isUnknown
  public boolean isUnknown()
  Return true if there are any UNKNOWN components.

  matches
  public boolean matches(AccessControlEntry other)
  Returns true if this filter matches the given AccessControlEntry.

  matchesAtMostOne
  public boolean matchesAtMostOne()
  Returns true if this filter could only match one ACE -- in other words, if there are no ANY or UNKNOWN fields.

  findIndefiniteField
  public String findIndefiniteField()
  Returns a string describing an ANY or UNKNOWN field, or null if there is no such field.

  equals
  public boolean equals(Object o)
  Overrides: equals in class Object

  hashCode
  public int hashCode()
  Overrides: hashCode in class Object
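A sketch tying AccessControlEntry and AccessControlEntryFilter together as described above; the principal name is illustrative.

```java
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AccessControlEntryFilter;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;

public class AceFilterExample {
    public static void main(String[] args) {
        // Allow READ from any host for a specific user principal.
        AccessControlEntry entry = new AccessControlEntry(
                "User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW);

        // A filter matching any ALLOW entry for that principal, regardless of host or operation.
        AccessControlEntryFilter filter = new AccessControlEntryFilter(
                "User:alice", null, AclOperation.ANY, AclPermissionType.ALLOW);

        System.out.println(filter.matches(entry));            // true
        System.out.println(filter.matchesAtMostOne());        // false: host is null and operation is ANY
        System.out.println(entry.toFilter().matches(entry));  // true
    }
}
```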
diff --git a/static/41/javadoc/org/apache/kafka/common/acl/AclBinding.html b/static/41/javadoc/org/apache/kafka/common/acl/AclBinding.html
new file mode 100644
index 000000000..399ae4602

AclBinding (kafka 4.1.0 API)

Class AclBinding

java.lang.Object
  org.apache.kafka.common.acl.AclBinding

public class AclBinding extends Object

Represents a binding between a resource pattern and an access control entry.

Constructor Details

  AclBinding
  public AclBinding(ResourcePattern pattern, AccessControlEntry entry)
  Create an instance of this class with the provided parameters.
  Parameters:
    pattern - non-null resource pattern
    entry - non-null entry

Method Details

  isUnknown
  public boolean isUnknown()
  Returns: true if this binding has any UNKNOWN components.

  pattern
  public ResourcePattern pattern()
  Returns: the resource pattern for this binding.

  entry
  public final AccessControlEntry entry()
  Returns: the access control entry for this binding.

  toFilter
  public AclBindingFilter toFilter()
  Create a filter which matches only this AclBinding.

  toString
  public String toString()
  Overrides: toString in class Object

  equals
  public boolean equals(Object o)
  Overrides: equals in class Object

  hashCode
  public int hashCode()
  Overrides: hashCode in class Object
diff --git a/static/41/javadoc/org/apache/kafka/common/acl/AclBindingFilter.html b/static/41/javadoc/org/apache/kafka/common/acl/AclBindingFilter.html
new file mode 100644
index 000000000..2ece9031b

AclBindingFilter (kafka 4.1.0 API)

Class AclBindingFilter

java.lang.Object
  org.apache.kafka.common.acl.AclBindingFilter

public class AclBindingFilter extends Object

A filter which can match AclBinding objects.

Field Details

  ANY
  public static final AclBindingFilter ANY
  A filter which matches any ACL binding.

Constructor Details

  AclBindingFilter
  public AclBindingFilter(ResourcePatternFilter patternFilter, AccessControlEntryFilter entryFilter)
  Create an instance of this filter with the provided parameters.
  Parameters:
    patternFilter - non-null pattern filter
    entryFilter - non-null access control entry filter

Method Details

  isUnknown
  public boolean isUnknown()
  Returns: true if this filter has any UNKNOWN components.

  patternFilter
  public ResourcePatternFilter patternFilter()
  Returns: the resource pattern filter.

  entryFilter
  public final AccessControlEntryFilter entryFilter()
  Returns: the access control entry filter.

  toString
  public String toString()
  Overrides: toString in class Object

  equals
  public boolean equals(Object o)
  Overrides: equals in class Object

  matchesAtMostOne
  public boolean matchesAtMostOne()
  Return true if the resource and entry filters can only match one ACE. In other words, if there are no ANY or UNKNOWN fields.

  findIndefiniteField
  public String findIndefiniteField()
  Return a string describing an ANY or UNKNOWN field, or null if there is no such field.

  matches
  public boolean matches(AclBinding binding)
  Return true if the resource filter matches the binding's resource and the entry filter matches the binding's entry.

  hashCode
  public int hashCode()
  Overrides: hashCode in class Object
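A sketch combining AclBinding with AclBindingFilter.ANY. It also uses ResourcePattern, ResourceType, and PatternType from org.apache.kafka.common.resource, which belong to the same API but are documented elsewhere; the principal and topic name are illustrative.

```java
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;

public class AclBindingExample {
    public static void main(String[] args) {
        // Bind "User:alice may READ topic 'orders' from any host" as a single ACL binding.
        ResourcePattern pattern =
                new ResourcePattern(ResourceType.TOPIC, "orders", PatternType.LITERAL);
        AccessControlEntry entry = new AccessControlEntry(
                "User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW);
        AclBinding binding = new AclBinding(pattern, entry);

        // AclBindingFilter.ANY matches every binding; toFilter() matches only this one.
        System.out.println(AclBindingFilter.ANY.matches(binding)); // true
        System.out.println(binding.toFilter().matchesAtMostOne()); // true
    }
}
```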
diff --git a/static/41/javadoc/org/apache/kafka/common/acl/AclOperation.html b/static/41/javadoc/org/apache/kafka/common/acl/AclOperation.html
new file mode 100644
index 000000000..019eb72b2

AclOperation (kafka 4.1.0 API)

Enum Class AclOperation

java.lang.Object
  java.lang.Enum<AclOperation>
    org.apache.kafka.common.acl.AclOperation

All Implemented Interfaces: Serializable, Comparable<AclOperation>, Constable

public enum AclOperation extends Enum<AclOperation>

Represents an operation which an ACL grants or denies permission to perform. Some operations imply other operations:

  • ALLOW ALL implies ALLOW everything
  • DENY ALL implies DENY everything
  • ALLOW READ implies ALLOW DESCRIBE
  • ALLOW WRITE implies ALLOW DESCRIBE
  • ALLOW DELETE implies ALLOW DESCRIBE
  • ALLOW ALTER implies ALLOW DESCRIBE
  • ALLOW ALTER_CONFIGS implies ALLOW DESCRIBE_CONFIGS

Enum Constant Details

  UNKNOWN - Represents any AclOperation which this client cannot understand, perhaps because this client is too old.
  ANY - In a filter, matches any AclOperation.
  ALL - ALL operation.
  READ - READ operation.
  WRITE - WRITE operation.
  CREATE - CREATE operation.
  DELETE - DELETE operation.
  ALTER - ALTER operation.
  DESCRIBE - DESCRIBE operation.
  CLUSTER_ACTION - CLUSTER_ACTION operation.
  DESCRIBE_CONFIGS - DESCRIBE_CONFIGS operation.
  ALTER_CONFIGS - ALTER_CONFIGS operation.
  IDEMPOTENT_WRITE - IDEMPOTENT_WRITE operation.
  CREATE_TOKENS - CREATE_TOKENS operation.
  DESCRIBE_TOKENS - DESCRIBE_TOKENS operation.
  TWO_PHASE_COMMIT - TWO_PHASE_COMMIT operation.

Method Details

  values
  public static AclOperation[] values()
  Returns an array containing the constants of this enum class, in the order they are declared.

  valueOf
  public static AclOperation valueOf(String name)
  Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not permitted.)
  Throws: IllegalArgumentException - if this enum class has no constant with the specified name; NullPointerException - if the argument is null

  fromString
  public static AclOperation fromString(String str) throws IllegalArgumentException
  Parse the given string as an ACL operation.
  Returns: The AclOperation, or UNKNOWN if the string could not be matched.

  fromCode
  public static AclOperation fromCode(byte code)
  Return the AclOperation with the provided code or `AclOperation.UNKNOWN` if one cannot be found.

  code
  public byte code()
  Return the code of this operation.

  isUnknown
  public boolean isUnknown()
  Return true if this operation is UNKNOWN.
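A small sketch of the parsing helpers above; the unmatched string is deliberately nonsense to show the documented UNKNOWN fallback.

```java
import org.apache.kafka.common.acl.AclOperation;

public class AclOperationExample {
    public static void main(String[] args) {
        AclOperation read = AclOperation.fromString("READ");
        System.out.println(read);                                  // READ
        System.out.println(AclOperation.fromString("not-an-op"));  // UNKNOWN

        // Each operation has a byte code that round-trips through fromCode.
        byte code = read.code();
        System.out.println(AclOperation.fromCode(code));           // READ
    }
}
```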
diff --git a/static/41/javadoc/org/apache/kafka/common/acl/AclPermissionType.html b/static/41/javadoc/org/apache/kafka/common/acl/AclPermissionType.html
new file mode 100644
index 000000000..bd21d63ab

AclPermissionType (kafka 4.1.0 API)

Enum Class AclPermissionType

java.lang.Object
  java.lang.Enum<AclPermissionType>
    org.apache.kafka.common.acl.AclPermissionType

All Implemented Interfaces: Serializable, Comparable<AclPermissionType>, Constable

public enum AclPermissionType extends Enum<AclPermissionType>

Represents whether an ACL grants or denies permissions.

Enum Constant Details

  UNKNOWN - Represents any AclPermissionType which this client cannot understand, perhaps because this client is too old.
  ANY - In a filter, matches any AclPermissionType.
  DENY - Disallows access.
  ALLOW - Grants access.

Method Details

  values
  public static AclPermissionType[] values()
  Returns an array containing the constants of this enum class, in the order they are declared.

  valueOf
  public static AclPermissionType valueOf(String name)
  Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not permitted.)
  Throws: IllegalArgumentException - if this enum class has no constant with the specified name; NullPointerException - if the argument is null

  fromString
  public static AclPermissionType fromString(String str)
  Parse the given string as an ACL permission.
  Returns: The AclPermissionType, or UNKNOWN if the string could not be matched.

  fromCode
  public static AclPermissionType fromCode(byte code)
  Return the AclPermissionType with the provided code or `AclPermissionType.UNKNOWN` if one cannot be found.

  code
  public byte code()
  Return the code of this permission type.

  isUnknown
  public boolean isUnknown()
  Return true if this permission type is UNKNOWN.
diff --git a/static/41/javadoc/org/apache/kafka/common/acl/package-summary.html b/static/41/javadoc/org/apache/kafka/common/acl/package-summary.html
new file mode 100644
index 000000000..8d8ce7c04

org.apache.kafka.common.acl (kafka 4.1.0 API)

Package org.apache.kafka.common.acl

Provides classes representing Access Control Lists for authorization of clients

  AccessControlEntry - Represents an access control entry.
  AccessControlEntryFilter - Represents a filter which matches access control entries.
  AclBinding - Represents a binding between a resource pattern and an access control entry.
  AclBindingFilter - A filter which can match AclBinding objects.
  AclOperation - Represents an operation which an ACL grants or denies permission to perform.
  AclPermissionType - Represents whether an ACL grants or denies permissions.
diff --git a/static/41/javadoc/org/apache/kafka/common/acl/package-tree.html b/static/41/javadoc/org/apache/kafka/common/acl/package-tree.html
new file mode 100644
index 000000000..a89e6e392

org.apache.kafka.common.acl Class Hierarchy (kafka 4.1.0 API)

Hierarchy For Package org.apache.kafka.common.acl

Class Hierarchy

Enum Class Hierarchy
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/annotation/InterfaceStability.Evolving.html b/static/41/javadoc/org/apache/kafka/common/annotation/InterfaceStability.Evolving.html new file mode 100644 index 000000000..a593b1b96 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/annotation/InterfaceStability.Evolving.html @@ -0,0 +1,84 @@ + + + + +InterfaceStability.Evolving (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Annotation Interface InterfaceStability.Evolving

    +
    +
    +
    +
    Enclosing class:
    +
    InterfaceStability
    +
    +
    +
    @Documented +@Retention(RUNTIME) +public static @interface InterfaceStability.Evolving
    +
    Compatibility may be broken at minor release (i.e. m.x).
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/annotation/InterfaceStability.Stable.html b/static/41/javadoc/org/apache/kafka/common/annotation/InterfaceStability.Stable.html new file mode 100644 index 000000000..51b9f98a9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/annotation/InterfaceStability.Stable.html @@ -0,0 +1,89 @@ + + + + +InterfaceStability.Stable (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Annotation Interface InterfaceStability.Stable

    +
    +
    +
    +
    Enclosing class:
    +
    InterfaceStability
    +
    +
    +
    @Documented +@Retention(RUNTIME) +public static @interface InterfaceStability.Stable
    +
    Compatibility is maintained in major, minor and patch releases with one exception: compatibility may be broken in a major release (i.e. 0.m) for APIs that have been deprecated for at least one major/minor release cycle. In cases where the impact of breaking compatibility is significant, there is also a minimum deprecation period of one year.

    This is the default stability level for public APIs that are not annotated.
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/annotation/InterfaceStability.Unstable.html b/static/41/javadoc/org/apache/kafka/common/annotation/InterfaceStability.Unstable.html new file mode 100644 index 000000000..0d3c0d85f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/annotation/InterfaceStability.Unstable.html @@ -0,0 +1,84 @@ + + + + +InterfaceStability.Unstable (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Annotation Interface InterfaceStability.Unstable

    +
    +
    +
    +
    Enclosing class:
    +
    InterfaceStability
    +
    +
    +
    @Documented +@Retention(RUNTIME) +public static @interface InterfaceStability.Unstable
    +
    No guarantee is provided as to reliability or stability across any level of release granularity.
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/annotation/InterfaceStability.html b/static/41/javadoc/org/apache/kafka/common/annotation/InterfaceStability.html new file mode 100644 index 000000000..c3cf7334e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/annotation/InterfaceStability.html @@ -0,0 +1,157 @@ + + + + +InterfaceStability (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class InterfaceStability

    +
    +
    java.lang.Object +
    org.apache.kafka.common.annotation.InterfaceStability
    +
    +
    +
    +
    public class InterfaceStability +extends Object
    +
    Annotation to inform users of how much to rely on a particular package, class or method not changing over time. + Currently the stability can be InterfaceStability.Stable, InterfaceStability.Evolving or InterfaceStability.Unstable.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        InterfaceStability

        +
        public InterfaceStability()
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
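As a rough illustration of how these stability markers are applied, a public API type is annotated with one of the nested annotations. MyCustomPlugin below is a hypothetical interface used only to show the shape, not part of Kafka:

```java
import org.apache.kafka.common.annotation.InterfaceStability;

// Evolving signals that compatibility may be broken in a minor release,
// so callers should expect occasional source/binary changes.
@InterfaceStability.Evolving
public interface MyCustomPlugin {
    void start();
    void stop();
}
```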
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/annotation/package-summary.html b/static/41/javadoc/org/apache/kafka/common/annotation/package-summary.html new file mode 100644 index 000000000..1bfcd48b8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/annotation/package-summary.html @@ -0,0 +1,119 @@ + + + + +org.apache.kafka.common.annotation (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.common.annotation

    +
    +
    +
    package org.apache.kafka.common.annotation
    +
    +
    Provides annotations used on Kafka APIs.
    +
    +
    +
      +
    • + +
    • +
    • +
      +
      +
      +
      +
      Class
      +
      Description
      + +
      +
      Annotation to inform users of how much to rely on a particular package, class or method not changing over time.
      +
      + +
      +
      Compatibility may be broken at minor release (i.e.
      +
      + +
      +
      Compatibility is maintained in major, minor and patch releases with one exception: compatibility may be broken + in a major release (i.e.
      +
      + +
      +
      No guarantee is provided as to reliability or stability across any level of release granularity.
      +
      +
      +
      +
      +
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/annotation/package-tree.html b/static/41/javadoc/org/apache/kafka/common/annotation/package-tree.html new file mode 100644 index 000000000..7222b3fa8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/annotation/package-tree.html @@ -0,0 +1,79 @@ + + + + +org.apache.kafka.common.annotation Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.common.annotation

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Annotation Interface Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/AbstractConfig.html b/static/41/javadoc/org/apache/kafka/common/config/AbstractConfig.html new file mode 100644 index 000000000..e61254b6b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/AbstractConfig.html @@ -0,0 +1,718 @@ + + + + +AbstractConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class AbstractConfig

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.AbstractConfig
    +
    +
    +
    +
    Direct Known Subclasses:
    +
    AdminClientConfig, ConsumerConfig, ConverterConfig, MirrorClientConfig, ProducerConfig, StreamsConfig, TopologyConfig
    +
    +
    +
    public class AbstractConfig +extends Object
    +
    A convenient base class for configurations to extend.

    This class holds both the original configuration that was provided as well as the parsed configuration.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        AUTOMATIC_CONFIG_PROVIDERS_PROPERTY

        +
        public static final String AUTOMATIC_CONFIG_PROVIDERS_PROPERTY
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        CONFIG_PROVIDERS_CONFIG

        +
        public static final String CONFIG_PROVIDERS_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        AbstractConfig

        +
        public AbstractConfig(ConfigDef definition, + Map<?,?> originals, + Map<String,?> configProviderProps, + boolean doLog)
        +
        Construct a configuration with a ConfigDef and the configuration properties, which can include properties for zero or more ConfigProvider instances that will be used to resolve variables in configuration property values.

        The originals map holds name-value configuration properties and optional config provider configs. Each value can be either an actual value or a variable as defined below. This constructor first instantiates the ConfigProviders using the config provider configs, then finds all variables in the values of the originals configuration, attempts to resolve the variables using the named ConfigProviders, and then parses and validates the configurations.

        ConfigProvider configs can be passed either as configs in the originals map or in the separate configProviderProps map. If config provider properties are passed in configProviderProps, any config provider properties in the originals map will be ignored. If ConfigProvider properties are not provided, the constructor will skip the variable substitution step and will simply validate and parse the supplied configuration.

        The "config.providers" configuration property and all configuration properties that begin with the "config.providers." prefix are reserved. The "config.providers" configuration property specifies the names of the config providers, and properties that begin with the "config.providers.." prefix correspond to the properties for that named provider. For example, the "config.providers..class" property specifies the name of the ConfigProvider implementation class that should be used for the provider.

        The keys for ConfigProvider configs in both originals and configProviderProps will start with the above mentioned "config.providers." prefix.

        Variables have the form "${providerName:[path:]key}", where "providerName" is the name of a ConfigProvider, "path" is an optional string, and "key" is a required string. This variable is resolved by passing the "key" and optional "path" to a ConfigProvider with the specified name, and the result from the ConfigProvider is then used in place of the variable. Variables that cannot be resolved by the AbstractConfig constructor will be left unchanged in the configuration. (A usage sketch follows the constructor details below.)

        +
        +
        Parameters:
        +
        definition - the definition of the configurations; may not be null
        +
        originals - the configuration properties plus any optional config provider properties;
        +
        configProviderProps - the map of properties of config providers which will be instantiated by + the constructor to resolve any variables in originals; may be null or empty
        +
        doLog - whether the configurations should be logged
        +
        +
        +
      • +
      • +
        +

        AbstractConfig

        +
        public AbstractConfig(ConfigDef definition, + Map<?,?> originals)
        +
        Construct a configuration with a ConfigDef and the configuration properties, + which can include properties for zero or more ConfigProvider + that will be used to resolve variables in configuration property values.
        +
        +
        Parameters:
        +
        definition - the definition of the configurations; may not be null
        +
        originals - the configuration properties plus any optional config provider properties; may not be null
        +
        +
        +
      • +
      • +
        +

        AbstractConfig

        +
        public AbstractConfig(ConfigDef definition, + Map<?,?> originals, + boolean doLog)
        +
        Construct a configuration with a ConfigDef and the configuration properties, + which can include properties for zero or more ConfigProvider + that will be used to resolve variables in configuration property values.
        +
        +
        Parameters:
        +
        definition - the definition of the configurations; may not be null
        +
        originals - the configuration properties plus any optional config provider properties; may not be null
        +
        doLog - whether the configurations should be logged
        +
        +
        +
      • +
      +
      +
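As referenced above, here is a minimal sketch of the variable-resolution behaviour described in the constructor details. It assumes FileConfigProvider (which ships with Kafka) and a hypothetical properties file at /etc/myapp/secrets.properties containing a `password` key; the config name db.password is also invented for the example:

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;

public class ProviderResolutionSketch {
    public static void main(String[] args) {
        ConfigDef def = new ConfigDef()
            .define("db.password", ConfigDef.Type.STRING, ConfigDef.Importance.HIGH,
                    "A value resolved through a ConfigProvider.");

        Map<String, Object> originals = new HashMap<>();
        // "file" is an arbitrary provider name chosen for this example.
        originals.put("config.providers", "file");
        originals.put("config.providers.file.class",
                      "org.apache.kafka.common.config.provider.FileConfigProvider");
        // Variable form: ${providerName:[path:]key}
        originals.put("db.password", "${file:/etc/myapp/secrets.properties:password}");

        AbstractConfig config = new AbstractConfig(def, originals);
        // Prints the resolved secret (assuming the file exists and holds a "password" key).
        System.out.println(config.getString("db.password"));
    }
}
```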
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        ignore

        +
        public void ignore(String key)
        +
        +
      • +
      • +
        +

        getShort

        +
        public Short getShort(String key)
        +
        +
      • +
      • +
        +

        getInt

        +
        public Integer getInt(String key)
        +
        +
      • +
      • +
        +

        getLong

        +
        public Long getLong(String key)
        +
        +
      • +
      • +
        +

        getDouble

        +
        public Double getDouble(String key)
        +
        +
      • +
      • +
        +

        getList

        +
        public List<String> getList(String key)
        +
        +
      • +
      • +
        +

        getBoolean

        +
        public Boolean getBoolean(String key)
        +
        +
      • +
      • +
        +

        getString

        +
        public String getString(String key)
        +
        +
      • +
      • +
        +

        typeOf

        +
        public ConfigDef.Type typeOf(String key)
        +
        +
      • +
      • +
        +

        documentationOf

        +
        public String documentationOf(String key)
        +
        +
      • +
      • +
        +

        getPassword

        +
        public org.apache.kafka.common.config.types.Password getPassword(String key)
        +
        +
      • +
      • +
        +

        getClass

        +
        public Class<?> getClass(String key)
        +
        +
      • +
      • +
        +

        unused

        +
        public Set<String> unused()
        +
        +
      • +
      • +
        +

        originals

        +
        public Map<String,Object> originals()
        +
        +
      • +
      • +
        +

        originals

        +
        public Map<String,Object> originals(Map<String,Object> configOverrides)
        +
        +
      • +
      • +
        +

        originalsStrings

        +
        public Map<String,String> originalsStrings()
        +
        Get all the original settings, ensuring that all values are of type String.
        +
        +
        Returns:
        +
        the original settings
        +
        Throws:
        +
        ClassCastException - if any of the values are not strings
        +
        +
        +
      • +
      • +
        +

        originalsWithPrefix

        +
        public Map<String,Object> originalsWithPrefix(String prefix)
        +
        Gets all original settings with the given prefix, stripping the prefix before adding it to the output.
        +
        +
        Parameters:
        +
        prefix - the prefix to use as a filter
        +
        Returns:
        +
        a Map containing the settings with the prefix
        +
        +
        +
      • +
      • +
        +

        originalsWithPrefix

        +
        public Map<String,Object> originalsWithPrefix(String prefix, + boolean strip)
        +
        Gets all original settings with the given prefix.
        +
        +
        Parameters:
        +
        prefix - the prefix to use as a filter
        +
        strip - strip the prefix before adding to the output if set true
        +
        Returns:
        +
        a Map containing the settings with the prefix
        +
        +
        +
      • +
      • +
        +

        valuesWithPrefixOverride

        +
        public Map<String,Object> valuesWithPrefixOverride(String prefix)
        +
        Put all keys that do not start with prefix and their parsed values in the result map and then put all the remaining keys with the prefix stripped and their parsed values in the result map.

        This is useful if one wants to allow prefixed configs to override default ones.

        Two forms of prefixes are supported:

        • listener.name.{listenerName}.some.prop: If the provided prefix is `listener.name.{listenerName}.`, the key `some.prop` with the value parsed using the definition of `some.prop` is returned.
        • listener.name.{listenerName}.{mechanism}.some.prop: If the provided prefix is `listener.name.{listenerName}.`, the key `{mechanism}.some.prop` with the value parsed using the definition of `some.prop` is returned. This is used to provide per-mechanism configs for a broker listener (e.g. sasl.jaas.config).

        +
        +
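A small sketch of the prefix-override behaviour just described, using a hand-built ConfigDef rather than a real broker config; the listener name `internal` is arbitrary:

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;

public class PrefixOverrideSketch {
    public static void main(String[] args) {
        ConfigDef def = new ConfigDef()
            .define("sasl.jaas.config", ConfigDef.Type.STRING, "default-jaas",
                    ConfigDef.Importance.MEDIUM, "JAAS configuration.");

        Map<String, Object> props = new HashMap<>();
        // The prefixed key is not defined in the ConfigDef; it only surfaces via prefix lookups.
        props.put("listener.name.internal.sasl.jaas.config", "internal-jaas");

        AbstractConfig config = new AbstractConfig(def, props);

        // The prefixed value overrides the default for keys defined in the ConfigDef.
        Map<String, Object> internal = config.valuesWithPrefixOverride("listener.name.internal.");
        System.out.println(internal.get("sasl.jaas.config"));  // internal-jaas
    }
}
```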
      • +
      • +
        +

        valuesWithPrefixAllOrNothing

        +
        public Map<String,Object> valuesWithPrefixAllOrNothing(String prefix)
        +
        If at least one key with the prefix exists, all prefixed values will be parsed and put into the map. If no value with the prefix exists, all unprefixed values will be returned.

        This is useful if one wants to allow prefixed configs to override default ones, but wants to use either only prefixed configs or only regular configs, but not mix them.

        +
        +
      • +
      • +
        +

        values

        +
        public Map<String,?> values()
        +
        +
      • +
      • +
        +

        nonInternalValues

        +
        public Map<String,?> nonInternalValues()
        +
        +
      • +
      • +
        +

        logUnused

        +
        public void logUnused()
        +
        Info level log for any unused configurations
        +
        +
      • +
      • +
        +

        getConfiguredInstance

        +
        public <T> T getConfiguredInstance(String key, + Class<T> t)
        +
        Get a configured instance of the given class specified by the given configuration key. If the object implements Configurable, configure it using the configuration.
        +
        +
        Parameters:
        +
        key - The configuration key for the class
        +
        t - The interface the class should implement
        +
        Returns:
        +
        A configured instance of the class
        +
        +
        +
      • +
      • +
        +

        getConfiguredInstance

        +
        public <T> T getConfiguredInstance(String key, + Class<T> t, + Map<String,Object> configOverrides)
        +
        Get a configured instance of the given class specified by the given configuration key. If the object implements Configurable, configure it using the configuration.
        +
        +
        Parameters:
        +
        key - The configuration key for the class
        +
        t - The interface the class should implement
        +
        configOverrides - override origin configs
        +
        Returns:
        +
        A configured instance of the class
        +
        +
        +
      • +
      • +
        +

        getConfiguredInstances

        +
        public <T> List<T> getConfiguredInstances(String key, + Class<T> t)
        +
        Get a list of configured instances of the given class specified by the given configuration key. The configuration + may specify either null or an empty string to indicate no configured instances. In both cases, this method + returns an empty list to indicate no configured instances.
        +
        +
        Parameters:
        +
        key - The configuration key for the class
        +
        t - The interface the class should implement
        +
        Returns:
        +
        The list of configured instances
        +
        +
        +
      • +
      • +
        +

        getConfiguredInstances

        +
        public <T> List<T> getConfiguredInstances(String key, + Class<T> t, + Map<String,Object> configOverrides)
        +
        Get a list of configured instances of the given class specified by the given configuration key. The configuration + may specify either null or an empty string to indicate no configured instances. In both cases, this method + returns an empty list to indicate no configured instances.
        +
        +
        Parameters:
        +
        key - The configuration key for the class
        +
        t - The interface the class should implement
        +
        configOverrides - Configuration overrides to use.
        +
        Returns:
        +
        The list of configured instances
        +
        +
        +
      • +
      • +
        +

        getConfiguredInstances

        +
        public <T> List<T> getConfiguredInstances(List<String> classNames, + Class<T> t, + Map<String,Object> configOverrides)
        +
        Get a list of configured instances of the given class specified by the given configuration key. The configuration + may specify either null or an empty string to indicate no configured instances. In both cases, this method + returns an empty list to indicate no configured instances.
        +
        +
        Parameters:
        +
        classNames - The list of class names of the instances to create
        +
        t - The interface the class should implement
        +
        configOverrides - Configuration overrides to use.
        +
        Returns:
        +
        The list of configured instances
        +
        +
        +
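To make the instantiation behaviour concrete, a short sketch that loads the JmxReporter shipped with Kafka through a list-typed key; the key name metric.reporters is defined locally here, not inherited from a client config:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.metrics.MetricsReporter;

public class ConfiguredInstancesSketch {
    public static void main(String[] args) {
        ConfigDef def = new ConfigDef()
            .define("metric.reporters", ConfigDef.Type.LIST, "",
                    ConfigDef.Importance.LOW, "Metrics reporter classes.");

        Map<String, Object> props = new HashMap<>();
        props.put("metric.reporters", "org.apache.kafka.common.metrics.JmxReporter");

        AbstractConfig config = new AbstractConfig(def, props);

        // Each listed class is instantiated; classes implementing Configurable are also configured.
        List<MetricsReporter> reporters =
            config.getConfiguredInstances("metric.reporters", MetricsReporter.class);
        System.out.println(reporters.size());  // 1
    }
}
```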
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/Config.html b/static/41/javadoc/org/apache/kafka/common/config/Config.html new file mode 100644 index 000000000..96207931f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/Config.html @@ -0,0 +1,158 @@ + + + + +Config (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Config

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.Config
    +
    +
    +
    +
    public class Config +extends Object
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      + +
      +
    • + +
    • +
      +

      Method Details

      + +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigChangeCallback.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigChangeCallback.html new file mode 100644 index 000000000..e675253a2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigChangeCallback.html @@ -0,0 +1,135 @@ + + + + +ConfigChangeCallback (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ConfigChangeCallback

    +
    +
    +
    +
    public interface ConfigChangeCallback
    +
    A callback passed to ConfigProvider for subscribing to changes.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      +
      onChange(String path, + ConfigData data)
      +
      +
      Performs an action when configuration data changes.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        onChange

        +
        void onChange(String path, + ConfigData data)
        +
        Performs an action when configuration data changes.
        +
        +
        Parameters:
        +
        path - the path at which the data resides
        +
        data - the configuration data
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
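A trivial implementation sketch of this callback interface; LoggingConfigChangeCallback is a made-up name used only to show the shape of onChange:

```java
import org.apache.kafka.common.config.ConfigChangeCallback;
import org.apache.kafka.common.config.ConfigData;

// Logs which path changed, how many keys the new snapshot holds, and its TTL (may be null).
public class LoggingConfigChangeCallback implements ConfigChangeCallback {
    @Override
    public void onChange(String path, ConfigData data) {
        System.out.println("Config at '" + path + "' changed: "
                + data.data().size() + " keys, ttl=" + data.ttl());
    }
}
```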
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigData.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigData.html new file mode 100644 index 000000000..728579180 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigData.html @@ -0,0 +1,207 @@ + + + + +ConfigData (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigData

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigData
    +
    +
    +
    +
    public class ConfigData +extends Object
    +
    Configuration data from a ConfigProvider.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ConfigData

        +
        public ConfigData(Map<String,String> data, + Long ttl)
        +
        Creates a new ConfigData with the given data and TTL (in milliseconds).
        +
        +
        Parameters:
        +
        data - a Map of key-value pairs
        +
        ttl - the time-to-live of the data in milliseconds, or null if there is no TTL
        +
        +
        +
      • +
      • +
        +

        ConfigData

        +
        public ConfigData(Map<String,String> data)
        +
        Creates a new ConfigData with the given data.
        +
        +
        Parameters:
        +
        data - a Map of key-value pairs
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        data

        +
        public Map<String,String> data()
        +
        Returns the data.
        +
        +
        Returns:
        +
        data a Map of key-value pairs
        +
        +
        +
      • +
      • +
        +

        ttl

        +
        public Long ttl()
        +
        Returns the TTL (in milliseconds).
        +
        +
        Returns:
        +
        ttl the time-to-live (in milliseconds) of the data, or null if there is no TTL
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
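A minimal sketch of constructing and reading a ConfigData instance; the key names and the 30-second TTL are illustrative only:

```java
import java.util.Map;
import org.apache.kafka.common.config.ConfigData;

public class ConfigDataSketch {
    public static void main(String[] args) {
        // With a TTL: the data is considered valid for 30 seconds.
        ConfigData withTtl = new ConfigData(Map.of("password", "s3cret"), 30_000L);
        System.out.println(withTtl.data().get("password") + " (ttl=" + withTtl.ttl() + " ms)");

        // Without a TTL: ttl() returns null, meaning the data does not expire.
        ConfigData noTtl = new ConfigData(Map.of("username", "app"));
        System.out.println(noTtl.ttl());  // null
    }
}
```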
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.CaseInsensitiveValidString.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.CaseInsensitiveValidString.html new file mode 100644 index 000000000..48ff3dd3b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.CaseInsensitiveValidString.html @@ -0,0 +1,175 @@ + + + + +ConfigDef.CaseInsensitiveValidString (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigDef.CaseInsensitiveValidString

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigDef.CaseInsensitiveValidString
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    ConfigDef.Validator
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static class ConfigDef.CaseInsensitiveValidString +extends Object +implements ConfigDef.Validator
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.CompositeValidator.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.CompositeValidator.html new file mode 100644 index 000000000..ab4fd998c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.CompositeValidator.html @@ -0,0 +1,175 @@ + + + + +ConfigDef.CompositeValidator (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigDef.CompositeValidator

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigDef.CompositeValidator
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    ConfigDef.Validator
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static class ConfigDef.CompositeValidator +extends Object +implements ConfigDef.Validator
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.ConfigKey.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.ConfigKey.html new file mode 100644 index 000000000..8ff4aaf6a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.ConfigKey.html @@ -0,0 +1,341 @@ + + + + +ConfigDef.ConfigKey (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigDef.ConfigKey

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigDef.ConfigKey
    +
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static class ConfigDef.ConfigKey +extends Object
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Importance.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Importance.html new file mode 100644 index 000000000..e6007b2c7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Importance.html @@ -0,0 +1,230 @@ + + + + +ConfigDef.Importance (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class ConfigDef.Importance

    +
    +
    java.lang.Object +
    java.lang.Enum<ConfigDef.Importance> +
    org.apache.kafka.common.config.ConfigDef.Importance
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<ConfigDef.Importance>, Constable
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static enum ConfigDef.Importance +extends Enum<ConfigDef.Importance>
    +
    The importance level for a configuration
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static ConfigDef.Importance[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static ConfigDef.Importance valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.LambdaValidator.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.LambdaValidator.html new file mode 100644 index 000000000..77389cf4f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.LambdaValidator.html @@ -0,0 +1,177 @@ + + + + +ConfigDef.LambdaValidator (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigDef.LambdaValidator

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigDef.LambdaValidator
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    ConfigDef.Validator
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static class ConfigDef.LambdaValidator +extends Object +implements ConfigDef.Validator
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.ListSize.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.ListSize.html new file mode 100644 index 000000000..2bbd5c686 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.ListSize.html @@ -0,0 +1,175 @@ + + + + +ConfigDef.ListSize (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigDef.ListSize

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigDef.ListSize
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    ConfigDef.Validator
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static class ConfigDef.ListSize +extends Object +implements ConfigDef.Validator
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        atMostOfSize

        +
        public static ConfigDef.ListSize atMostOfSize(int maxSize)
        +
        +
      • +
      • +
        +

        ensureValid

        +
        public void ensureValid(String name, + Object value)
        +
        Description copied from interface: ConfigDef.Validator
        +
        Perform single configuration validation.
        +
        +
        Specified by:
        +
        ensureValid in interface ConfigDef.Validator
        +
        Parameters:
        +
        name - The name of the configuration
        +
        value - The value of the configuration
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.NonEmptyString.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.NonEmptyString.html new file mode 100644 index 000000000..86297838b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.NonEmptyString.html @@ -0,0 +1,193 @@ + + + + +ConfigDef.NonEmptyString (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigDef.NonEmptyString

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigDef.NonEmptyString
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    ConfigDef.Validator
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static class ConfigDef.NonEmptyString +extends Object +implements ConfigDef.Validator
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        NonEmptyString

        +
        public NonEmptyString()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        ensureValid

        +
        public void ensureValid(String name, + Object o)
        +
        Description copied from interface: ConfigDef.Validator
        +
        Perform single configuration validation.
        +
        +
        Specified by:
        +
        ensureValid in interface ConfigDef.Validator
        +
        Parameters:
        +
        name - The name of the configuration
        +
        o - The value of the configuration
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.NonEmptyStringWithoutControlChars.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.NonEmptyStringWithoutControlChars.html new file mode 100644 index 000000000..216fcd0c2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.NonEmptyStringWithoutControlChars.html @@ -0,0 +1,202 @@ + + + + +ConfigDef.NonEmptyStringWithoutControlChars (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigDef.NonEmptyStringWithoutControlChars

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigDef.NonEmptyStringWithoutControlChars
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    ConfigDef.Validator
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static class ConfigDef.NonEmptyStringWithoutControlChars +extends Object +implements ConfigDef.Validator
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        NonEmptyStringWithoutControlChars

        +
        public NonEmptyStringWithoutControlChars()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      + +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.NonNullValidator.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.NonNullValidator.html new file mode 100644 index 000000000..c37e5f115 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.NonNullValidator.html @@ -0,0 +1,193 @@ + + + + +ConfigDef.NonNullValidator (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigDef.NonNullValidator

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigDef.NonNullValidator
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    ConfigDef.Validator
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static class ConfigDef.NonNullValidator +extends Object +implements ConfigDef.Validator
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        NonNullValidator

        +
        public NonNullValidator()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        ensureValid

        +
        public void ensureValid(String name, + Object value)
        +
        Description copied from interface: ConfigDef.Validator
        +
        Perform single configuration validation.
        +
        +
        Specified by:
        +
        ensureValid in interface ConfigDef.Validator
        +
        Parameters:
        +
        name - The name of the configuration
        +
        value - The value of the configuration
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Range.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Range.html new file mode 100644 index 000000000..c56089d83 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Range.html @@ -0,0 +1,197 @@ + + + + +ConfigDef.Range (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigDef.Range

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigDef.Range
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    ConfigDef.Validator
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static class ConfigDef.Range +extends Object +implements ConfigDef.Validator
    +
    Validation logic for numeric ranges
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        atLeast

        +
        public static ConfigDef.Range atLeast(Number min)
        +
        A numeric range that checks only the lower bound
        +
        +
        Parameters:
        +
        min - The minimum acceptable value
        +
        +
        +
      • +
      • +
        +

        between

        +
        public static ConfigDef.Range between(Number min, + Number max)
        +
        A numeric range that checks both the upper (inclusive) and lower bound
        +
        +
      • +
      • +
        +

        ensureValid

        +
        public void ensureValid(String name, + Object o)
        +
        Description copied from interface: ConfigDef.Validator
        +
        Perform single configuration validation.
        +
        +
        Specified by:
        +
        ensureValid in interface ConfigDef.Validator
        +
        Parameters:
        +
        name - The name of the configuration
        +
        o - The value of the configuration
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
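A short sketch of using the two factory methods above as stand-alone validators; the config names retries and ratio are placeholders:

```java
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigException;

public class RangeValidatorSketch {
    public static void main(String[] args) {
        ConfigDef.Validator atLeastZero = ConfigDef.Range.atLeast(0);
        ConfigDef.Validator percentage = ConfigDef.Range.between(0, 100);

        atLeastZero.ensureValid("retries", 5);      // passes silently

        try {
            percentage.ensureValid("ratio", 150);   // outside the inclusive [0, 100] range
        } catch (ConfigException e) {
            System.out.println("Rejected: " + e.getMessage());
        }
    }
}
```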
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Recommender.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Recommender.html new file mode 100644 index 000000000..d08848ac7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Recommender.html @@ -0,0 +1,166 @@ + + + + +ConfigDef.Recommender (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ConfigDef.Recommender

    +
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static interface ConfigDef.Recommender
    +
    This is used by ConfigDef.validate(Map) to get valid values for a configuration given the current configuration values, in order to perform full configuration validation and visibility modification. In cases where there are dependencies between configurations, the valid values and visibility for a configuration may change given the values of other configurations.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      validValues(String name, + Map<String,Object> parsedConfig)
      +
      +
      The valid values for the configuration given the current configuration values.
      +
      +
      boolean
      +
      visible(String name, + Map<String,Object> parsedConfig)
      +
      +
      Set the visibility of the configuration given the current configuration values.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        validValues

        +
        List<Object> validValues(String name, + Map<String,Object> parsedConfig)
        +
        The valid values for the configuration given the current configuration values.
        +
        +
        Parameters:
        +
        name - The name of the configuration
        +
        parsedConfig - The parsed configuration values
        +
        Returns:
        +
        The list of valid values. To function properly, the returned objects should have the type + defined for the configuration using the recommender.
        +
        +
        +
      • +
      • +
        +

        visible

        +
        boolean visible(String name, + Map<String,Object> parsedConfig)
        +
        Set the visibility of the configuration given the current configuration values.
        +
        +
        Parameters:
        +
        name - The name of the configuration
        +
        parsedConfig - The parsed configuration values
        +
        Returns:
        +
        The visibility of the configuration
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
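A sketch of a custom recommender implementing the two methods above. The config name compression.enabled and the candidate levels are invented for this example and do not correspond to a real Kafka ConfigDef:

```java
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.config.ConfigDef;

// Recommends a fixed set of levels, and only shows the config when compression is enabled.
public class CompressionLevelRecommender implements ConfigDef.Recommender {
    @Override
    public List<Object> validValues(String name, Map<String, Object> parsedConfig) {
        return List.of(1, 3, 5, 7, 9);
    }

    @Override
    public boolean visible(String name, Map<String, Object> parsedConfig) {
        return Boolean.TRUE.equals(parsedConfig.get("compression.enabled"));
    }
}
```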
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Type.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Type.html new file mode 100644 index 000000000..fd5584752 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Type.html @@ -0,0 +1,334 @@ + + + + +ConfigDef.Type (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class ConfigDef.Type

    +
    +
    java.lang.Object +
    java.lang.Enum<ConfigDef.Type> +
    org.apache.kafka.common.config.ConfigDef.Type
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<ConfigDef.Type>, Constable
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static enum ConfigDef.Type +extends Enum<ConfigDef.Type>
    +
    The type for a configuration value
    +
    +
    +
      + +
    • +
      +

      Nested Class Summary

      +
      +

      Nested classes/interfaces inherited from class java.lang.Enum

      +Enum.EnumDesc<E extends Enum<E>>
      +
      +
    • + +
    • +
      +

      Enum Constant Summary

      +
      Enum Constants
      +
      +
      Enum Constant
      +
      Description
      + +
      +
      Used for boolean values.
      +
      + +
      +
      Used for values that implement a Kafka interface.
      +
      + +
      +
      Used for numerical values within the Java Double range.
      +
      + +
      +
      Used for numerical values within the Java Integer range.
      +
      + +
      +
      Used for list values.
      +
      + +
      +
      Used for numerical values within the Java Long range.
      +
      + +
      +
      Used for string values containing sensitive data such as a password or key.
      +
      + +
      +
      Used for numerical values within the Java Short range.
      +
      + +
      +
      Used for string values.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      + +
      +
      Whether this type contains sensitive data such as a password or key.
      +
      + + +
      +
      Returns the enum constant of this class with the specified name.
      +
      + + +
      +
      Returns an array containing the constants of this enum class, in +the order they are declared.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Enum

      +compareTo, describeConstable, equals, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      +
        +
      • +
        +

        BOOLEAN

        +
        public static final ConfigDef.Type BOOLEAN
        +
        Used for boolean values. Values can be provided as a Boolean object or as a String with values + true or false (this is not case-sensitive), otherwise a ConfigException is + thrown.
        +
        +
      • +
      • +
        +

        STRING

        +
        public static final ConfigDef.Type STRING
        +
        Used for string values. Values must be provided as a String object, otherwise a ConfigException is + thrown.
        +
        +
      • +
      • +
        +

        INT

        +
        public static final ConfigDef.Type INT
        +
        Used for numerical values within the Java Integer range. Values must be provided as an Integer object or as a String being a valid Integer value, otherwise a ConfigException is thrown.
        +
        +
      • +
      • +
        +

        SHORT

        +
        public static final ConfigDef.Type SHORT
        +
        Used for numerical values within the Java Short range. Values must be provided as a Short object or as + a String being a valid Short value, otherwise a ConfigException is thrown.
        +
        +
      • +
      • +
        +

        LONG

        +
        public static final ConfigDef.Type LONG
        +
        Used for numerical values within the Java Long range. Values must be provided as a Long object, as an Integer + object or as a String being a valid Long value, otherwise a ConfigException is thrown.
        +
        +
      • +
      • +
        +

        DOUBLE

        +
        public static final ConfigDef.Type DOUBLE
        +
        Used for numerical values within the Java Double range. Values must be provided as a Number object, as a + Double object or as a String being a valid Double value, otherwise a ConfigException is thrown.
        +
        +
      • +
      • +
        +

        LIST

        +
        public static final ConfigDef.Type LIST
        +
        Used for list values. Values must be provided as a List object or as a String object, otherwise a ConfigException is thrown. When the value is provided as a String it must use commas to separate the different entries (for example: first-entry, second-entry), and an empty String maps to an empty List.
        +
        +
      • +
      • +
        +

        CLASS

        +
        public static final ConfigDef.Type CLASS
        +
        Used for values that implement a Kafka interface. Values must be provided as a Class object or as a + String object, otherwise a ConfigException is thrown. When the value is provided as a String it must + be the binary name of the Class.
        +
        +
      • +
      • +
        +

        PASSWORD

        +
        public static final ConfigDef.Type PASSWORD
        +
        Used for string values containing sensitive data such as a password or key. The values of configurations of this type are not included in logs and are instead replaced with "[hidden]". Values must be provided as a String object, otherwise a ConfigException is thrown.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static ConfigDef.Type[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static ConfigDef.Type valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        isSensitive

        +
        public boolean isSensitive()
        +
        Whether this type contains sensitive data such as a password or key.
        +
        +
        Returns:
        +
        true if the type is PASSWORD
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
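A small sketch exercising a few of the parsing rules described for these constants (case-insensitive booleans, comma-separated lists, and PASSWORD values hidden when printed); the key names are invented:

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;

public class TypeParsingSketch {
    public static void main(String[] args) {
        ConfigDef def = new ConfigDef()
            .define("feature.enabled", ConfigDef.Type.BOOLEAN, "false",
                    ConfigDef.Importance.LOW, "Boolean parsed from a string.")
            .define("endpoints", ConfigDef.Type.LIST, "",
                    ConfigDef.Importance.LOW, "Comma-separated list.")
            .define("api.secret", ConfigDef.Type.PASSWORD, "",
                    ConfigDef.Importance.LOW, "Sensitive value, hidden when printed.");

        Map<String, Object> props = new HashMap<>();
        props.put("feature.enabled", "TRUE");               // boolean parsing is case-insensitive
        props.put("endpoints", "host1:9092,host2:9092");    // becomes a two-element list

        AbstractConfig config = new AbstractConfig(def, props);
        System.out.println(config.getBoolean("feature.enabled"));  // true
        System.out.println(config.getList("endpoints"));           // [host1:9092, host2:9092]
        System.out.println(config.getPassword("api.secret"));      // [hidden]
    }
}
```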
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.ValidList.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.ValidList.html new file mode 100644 index 000000000..2fe2e0e42 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.ValidList.html @@ -0,0 +1,175 @@ + + + + +ConfigDef.ValidList (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigDef.ValidList

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigDef.ValidList
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    ConfigDef.Validator
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static class ConfigDef.ValidList +extends Object +implements ConfigDef.Validator
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      + +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.ValidString.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.ValidString.html new file mode 100644 index 000000000..266152ef2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.ValidString.html @@ -0,0 +1,175 @@ + + + + +ConfigDef.ValidString (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigDef.ValidString

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigDef.ValidString
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    ConfigDef.Validator
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static class ConfigDef.ValidString +extends Object +implements ConfigDef.Validator
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Validator.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Validator.html new file mode 100644 index 000000000..c35e29d36 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Validator.html @@ -0,0 +1,145 @@ + + + + +ConfigDef.Validator (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ConfigDef.Validator

    +
    +
    +
    +
    All Known Implementing Classes:
    +
    ConfigDef.CaseInsensitiveValidString, ConfigDef.CompositeValidator, ConfigDef.LambdaValidator, ConfigDef.ListSize, ConfigDef.NonEmptyString, ConfigDef.NonEmptyStringWithoutControlChars, ConfigDef.NonNullValidator, ConfigDef.Range, ConfigDef.ValidList, ConfigDef.ValidString
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static interface ConfigDef.Validator
    +
    Validation logic the user may provide to perform single configuration validation.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      +
      ensureValid(String name, + Object value)
      +
      +
      Perform single configuration validation.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        ensureValid

        +
        void ensureValid(String name, + Object value)
        +
        Perform single configuration validation.
        +
        +
        Parameters:
        +
        name - The name of the configuration
        +
        value - The value of the configuration
        +
        Throws:
        +
        ConfigException - if the value is invalid.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
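A sketch of a custom single-configuration validator; the lower-case rule is arbitrary and just demonstrates throwing ConfigException from ensureValid:

```java
import java.util.Locale;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigException;

// Rejects any value that is not a string or is not entirely lower-case.
public class LowerCaseValidator implements ConfigDef.Validator {
    @Override
    public void ensureValid(String name, Object value) {
        if (!(value instanceof String)) {
            throw new ConfigException(name, value, "Value must be a string");
        }
        String s = (String) value;
        if (!s.equals(s.toLowerCase(Locale.ROOT))) {
            throw new ConfigException(name, value, "Value must be lower-case");
        }
    }
}
```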
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Width.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Width.html new file mode 100644 index 000000000..0d8d0bece --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.Width.html @@ -0,0 +1,238 @@ + + + + +ConfigDef.Width (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class ConfigDef.Width

    +
    +
    java.lang.Object +
    java.lang.Enum<ConfigDef.Width> +
    org.apache.kafka.common.config.ConfigDef.Width
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<ConfigDef.Width>, Constable
    +
    +
    +
    Enclosing class:
    +
    ConfigDef
    +
    +
    +
    public static enum ConfigDef.Width +extends Enum<ConfigDef.Width>
    +
    The width of a configuration value
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static ConfigDef.Width[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static ConfigDef.Width valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.html new file mode 100644 index 000000000..18f44ac8f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigDef.html @@ -0,0 +1,1422 @@ + + + + +ConfigDef (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigDef

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigDef
    +
    +
    +
    +
    public class ConfigDef +extends Object
    +
    This class is used for specifying the set of expected configurations. For each configuration, you can specify + the name, the type, the default value, the documentation, the group information, the order in the group, + the width of the configuration value and the name suitable for display in the UI. + + You can provide special validation logic used for single configuration validation by overriding ConfigDef.Validator. + + Moreover, you can specify the dependents of a configuration. The valid values and visibility of a configuration + may change according to the values of other configurations. You can override ConfigDef.Recommender to get valid + values and set visibility of a configuration given the current configuration values. + +

    + To use the class: +

    +

    + ConfigDef defs = new ConfigDef();
    +
    + // check define(String, Type, Object, Importance, String) for more details.
    + defs.define("config_with_default", Type.STRING, "default string value", Importance.High, "Configuration with default value.");
    + // check define(String, Type, Object, Validator, Importance, String) for more details.
    + defs.define("config_with_validator", Type.INT, 42, Range.atLeast(0), Importance.High, "Configuration with user provided validator.");
    + // check define(String, Type, Importance, String, String, int, Width, String, List<String>) for more details.
    + defs.define("config_with_dependents", Type.INT, Importance.LOW, "Configuration with dependents.", "group", 1, Width.SHORT, "Config With Dependents", Arrays.asList("config_with_default","config_with_validator"));
    +
    + Map<String, String> props = new HashMap<>();
    + props.put("config_with_default", "some value");
    + props.put("config_with_dependents", "some other value");
    +
    + Map<String, Object> configs = defs.parse(props);
    + // will return "some value"
    + String someConfig = (String) configs.get("config_with_default");
    + // will return default value of 42
    + int anotherConfig = (Integer) configs.get("config_with_validator");
    +
    + // To validate the full configuration, use:
    + List<ConfigValue> configValues = defs.validate(props);
    + // The ConfigValue contains updated configuration information given the current configuration values.
    + 
    +

    + This class can be used standalone or in combination with AbstractConfig which provides some additional + functionality for accessing configs.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        NO_DEFAULT_VALUE

        +
        public static final Object NO_DEFAULT_VALUE
        +
        A unique Java object which represents the lack of a default value.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ConfigDef

        +
        public ConfigDef()
        +
        +
      • +
      • +
        +

        ConfigDef

        +
        public ConfigDef(ConfigDef base)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        names

        +
        public Set<String> names()
        +
Returns an unmodifiable set of the property names defined in this ConfigDef
        +
        +
        Returns:
        +
        new unmodifiable Set instance containing the keys
        +
        +
        +
      • +
      • +
        +

        defaultValues

        +
        public Map<String,Object> defaultValues()
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(ConfigDef.ConfigKey key)
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + Object defaultValue, + ConfigDef.Validator validator, + ConfigDef.Importance importance, + String documentation, + String group, + int orderInGroup, + ConfigDef.Width width, + String displayName, + List<String> dependents, + ConfigDef.Recommender recommender)
        +
        Define a new configuration
        +
        +
        Parameters:
        +
        name - the name of the config parameter
        +
        type - the type of the config
        +
        defaultValue - the default value to use if this config isn't present
        +
        validator - the validator to use in checking the correctness of the config
        +
        importance - the importance of this config
        +
        documentation - the documentation string for the config
        +
        group - the group this config belongs to
        +
        orderInGroup - the order of this config in the group
        +
        width - the width of the config
        +
        displayName - the name suitable for display
        +
        dependents - the configurations that are dependents of this configuration
        +
        recommender - the recommender provides valid values given the parent configuration values
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + Object defaultValue, + ConfigDef.Validator validator, + ConfigDef.Importance importance, + String documentation, + String group, + int orderInGroup, + ConfigDef.Width width, + String displayName, + List<String> dependents, + ConfigDef.Recommender recommender, + String alternativeString)
        +
        Define a new configuration
        +
        +
        Parameters:
        +
        name - the name of the config parameter
        +
        type - the type of the config
        +
        defaultValue - the default value to use if this config isn't present
        +
        validator - the validator to use in checking the correctness of the config
        +
        importance - the importance of this config
        +
        documentation - the documentation string for the config
        +
        group - the group this config belongs to
        +
        orderInGroup - the order of this config in the group
        +
        width - the width of the config
        +
        displayName - the name suitable for display
        +
        dependents - the configurations that are dependents of this configuration
        +
        recommender - the recommender provides valid values given the parent configuration values
        +
        alternativeString - the string which will be used to override the string of defaultValue
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + Object defaultValue, + ConfigDef.Validator validator, + ConfigDef.Importance importance, + String documentation, + String group, + int orderInGroup, + ConfigDef.Width width, + String displayName, + List<String> dependents)
        +
        Define a new configuration with no custom recommender
        +
        +
        Parameters:
        +
        name - the name of the config parameter
        +
        type - the type of the config
        +
        defaultValue - the default value to use if this config isn't present
        +
        validator - the validator to use in checking the correctness of the config
        +
        importance - the importance of this config
        +
        documentation - the documentation string for the config
        +
        group - the group this config belongs to
        +
        orderInGroup - the order of this config in the group
        +
        width - the width of the config
        +
        displayName - the name suitable for display
        +
        dependents - the configurations that are dependents of this configuration
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + Object defaultValue, + ConfigDef.Validator validator, + ConfigDef.Importance importance, + String documentation, + String group, + int orderInGroup, + ConfigDef.Width width, + String displayName, + ConfigDef.Recommender recommender)
        +
        Define a new configuration with no dependents
        +
        +
        Parameters:
        +
        name - the name of the config parameter
        +
        type - the type of the config
        +
        defaultValue - the default value to use if this config isn't present
        +
        validator - the validator to use in checking the correctness of the config
        +
        importance - the importance of this config
        +
        documentation - the documentation string for the config
        +
        group - the group this config belongs to
        +
        orderInGroup - the order of this config in the group
        +
        width - the width of the config
        +
        displayName - the name suitable for display
        +
        recommender - the recommender provides valid values given the parent configuration values
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + Object defaultValue, + ConfigDef.Validator validator, + ConfigDef.Importance importance, + String documentation, + String group, + int orderInGroup, + ConfigDef.Width width, + String displayName)
        +
        Define a new configuration with no dependents and no custom recommender
        +
        +
        Parameters:
        +
        name - the name of the config parameter
        +
        type - the type of the config
        +
        defaultValue - the default value to use if this config isn't present
        +
        validator - the validator to use in checking the correctness of the config
        +
        importance - the importance of this config
        +
        documentation - the documentation string for the config
        +
        group - the group this config belongs to
        +
        orderInGroup - the order of this config in the group
        +
        width - the width of the config
        +
        displayName - the name suitable for display
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + Object defaultValue, + ConfigDef.Importance importance, + String documentation, + String group, + int orderInGroup, + ConfigDef.Width width, + String displayName, + List<String> dependents, + ConfigDef.Recommender recommender)
        +
        Define a new configuration with no special validation logic
        +
        +
        Parameters:
        +
        name - the name of the config parameter
        +
        type - the type of the config
        +
        defaultValue - the default value to use if this config isn't present
        +
        importance - the importance of this config
        +
        documentation - the documentation string for the config
        +
        group - the group this config belongs to
        +
        orderInGroup - the order of this config in the group
        +
        width - the width of the config
        +
        displayName - the name suitable for display
        +
        dependents - the configurations that are dependents of this configuration
        +
        recommender - the recommender provides valid values given the parent configuration values
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + Object defaultValue, + ConfigDef.Importance importance, + String documentation, + String group, + int orderInGroup, + ConfigDef.Width width, + String displayName, + List<String> dependents)
        +
        Define a new configuration with no special validation logic and no custom recommender
        +
        +
        Parameters:
        +
        name - the name of the config parameter
        +
        type - the type of the config
        +
        defaultValue - the default value to use if this config isn't present
        +
        importance - the importance of this config
        +
        documentation - the documentation string for the config
        +
        group - the group this config belongs to
        +
        orderInGroup - the order of this config in the group
        +
        width - the width of the config
        +
        displayName - the name suitable for display
        +
        dependents - the configurations that are dependents of this configuration
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + Object defaultValue, + ConfigDef.Importance importance, + String documentation, + String group, + int orderInGroup, + ConfigDef.Width width, + String displayName, + ConfigDef.Recommender recommender)
        +
Define a new configuration with no special validation logic and no dependents
        +
        +
        Parameters:
        +
        name - the name of the config parameter
        +
        type - the type of the config
        +
        defaultValue - the default value to use if this config isn't present
        +
        importance - the importance of this config
        +
        documentation - the documentation string for the config
        +
        group - the group this config belongs to
        +
        orderInGroup - the order of this config in the group
        +
        width - the width of the config
        +
        displayName - the name suitable for display
        +
        recommender - the recommender provides valid values given the parent configuration values
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + Object defaultValue, + ConfigDef.Importance importance, + String documentation, + String group, + int orderInGroup, + ConfigDef.Width width, + String displayName)
        +
Define a new configuration with no special validation logic, no dependents and no custom recommender
        +
        +
        Parameters:
        +
        name - the name of the config parameter
        +
        type - the type of the config
        +
        defaultValue - the default value to use if this config isn't present
        +
        importance - the importance of this config
        +
        documentation - the documentation string for the config
        +
        group - the group this config belongs to
        +
        orderInGroup - the order of this config in the group
        +
        width - the width of the config
        +
        displayName - the name suitable for display
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + ConfigDef.Importance importance, + String documentation, + String group, + int orderInGroup, + ConfigDef.Width width, + String displayName, + List<String> dependents, + ConfigDef.Recommender recommender)
        +
        Define a new configuration with no default value and no special validation logic
        +
        +
        Parameters:
        +
        name - the name of the config parameter
        +
        type - the type of the config
        +
        importance - the importance of this config
        +
        documentation - the documentation string for the config
        +
        group - the group this config belongs to
        +
        orderInGroup - the order of this config in the group
        +
        width - the width of the config
        +
        displayName - the name suitable for display
        +
        dependents - the configurations that are dependents of this configuration
        +
        recommender - the recommender provides valid values given the parent configuration value
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + ConfigDef.Importance importance, + String documentation, + String group, + int orderInGroup, + ConfigDef.Width width, + String displayName, + List<String> dependents)
        +
        Define a new configuration with no default value, no special validation logic and no custom recommender
        +
        +
        Parameters:
        +
        name - the name of the config parameter
        +
        type - the type of the config
        +
        importance - the importance of this config
        +
        documentation - the documentation string for the config
        +
        group - the group this config belongs to
        +
        orderInGroup - the order of this config in the group
        +
        width - the width of the config
        +
        displayName - the name suitable for display
        +
        dependents - the configurations that are dependents of this configuration
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + ConfigDef.Importance importance, + String documentation, + String group, + int orderInGroup, + ConfigDef.Width width, + String displayName, + ConfigDef.Recommender recommender)
        +
Define a new configuration with no default value, no special validation logic and no dependents
        +
        +
        Parameters:
        +
        name - the name of the config parameter
        +
        type - the type of the config
        +
        importance - the importance of this config
        +
        documentation - the documentation string for the config
        +
        group - the group this config belongs to
        +
        orderInGroup - the order of this config in the group
        +
        width - the width of the config
        +
        displayName - the name suitable for display
        +
        recommender - the recommender provides valid values given the parent configuration value
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + ConfigDef.Importance importance, + String documentation, + String group, + int orderInGroup, + ConfigDef.Width width, + String displayName)
        +
        Define a new configuration with no default value, no special validation logic, no dependents and no custom recommender
        +
        +
        Parameters:
        +
        name - the name of the config parameter
        +
        type - the type of the config
        +
        importance - the importance of this config
        +
        documentation - the documentation string for the config
        +
        group - the group this config belongs to
        +
        orderInGroup - the order of this config in the group
        +
        width - the width of the config
        +
        displayName - the name suitable for display
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + Object defaultValue, + ConfigDef.Validator validator, + ConfigDef.Importance importance, + String documentation)
        +
        Define a new configuration with no group, no order in group, no width, no display name, no dependents and no custom recommender
        +
        +
        Parameters:
        +
        name - the name of the config parameter
        +
        type - the type of the config
        +
        defaultValue - the default value to use if this config isn't present
        +
        validator - the validator to use in checking the correctness of the config
        +
        importance - the importance of this config
        +
        documentation - the documentation string for the config
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + Object defaultValue, + ConfigDef.Importance importance, + String documentation)
        +
        Define a new configuration with no special validation logic
        +
        +
        Parameters:
        +
        name - The name of the config parameter
        +
        type - The type of the config
        +
        defaultValue - The default value to use if this config isn't present
        +
        importance - The importance of this config: is this something you will likely need to change.
        +
        documentation - The documentation string for the config
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + Object defaultValue, + ConfigDef.Importance importance, + String documentation, + String alternativeString)
        +
        Define a new configuration with no special validation logic
        +
        +
        Parameters:
        +
        name - The name of the config parameter
        +
        type - The type of the config
        +
        defaultValue - The default value to use if this config isn't present
        +
        importance - The importance of this config: is this something you will likely need to change.
        +
        documentation - The documentation string for the config
        +
        alternativeString - The string which will be used to override the string of defaultValue
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        define

        +
        public ConfigDef define(String name, + ConfigDef.Type type, + ConfigDef.Importance importance, + String documentation)
        +
        Define a new configuration with no default value and no special validation logic
        +
        +
        Parameters:
        +
        name - The name of the config parameter
        +
        type - The type of the config
        +
        importance - The importance of this config: is this something you will likely need to change.
        +
        documentation - The documentation string for the config
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        defineInternal

        +
        public ConfigDef defineInternal(String name, + ConfigDef.Type type, + Object defaultValue, + ConfigDef.Importance importance)
        +
Define a new internal configuration. Internal configurations won't show up in the docs and aren't + intended for general use.
        +
        +
        Parameters:
        +
        name - The name of the config parameter
        +
        type - The type of the config
        +
        defaultValue - The default value to use if this config isn't present
        +
        importance - The importance of this config (i.e. is this something you will likely need to change?)
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        defineInternal

        +
        public ConfigDef defineInternal(String name, + ConfigDef.Type type, + Object defaultValue, + ConfigDef.Validator validator, + ConfigDef.Importance importance, + String documentation)
        +
Define a new internal configuration. Internal configurations won't show up in the docs and aren't + intended for general use.
        +
        +
        Parameters:
        +
        name - The name of the config parameter
        +
        type - The type of the config
        +
        defaultValue - The default value to use if this config isn't present
        +
        validator - The validator to use in checking the correctness of the config
        +
        importance - The importance of this config (i.e. is this something you will likely need to change?)
        +
        documentation - The documentation string for the config
        +
        Returns:
        +
        This ConfigDef so you can chain calls
        +
        +
        +
      • +
      • +
        +

        configKeys

        +
        public Map<String,ConfigDef.ConfigKey> configKeys()
        +
        Get the configuration keys
        +
        +
        Returns:
        +
        a map containing all configuration keys
        +
        +
        +
      • +
      • +
        +

        groups

        +
        public List<String> groups()
        +
        Get the groups for the configuration
        +
        +
        Returns:
        +
        a list of group names
        +
        +
        +
      • +
      • +
        +

        withClientSslSupport

        +
        public ConfigDef withClientSslSupport()
        +
        Add standard SSL client configuration options.
        +
        +
        Returns:
        +
        this
        +
        +
        +
      • +
      • +
        +

        withClientSaslSupport

        +
        public ConfigDef withClientSaslSupport()
        +
        Add standard SASL client configuration options.
        +
        +
        Returns:
        +
        this
        +
        +
        +
      • +
      • +
        +

        parse

        +
        public Map<String,Object> parse(Map<?,?> props)
        +
        Parse and validate configs against this configuration definition. The input is a map of configs. It is expected + that the keys of the map are strings, but the values can either be strings or they may already be of the + appropriate type (int, string, etc). This will work equally well with either java.util.Properties instances or a + programmatically constructed map.
        +
        +
        Parameters:
        +
        props - The configs to parse and validate.
        +
        Returns:
        +
        Parsed and validated configs. The key will be the config name and the value will be the value parsed into + the appropriate type (int, string, etc).
        +
        +
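Since parse(Map) accepts Properties as well as a plain Map, a short sketch (reusing the defs object from the class-level example above, and assuming java.util.Properties) might look like:

```java
// Properties works directly because parse(Map<?,?>) accepts any map-like input.
Properties props = new Properties();
props.put("config_with_default", "overridden value");

Map<String, Object> parsed = defs.parse(props);
String value = (String) parsed.get("config_with_default"); // "overridden value"
```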
        +
      • +
      • +
        +

        validate

        +
        public List<ConfigValue> validate(Map<String,String> props)
        +
        Validate the current configuration values with the configuration definition.
        +
        +
        Parameters:
        +
        props - the current configuration values
        +
        Returns:
        +
List of ConfigValue; each ConfigValue contains the updated configuration information given + the current configuration values.
        +
        +
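A short sketch of consuming the returned list, using the ConfigValue accessors documented later on this page; it reuses the defs object and the "config_with_validator" key from the class-level example (Range.atLeast(0), so "-1" fails validation) and assumes java.util.Collections and java.util.List.

```java
// Collect validation errors per config name without throwing.
List<ConfigValue> results = defs.validate(Collections.singletonMap("config_with_validator", "-1"));
for (ConfigValue cv : results) {
    if (!cv.errorMessages().isEmpty()) {
        System.out.println(cv.name() + " is invalid: " + cv.errorMessages());
    }
}
```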
        +
      • +
      • +
        +

        validateAll

        +
        public Map<String,ConfigValue> validateAll(Map<String,String> props)
        +
        +
      • +
      • +
        +

        parseType

        +
        public static Object parseType(String name, + Object value, + ConfigDef.Type type)
        +
        Parse a value according to its expected type.
        +
        +
        Parameters:
        +
        name - The config name
        +
        value - The config value
        +
        type - The expected type
        +
        Returns:
        +
        The parsed object
        +
        +
        +
      • +
      • +
        +

        convertToString

        +
        public static String convertToString(Object parsedValue, + ConfigDef.Type type)
        +
        +
      • +
      • +
        +

        convertToStringMapWithPasswordValues

        +
        public static Map<String,String> convertToStringMapWithPasswordValues(Map<String,?> configs)
        +
Converts a map of config (key, value) pairs to a map of strings where each value + is converted to a string. This method should be used with care since it stores + actual password values as plain strings. Values from this map should never be used in log entries.
        +
        +
      • +
      • +
        +

        toHtmlTable

        +
        public String toHtmlTable()
        +
        +
      • +
      • +
        +

        toHtmlTable

        +
        public String toHtmlTable(Map<String,String> dynamicUpdateModes)
        +
Converts this config into an HTML table that can be embedded into docs. + If dynamicUpdateModes is non-empty, a "Dynamic Update Mode" column + will be included in the table with the value of the update mode. Default + mode is "read-only".
        +
        +
        Parameters:
        +
        dynamicUpdateModes - Config name -> update mode mapping
        +
        +
        +
      • +
      • +
        +

        toRst

        +
        public String toRst()
        +
        Get the configs formatted with reStructuredText, suitable for embedding in Sphinx + documentation.
        +
        +
      • +
      • +
        +

        toEnrichedRst

        +
        public String toEnrichedRst()
        +
        Configs with new metadata (group, orderInGroup, dependents) formatted with reStructuredText, suitable for embedding in Sphinx + documentation.
        +
        +
      • +
      • +
        +

        embed

        +
        public void embed(String keyPrefix, + String groupPrefix, + int startingOrd, + ConfigDef child)
        +
        +
      • +
      • +
        +

        toHtml

        +
        public String toHtml()
        +
        +
      • +
      • +
        +

        toHtml

        +
        public String toHtml(int headerDepth, + Function<String,String> idGenerator)
        +
        Converts this config into an HTML list that can be embedded into docs.
        +
        +
        Parameters:
        +
        headerDepth - The top level header depth in the generated HTML.
        +
        idGenerator - A function for computing the HTML id attribute in the generated HTML from a given config name.
        +
        +
        +
      • +
      • +
        +

        toHtml

        +
        public String toHtml(Map<String,String> dynamicUpdateModes)
        +
        Converts this config into an HTML list that can be embedded into docs. + If dynamicUpdateModes is non-empty, a "Dynamic Update Mode" label + will be included in the config details with the value of the update mode. Default + mode is "read-only".
        +
        +
        Parameters:
        +
        dynamicUpdateModes - Config name -> update mode mapping.
        +
        +
        +
      • +
      • +
        +

        toHtml

        +
        public String toHtml(int headerDepth, + Function<String,String> idGenerator, + Map<String,String> dynamicUpdateModes)
        +
        Converts this config into an HTML list that can be embedded into docs. + If dynamicUpdateModes is non-empty, a "Dynamic Update Mode" label + will be included in the config details with the value of the update mode. Default + mode is "read-only".
        +
        +
        Parameters:
        +
        headerDepth - The top level header depth in the generated HTML.
        +
        idGenerator - A function for computing the HTML id attribute in the generated HTML from a given config name.
        +
        dynamicUpdateModes - Config name -> update mode mapping.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigException.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigException.html new file mode 100644 index 000000000..ef7d80288 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigException.html @@ -0,0 +1,173 @@ + + + + +ConfigException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class ConfigException +extends KafkaException
    +
    Thrown if the user supplies an invalid configuration
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ConfigException

        +
        public ConfigException(String message)
        +
        +
      • +
      • +
        +

        ConfigException

        +
        public ConfigException(String name, + Object value)
        +
        +
      • +
      • +
        +

        ConfigException

        +
        public ConfigException(String name, + Object value, + String message)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigResource.Type.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigResource.Type.html new file mode 100644 index 000000000..4daf82485 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigResource.Type.html @@ -0,0 +1,272 @@ + + + + +ConfigResource.Type (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class ConfigResource.Type

    +
    +
    java.lang.Object +
    java.lang.Enum<ConfigResource.Type> +
    org.apache.kafka.common.config.ConfigResource.Type
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<ConfigResource.Type>, Constable
    +
    +
    +
    Enclosing class:
    +
    ConfigResource
    +
    +
    +
    public static enum ConfigResource.Type +extends Enum<ConfigResource.Type>
    +
    Type of resource.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static ConfigResource.Type[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static ConfigResource.Type valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        id

        +
        public byte id()
        +
        +
      • +
      • +
        +

        forId

        +
        public static ConfigResource.Type forId(byte id)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigResource.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigResource.html new file mode 100644 index 000000000..3493b6a33 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigResource.html @@ -0,0 +1,253 @@ + + + + +ConfigResource (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigResource

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigResource
    +
    +
    +
    +
    public final class ConfigResource +extends Object
    +
    A class representing resources that have configs.
    +
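A brief usage sketch; the topic name is illustrative and Type.TOPIC is assumed to be one of the ConfigResource.Type constants (the constant list is not reproduced in this excerpt).

```java
// Identify the config resource for a specific topic.
ConfigResource topicConfigs = new ConfigResource(ConfigResource.Type.TOPIC, "orders");
topicConfigs.type();      // ConfigResource.Type.TOPIC
topicConfigs.name();      // "orders"
topicConfigs.isDefault(); // false, since the name is non-empty
```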
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ConfigResource

        +
        public ConfigResource(ConfigResource.Type type, + String name)
        +
        Create an instance of this class with the provided parameters.
        +
        +
        Parameters:
        +
        type - a non-null resource type
        +
        name - a non-null resource name
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        type

        +
        public ConfigResource.Type type()
        +
        Return the resource type.
        +
        +
      • +
      • +
        +

        name

        +
        public String name()
        +
        Return the resource name.
        +
        +
      • +
      • +
        +

        isDefault

        +
        public boolean isDefault()
        +
        Returns true if this is the default resource of a resource type. + Resource name is empty for the default resource.
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigTransformer.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigTransformer.html new file mode 100644 index 000000000..3e1b7a60d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigTransformer.html @@ -0,0 +1,227 @@ + + + + +ConfigTransformer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigTransformer

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigTransformer
    +
    +
    +
    +
    public class ConfigTransformer +extends Object
    +
    This class wraps a set of ConfigProvider instances and uses them to perform + transformations. + +

    The default variable pattern is of the form ${provider:[path:]key}, + where the provider corresponds to a ConfigProvider instance, as passed to + ConfigTransformer(Map). The pattern will extract a set + of paths (which are optional) and keys and then pass them to ConfigProvider.get(String, Set) to obtain the + values with which to replace the variables. + +

    For example, if a Map consisting of an entry with a provider name "file" and provider instance + FileConfigProvider is passed to the ConfigTransformer(Map), and a Properties + file with contents +

    + fileKey=someValue
    + 
    + resides at the path "/tmp/properties.txt", then when a configuration Map which has an entry with a key "someKey" and + a value "${file:/tmp/properties.txt:fileKey}" is passed to the transform(Map) method, then the transformed + Map will have an entry with key "someKey" and a value "someValue". + +

    This class only depends on ConfigProvider.get(String, Set) and does not depend on subscription support + in a ConfigProvider, such as the ConfigProvider.subscribe(String, Set, ConfigChangeCallback) and + ConfigProvider.unsubscribe(String, Set, ConfigChangeCallback) methods.
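Putting the description above into code, here is a minimal sketch. It assumes the FileConfigProvider from org.apache.kafka.common.config.provider, the /tmp/properties.txt file described above, and java.util.Collections and java.util.Map.

```java
// Register a "file" provider and resolve ${file:/tmp/properties.txt:fileKey}.
ConfigProvider fileProvider = new FileConfigProvider();
fileProvider.configure(Collections.emptyMap()); // no provider-specific settings needed here

ConfigTransformer transformer =
        new ConfigTransformer(Collections.singletonMap("file", fileProvider));

Map<String, String> raw =
        Collections.singletonMap("someKey", "${file:/tmp/properties.txt:fileKey}");

ConfigTransformerResult result = transformer.transform(raw);
result.data(); // {someKey=someValue}, provided the file contains fileKey=someValue
result.ttls(); // TTLs (in milliseconds) reported by the provider, if any
```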

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        DEFAULT_PATTERN

        +
        public static final Pattern DEFAULT_PATTERN
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ConfigTransformer

        +
        public ConfigTransformer(Map<String,ConfigProvider> configProviders)
        +
        Creates a ConfigTransformer with the default pattern, of the form ${provider:[path:]key}.
        +
        +
        Parameters:
        +
        configProviders - a Map of provider names and ConfigProvider instances.
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      + +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigTransformerResult.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigTransformerResult.html new file mode 100644 index 000000000..d58412a5e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigTransformerResult.html @@ -0,0 +1,197 @@ + + + + +ConfigTransformerResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigTransformerResult

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigTransformerResult
    +
    +
    +
    +
    public class ConfigTransformerResult +extends Object
    +
    The result of a transformation from ConfigTransformer.
    +
    +
    +
      + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      + +
      +
      Creates a new ConfigTransformerResult with the given data and TTL values for a set of paths.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
      Returns the transformed data, with variables replaced with corresponding values from the + ConfigProvider instances if found.
      +
      + + +
      +
      Returns the TTL values (in milliseconds) returned from the ConfigProvider instances for a given set of paths.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ConfigTransformerResult

        +
        public ConfigTransformerResult(Map<String,String> data, + Map<String,Long> ttls)
        +
        Creates a new ConfigTransformerResult with the given data and TTL values for a set of paths.
        +
        +
        Parameters:
        +
        data - a Map of key-value pairs
        +
        ttls - a Map of path and TTL values (in milliseconds)
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        data

        +
        public Map<String,String> data()
        +
        Returns the transformed data, with variables replaced with corresponding values from the + ConfigProvider instances if found. + +

        Modifying the transformed data that is returned does not affect the ConfigProvider nor the + original data that was used as the source of the transformation.

        +
        +
        Returns:
        +
        data a Map of key-value pairs
        +
        +
        +
      • +
      • +
        +

        ttls

        +
        public Map<String,Long> ttls()
        +
        Returns the TTL values (in milliseconds) returned from the ConfigProvider instances for a given set of paths.
        +
        +
        Returns:
        +
        data a Map of path and TTL values
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/ConfigValue.html b/static/41/javadoc/org/apache/kafka/common/config/ConfigValue.html new file mode 100644 index 000000000..f0220b1ca --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/ConfigValue.html @@ -0,0 +1,283 @@ + + + + +ConfigValue (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConfigValue

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.ConfigValue
    +
    +
    +
    +
    public class ConfigValue +extends Object
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ConfigValue

        +
        public ConfigValue(String name)
        +
        +
      • +
      • +
        +

        ConfigValue

        +
        public ConfigValue(String name, + Object value, + List<Object> recommendedValues, + List<String> errorMessages)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        name

        +
        public String name()
        +
        +
      • +
      • +
        +

        value

        +
        public Object value()
        +
        +
      • +
      • +
        +

        recommendedValues

        +
        public List<Object> recommendedValues()
        +
        +
      • +
      • +
        +

        errorMessages

        +
        public List<String> errorMessages()
        +
        +
      • +
      • +
        +

        visible

        +
        public boolean visible()
        +
        +
      • +
      • +
        +

        value

        +
        public void value(Object value)
        +
        +
      • +
      • +
        +

        recommendedValues

        +
        public void recommendedValues(List<Object> recommendedValues)
        +
        +
      • +
      • +
        +

        addErrorMessage

        +
        public void addErrorMessage(String errorMessage)
        +
        +
      • +
      • +
        +

        visible

        +
        public void visible(boolean visible)
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/LogLevelConfig.html b/static/41/javadoc/org/apache/kafka/common/config/LogLevelConfig.html new file mode 100644 index 000000000..7e1b2304c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/LogLevelConfig.html @@ -0,0 +1,287 @@ + + + + +LogLevelConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class LogLevelConfig

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.LogLevelConfig
    +
    +
    +
    +
    public class LogLevelConfig +extends Object
    +
    This class holds definitions for log level configurations related to Kafka's application logging. See KIP-412 for additional information
    +
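For instance, the VALID_LOG_LEVELS set defined below can be used to reject unsupported logger levels before applying them; a minimal sketch:

```java
// Guard against unsupported log levels using the constants on this class.
String requested = "DEBUG";
if (!LogLevelConfig.VALID_LOG_LEVELS.contains(requested)) {
    throw new IllegalArgumentException("Unsupported log level: " + requested);
}
```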
    +
    +
      + +
    • +
      +

      Field Summary

      +
      Fields
      +
      +
      Modifier and Type
      +
      Field
      +
      Description
      +
      static final String
      + +
      +
      The DEBUG level designates fine-grained + informational events that are most useful to debug Kafka
      +
      +
      static final String
      + +
      +
      The ERROR level designates error events that + might still allow the broker to continue running.
      +
      +
      static final String
      + +
      +
      The FATAL level designates a very severe error + that will lead the Kafka broker to abort.
      +
      +
      static final String
      + +
      +
      The INFO level designates informational messages + that highlight normal Kafka events at a coarse-grained level
      +
      +
      static final String
      + +
      +
      The TRACE level designates finer-grained + informational events than the DEBUG level.
      +
      +
      static final Set<String>
      + +
       
      +
      static final String
      + +
      +
      The WARN level designates potentially harmful situations.
      +
      +
      +
      +
    • + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      + +
       
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        FATAL_LOG_LEVEL

        +
        public static final String FATAL_LOG_LEVEL
        +
        The FATAL level designates a very severe error + that will lead the Kafka broker to abort.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        ERROR_LOG_LEVEL

        +
        public static final String ERROR_LOG_LEVEL
        +
        The ERROR level designates error events that + might still allow the broker to continue running.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        WARN_LOG_LEVEL

        +
        public static final String WARN_LOG_LEVEL
        +
        The WARN level designates potentially harmful situations.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        INFO_LOG_LEVEL

        +
        public static final String INFO_LOG_LEVEL
        +
        The INFO level designates informational messages + that highlight normal Kafka events at a coarse-grained level
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEBUG_LOG_LEVEL

        +
        public static final String DEBUG_LOG_LEVEL
        +
        The DEBUG level designates fine-grained + informational events that are most useful to debug Kafka
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        TRACE_LOG_LEVEL

        +
        public static final String TRACE_LOG_LEVEL
        +
        The TRACE level designates finer-grained + informational events than the DEBUG level.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        VALID_LOG_LEVELS

        +
        public static final Set<String> VALID_LOG_LEVELS
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        LogLevelConfig

        +
        public LogLevelConfig()
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/config/SaslConfigs.html b/static/41/javadoc/org/apache/kafka/common/config/SaslConfigs.html new file mode 100644 index 000000000..12dbebf7b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/SaslConfigs.html @@ -0,0 +1,2136 @@ + + + + +SaslConfigs (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SaslConfigs

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.SaslConfigs
    +
    +
    +
    +
    public class SaslConfigs +extends Object
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        SASL_MECHANISM

        +
        public static final String SASL_MECHANISM
        +
        SASL mechanism configuration - standard mechanism names are listed here.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
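A hedged sketch of how these constants are typically used when building client configuration; the PLAIN mechanism and the PlainLoginModule JAAS entry are illustrative assumptions, not prescribed by this page, and java.util.Properties is assumed.

```java
// Use the SaslConfigs constants instead of hard-coding property names.
Properties clientProps = new Properties();
clientProps.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
clientProps.put(SaslConfigs.SASL_JAAS_CONFIG,
        "org.apache.kafka.common.security.plain.PlainLoginModule required "
        + "username=\"alice\" password=\"alice-secret\";");
```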
      • +
      • +
        +

        SASL_MECHANISM_DOC

        +
        public static final String SASL_MECHANISM_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        GSSAPI_MECHANISM

        +
        public static final String GSSAPI_MECHANISM
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_MECHANISM

        +
        public static final String DEFAULT_SASL_MECHANISM
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_JAAS_CONFIG

        +
        public static final String SASL_JAAS_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_JAAS_CONFIG_DOC

        +
        public static final String SASL_JAAS_CONFIG_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_CLIENT_CALLBACK_HANDLER_CLASS

        +
        public static final String SASL_CLIENT_CALLBACK_HANDLER_CLASS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_CLIENT_CALLBACK_HANDLER_CLASS_DOC

        +
        public static final String SASL_CLIENT_CALLBACK_HANDLER_CLASS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_CALLBACK_HANDLER_CLASS

        +
        public static final String SASL_LOGIN_CALLBACK_HANDLER_CLASS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_CALLBACK_HANDLER_CLASS_DOC

        +
        public static final String SASL_LOGIN_CALLBACK_HANDLER_CLASS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_CLASS

        +
        public static final String SASL_LOGIN_CLASS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_CLASS_DOC

        +
        public static final String SASL_LOGIN_CLASS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_KERBEROS_SERVICE_NAME

        +
        public static final String SASL_KERBEROS_SERVICE_NAME
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_KERBEROS_SERVICE_NAME_DOC

        +
        public static final String SASL_KERBEROS_SERVICE_NAME_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_KERBEROS_KINIT_CMD

        +
        public static final String SASL_KERBEROS_KINIT_CMD
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_KERBEROS_KINIT_CMD_DOC

        +
        public static final String SASL_KERBEROS_KINIT_CMD_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_KERBEROS_KINIT_CMD

        +
        public static final String DEFAULT_KERBEROS_KINIT_CMD
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR

        +
        public static final String SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR_DOC

        +
        public static final String SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_KERBEROS_TICKET_RENEW_WINDOW_FACTOR

        +
        public static final double DEFAULT_KERBEROS_TICKET_RENEW_WINDOW_FACTOR
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_KERBEROS_TICKET_RENEW_JITTER

        +
        public static final String SASL_KERBEROS_TICKET_RENEW_JITTER
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_KERBEROS_TICKET_RENEW_JITTER_DOC

        +
        public static final String SASL_KERBEROS_TICKET_RENEW_JITTER_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_KERBEROS_TICKET_RENEW_JITTER

        +
        public static final double DEFAULT_KERBEROS_TICKET_RENEW_JITTER
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN

        +
        public static final String SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN_DOC

        +
        public static final String SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_KERBEROS_MIN_TIME_BEFORE_RELOGIN

        +
        public static final long DEFAULT_KERBEROS_MIN_TIME_BEFORE_RELOGIN
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_REFRESH_WINDOW_FACTOR

        +
        public static final String SASL_LOGIN_REFRESH_WINDOW_FACTOR
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_REFRESH_WINDOW_FACTOR_DOC

        +
        public static final String SASL_LOGIN_REFRESH_WINDOW_FACTOR_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_LOGIN_REFRESH_WINDOW_FACTOR

        +
        public static final double DEFAULT_LOGIN_REFRESH_WINDOW_FACTOR
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_REFRESH_WINDOW_JITTER

        +
        public static final String SASL_LOGIN_REFRESH_WINDOW_JITTER
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_REFRESH_WINDOW_JITTER_DOC

        +
        public static final String SASL_LOGIN_REFRESH_WINDOW_JITTER_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_LOGIN_REFRESH_WINDOW_JITTER

        +
        public static final double DEFAULT_LOGIN_REFRESH_WINDOW_JITTER
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS

        +
        public static final String SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS_DOC

        +
        public static final String SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_LOGIN_REFRESH_MIN_PERIOD_SECONDS

        +
        public static final short DEFAULT_LOGIN_REFRESH_MIN_PERIOD_SECONDS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_REFRESH_BUFFER_SECONDS

        +
        public static final String SASL_LOGIN_REFRESH_BUFFER_SECONDS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_REFRESH_BUFFER_SECONDS_DOC

        +
        public static final String SASL_LOGIN_REFRESH_BUFFER_SECONDS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_LOGIN_REFRESH_BUFFER_SECONDS

        +
        public static final short DEFAULT_LOGIN_REFRESH_BUFFER_SECONDS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_CONNECT_TIMEOUT_MS

        +
        public static final String SASL_LOGIN_CONNECT_TIMEOUT_MS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_CONNECT_TIMEOUT_MS_DOC

        +
        public static final String SASL_LOGIN_CONNECT_TIMEOUT_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_READ_TIMEOUT_MS

        +
        public static final String SASL_LOGIN_READ_TIMEOUT_MS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_READ_TIMEOUT_MS_DOC

        +
        public static final String SASL_LOGIN_READ_TIMEOUT_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_RETRY_BACKOFF_MAX_MS

        +
        public static final String SASL_LOGIN_RETRY_BACKOFF_MAX_MS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MAX_MS

        +
        public static final long DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MAX_MS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_RETRY_BACKOFF_MAX_MS_DOC

        +
        public static final String SASL_LOGIN_RETRY_BACKOFF_MAX_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_RETRY_BACKOFF_MS

        +
        public static final String SASL_LOGIN_RETRY_BACKOFF_MS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MS

        +
        public static final long DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_LOGIN_RETRY_BACKOFF_MS_DOC

        +
        public static final String SASL_LOGIN_RETRY_BACKOFF_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS

        +
        public static final String SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS

        +
        public static final String DEFAULT_SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS_DOC

        +
        public static final String SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS

        +
        public static final String SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS

        +
        public static final String DEFAULT_SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS_DOC

        +
        public static final String SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_SCOPE

        +
        public static final String SASL_OAUTHBEARER_SCOPE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_SCOPE_DOC

        +
        public static final String SASL_OAUTHBEARER_SCOPE_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID

        +
        public static final String SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID_DOC

        +
        public static final String SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET

        +
        public static final String SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET_DOC

        +
        public static final String SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_ALGORITHM

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_ALGORITHM
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM

        +
        public static final String DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_ALGORITHM_DOC

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_ALGORITHM_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD_DOC

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS

        +
        public static final int DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS_DOC

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS_DOC

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE

        +
        public static final boolean DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE_DOC

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS

        +
        public static final int DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS_DOC

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB_DOC

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_FILE

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_FILE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_FILE_DOC

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_FILE_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE_DOC

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE_DOC

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE_DOC

        +
        public static final String SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_SCOPE_CLAIM_NAME

        +
        public static final String SASL_OAUTHBEARER_SCOPE_CLAIM_NAME
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME

        +
        public static final String DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_SCOPE_CLAIM_NAME_DOC

        +
        public static final String SASL_OAUTHBEARER_SCOPE_CLAIM_NAME_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_SUB_CLAIM_NAME

        +
        public static final String SASL_OAUTHBEARER_SUB_CLAIM_NAME
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME

        +
        public static final String DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_SUB_CLAIM_NAME_DOC

        +
        public static final String SASL_OAUTHBEARER_SUB_CLAIM_NAME_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL

        +
        public static final String SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL_DOC

        +
        public static final String SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_JWKS_ENDPOINT_URL

        +
        public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_URL
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_JWKS_ENDPOINT_URL_DOC

        +
        public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_URL_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS

        +
        public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS

        +
        public static final long DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS_DOC

        +
        public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS

        +
        public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS

        +
        public static final long DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS_DOC

        +
        public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS

        +
        public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS

        +
        public static final long DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS_DOC

        +
        public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS

        +
        public static final String SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS

        +
        public static final int DEFAULT_SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS_DOC

        +
        public static final String SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_EXPECTED_AUDIENCE

        +
        public static final String SASL_OAUTHBEARER_EXPECTED_AUDIENCE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_EXPECTED_AUDIENCE_DOC

        +
        public static final String SASL_OAUTHBEARER_EXPECTED_AUDIENCE_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_EXPECTED_ISSUER

        +
        public static final String SASL_OAUTHBEARER_EXPECTED_ISSUER
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_EXPECTED_ISSUER_DOC

        +
        public static final String SASL_OAUTHBEARER_EXPECTED_ISSUER_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_HEADER_URLENCODE

        +
        public static final String SASL_OAUTHBEARER_HEADER_URLENCODE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE

        +
        public static final boolean DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SASL_OAUTHBEARER_HEADER_URLENCODE_DOC

        +
        public static final String SASL_OAUTHBEARER_HEADER_URLENCODE_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SaslConfigs

        +
        public SaslConfigs()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        addClientSaslSupport

        +
        public static void addClientSaslSupport(ConfigDef config)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
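    A minimal sketch of client properties built from the SaslConfigs constants above, assuming the PLAIN mechanism over SASL_SSL; the username and password are placeholders, and security.protocol is written as a literal because that key is defined elsewhere (in CommonClientConfigs), not on this page.

    ```java
    import java.util.Properties;
    import org.apache.kafka.common.config.SaslConfigs;

    public class SaslClientProps {
        public static Properties saslProperties() {
            Properties props = new Properties();
            props.put("security.protocol", "SASL_SSL");
            props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
            // JAAS config for the PLAIN login module; credentials below are placeholders.
            props.put(SaslConfigs.SASL_JAAS_CONFIG,
                    "org.apache.kafka.common.security.plain.PlainLoginModule required "
                            + "username=\"alice\" password=\"alice-secret\";");
            return props;
        }
    }
    ```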
diff --git a/static/41/javadoc/org/apache/kafka/common/config/SecurityConfig.html b/static/41/javadoc/org/apache/kafka/common/config/SecurityConfig.html
new file mode 100644
index 000000000..3dcb7e734
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/common/config/SecurityConfig.html
@@ -0,0 +1,182 @@

    SecurityConfig (kafka 4.1.0 API)
    Class SecurityConfig

    java.lang.Object
      org.apache.kafka.common.config.SecurityConfig

    public class SecurityConfig extends Object
    Contains the common security config for SSL and SASL.

      Field Details

        SECURITY_PROVIDERS_CONFIG

        public static final String SECURITY_PROVIDERS_CONFIG

        SECURITY_PROVIDERS_DOC

        public static final String SECURITY_PROVIDERS_DOC

      Constructor Details

        SecurityConfig

        public SecurityConfig()
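    A small, hedged sketch of setting the security providers property; com.example.security.MyProviderCreator is a hypothetical class name used only for illustration, not part of Kafka.

    ```java
    import java.util.Properties;
    import org.apache.kafka.common.config.SecurityConfig;

    public class SecurityProviderProps {
        public static Properties withCustomProviders() {
            Properties props = new Properties();
            // Comma-separated list of classes that register custom security providers;
            // the class name below is a placeholder for your own implementation.
            props.put(SecurityConfig.SECURITY_PROVIDERS_CONFIG, "com.example.security.MyProviderCreator");
            return props;
        }
    }
    ```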
diff --git a/static/41/javadoc/org/apache/kafka/common/config/SslClientAuth.html b/static/41/javadoc/org/apache/kafka/common/config/SslClientAuth.html
new file mode 100644
index 000000000..1d5258828
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/common/config/SslClientAuth.html
@@ -0,0 +1,277 @@

    SslClientAuth (kafka 4.1.0 API)
    Enum Class SslClientAuth

    java.lang.Object
      java.lang.Enum<SslClientAuth>
        org.apache.kafka.common.config.SslClientAuth

    All Implemented Interfaces:
    Serializable, Comparable<SslClientAuth>, Constable

    public enum SslClientAuth extends Enum<SslClientAuth>
    Describes whether the server should require or request client authentication.

      Enum Constant Details

      Field Details

      Method Details

        values

        public static SslClientAuth[] values()
        Returns an array containing the constants of this enum class, in the order they are declared.
        Returns:
        an array containing the constants of this enum class, in the order they are declared

        valueOf

        public static SslClientAuth valueOf(String name)
        Returns the enum constant of this class with the specified name. The string must match exactly an
        identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not permitted.)
        Parameters:
        name - the name of the enum constant to be returned.
        Returns:
        the enum constant with the specified name
        Throws:
        IllegalArgumentException - if this enum class has no constant with the specified name
        NullPointerException - if the argument is null

        forConfig

        public static SslClientAuth forConfig(String key)

        toString

        public String toString()
        Overrides:
        toString in class Enum<SslClientAuth>
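    A minimal sketch of the two lookup paths documented above; it assumes REQUIRED is one of the declared constants and that forConfig accepts the lower-case config value, as with the ssl.client.auth setting.

    ```java
    import org.apache.kafka.common.config.SslClientAuth;

    public class SslClientAuthDemo {
        public static void main(String[] args) {
            // valueOf requires the exact enum constant name, as described above.
            SslClientAuth byName = SslClientAuth.valueOf("REQUIRED");

            // forConfig maps a config value (assumed here to be the lower-case form) to a constant.
            SslClientAuth byConfig = SslClientAuth.forConfig("required");

            System.out.println(byName + " / " + byConfig);
        }
    }
    ```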
diff --git a/static/41/javadoc/org/apache/kafka/common/config/SslConfigs.html b/static/41/javadoc/org/apache/kafka/common/config/SslConfigs.html
new file mode 100644
index 000000000..f7ec94c6e
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/common/config/SslConfigs.html
@@ -0,0 +1,945 @@

    SslConfigs (kafka 4.1.0 API)
    Class SslConfigs

    java.lang.Object
      org.apache.kafka.common.config.SslConfigs

    public class SslConfigs extends Object

      Field Details

        All of the following are public static final String constants:

        SSL_PROTOCOL_CONFIG, SSL_PROTOCOL_DOC, DEFAULT_SSL_PROTOCOL,
        SSL_PROVIDER_CONFIG, SSL_PROVIDER_DOC,
        SSL_CIPHER_SUITES_CONFIG, SSL_CIPHER_SUITES_DOC,
        SSL_ENABLED_PROTOCOLS_CONFIG, SSL_ENABLED_PROTOCOLS_DOC, DEFAULT_SSL_ENABLED_PROTOCOLS,
        SSL_KEYSTORE_TYPE_CONFIG, SSL_KEYSTORE_TYPE_DOC, DEFAULT_SSL_KEYSTORE_TYPE,
        SSL_KEYSTORE_KEY_CONFIG, SSL_KEYSTORE_KEY_DOC,
        SSL_KEYSTORE_CERTIFICATE_CHAIN_CONFIG, SSL_KEYSTORE_CERTIFICATE_CHAIN_DOC,
        SSL_TRUSTSTORE_CERTIFICATES_CONFIG, SSL_TRUSTSTORE_CERTIFICATES_DOC,
        SSL_KEYSTORE_LOCATION_CONFIG, SSL_KEYSTORE_LOCATION_DOC,
        SSL_KEYSTORE_PASSWORD_CONFIG, SSL_KEYSTORE_PASSWORD_DOC,
        SSL_KEY_PASSWORD_CONFIG, SSL_KEY_PASSWORD_DOC,
        SSL_TRUSTSTORE_TYPE_CONFIG, SSL_TRUSTSTORE_TYPE_DOC, DEFAULT_SSL_TRUSTSTORE_TYPE,
        SSL_TRUSTSTORE_LOCATION_CONFIG, SSL_TRUSTSTORE_LOCATION_DOC,
        SSL_TRUSTSTORE_PASSWORD_CONFIG, SSL_TRUSTSTORE_PASSWORD_DOC,
        SSL_KEYMANAGER_ALGORITHM_CONFIG, SSL_KEYMANAGER_ALGORITHM_DOC, DEFAULT_SSL_KEYMANGER_ALGORITHM,
        SSL_TRUSTMANAGER_ALGORITHM_CONFIG, SSL_TRUSTMANAGER_ALGORITHM_DOC, DEFAULT_SSL_TRUSTMANAGER_ALGORITHM,
        SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC,
        DEFAULT_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM,
        SSL_SECURE_RANDOM_IMPLEMENTATION_CONFIG, SSL_SECURE_RANDOM_IMPLEMENTATION_DOC,
        SSL_ENGINE_FACTORY_CLASS_CONFIG, SSL_ENGINE_FACTORY_CLASS_DOC

        RECONFIGURABLE_CONFIGS

        public static final Set<String> RECONFIGURABLE_CONFIGS

        NON_RECONFIGURABLE_CONFIGS

        public static final Set<String> NON_RECONFIGURABLE_CONFIGS

      Constructor Details

        SslConfigs

        public SslConfigs()

      Method Details

        addClientSslSupport

        public static void addClientSslSupport(ConfigDef config)
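    A minimal sketch of SSL client properties assembled from the SslConfigs keys above; the store locations and passwords are placeholders, and security.protocol is written as a literal because that key is defined in CommonClientConfigs rather than on this page.

    ```java
    import java.util.Properties;
    import org.apache.kafka.common.config.SslConfigs;

    public class SslClientProps {
        public static Properties sslProperties() {
            Properties props = new Properties();
            props.put("security.protocol", "SSL");
            // Paths and passwords below are placeholders.
            props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/etc/kafka/secrets/client.truststore.jks");
            props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "truststore-password");
            props.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "/etc/kafka/secrets/client.keystore.jks");
            props.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "keystore-password");
            props.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, "key-password");
            return props;
        }
    }
    ```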
diff --git a/static/41/javadoc/org/apache/kafka/common/config/TopicConfig.html b/static/41/javadoc/org/apache/kafka/common/config/TopicConfig.html
new file mode 100644
index 000000000..1fbdf8cea
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/common/config/TopicConfig.html
@@ -0,0 +1,1293 @@

    TopicConfig (kafka 4.1.0 API)
    + +
    +
    + +
    + +

    Class TopicConfig

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.TopicConfig
    +
    +
    +
    +
    public class TopicConfig +extends Object
    +

    Keys that can be used to configure a topic. These keys are useful when creating or reconfiguring a + topic using the AdminClient. + +

    The intended pattern is for broker configs to include a `log.` prefix. For example, to set the default broker + cleanup policy, one would set log.cleanup.policy instead of cleanup.policy. Unfortunately, there are many cases + where this pattern is not followed.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        SEGMENT_BYTES_CONFIG

        +
        public static final String SEGMENT_BYTES_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SEGMENT_BYTES_DOC

        +
        public static final String SEGMENT_BYTES_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SEGMENT_MS_CONFIG

        +
        public static final String SEGMENT_MS_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SEGMENT_MS_DOC

        +
        public static final String SEGMENT_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SEGMENT_JITTER_MS_CONFIG

        +
        public static final String SEGMENT_JITTER_MS_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SEGMENT_JITTER_MS_DOC

        +
        public static final String SEGMENT_JITTER_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SEGMENT_INDEX_BYTES_CONFIG

        +
        public static final String SEGMENT_INDEX_BYTES_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SEGMENT_INDEX_BYTES_DOC

        +
        public static final String SEGMENT_INDEX_BYTES_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        FLUSH_MESSAGES_INTERVAL_CONFIG

        +
        public static final String FLUSH_MESSAGES_INTERVAL_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        FLUSH_MESSAGES_INTERVAL_DOC

        +
        public static final String FLUSH_MESSAGES_INTERVAL_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        FLUSH_MS_CONFIG

        +
        public static final String FLUSH_MS_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        FLUSH_MS_DOC

        +
        public static final String FLUSH_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RETENTION_BYTES_CONFIG

        +
        public static final String RETENTION_BYTES_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RETENTION_BYTES_DOC

        +
        public static final String RETENTION_BYTES_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RETENTION_MS_CONFIG

        +
        public static final String RETENTION_MS_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RETENTION_MS_DOC

        +
        public static final String RETENTION_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        REMOTE_LOG_STORAGE_ENABLE_CONFIG

        +
        public static final String REMOTE_LOG_STORAGE_ENABLE_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        REMOTE_LOG_STORAGE_ENABLE_DOC

        +
        public static final String REMOTE_LOG_STORAGE_ENABLE_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        LOCAL_LOG_RETENTION_MS_CONFIG

        +
        public static final String LOCAL_LOG_RETENTION_MS_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        LOCAL_LOG_RETENTION_MS_DOC

        +
        public static final String LOCAL_LOG_RETENTION_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        LOCAL_LOG_RETENTION_BYTES_CONFIG

        +
        public static final String LOCAL_LOG_RETENTION_BYTES_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        LOCAL_LOG_RETENTION_BYTES_DOC

        +
        public static final String LOCAL_LOG_RETENTION_BYTES_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        REMOTE_LOG_COPY_DISABLE_CONFIG

        +
        public static final String REMOTE_LOG_COPY_DISABLE_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        REMOTE_LOG_COPY_DISABLE_DOC

        +
        public static final String REMOTE_LOG_COPY_DISABLE_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        REMOTE_LOG_DELETE_ON_DISABLE_CONFIG

        +
        public static final String REMOTE_LOG_DELETE_ON_DISABLE_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        REMOTE_LOG_DELETE_ON_DISABLE_DOC

        +
        public static final String REMOTE_LOG_DELETE_ON_DISABLE_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MAX_MESSAGE_BYTES_CONFIG

        +
        public static final String MAX_MESSAGE_BYTES_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MAX_MESSAGE_BYTES_DOC

        +
        public static final String MAX_MESSAGE_BYTES_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        INDEX_INTERVAL_BYTES_CONFIG

        +
        public static final String INDEX_INTERVAL_BYTES_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        INDEX_INTERVAL_BYTES_DOC

        +
        public static final String INDEX_INTERVAL_BYTES_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        FILE_DELETE_DELAY_MS_CONFIG

        +
        public static final String FILE_DELETE_DELAY_MS_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        FILE_DELETE_DELAY_MS_DOC

        +
        public static final String FILE_DELETE_DELAY_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DELETE_RETENTION_MS_CONFIG

        +
        public static final String DELETE_RETENTION_MS_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DELETE_RETENTION_MS_DOC

        +
        public static final String DELETE_RETENTION_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MIN_COMPACTION_LAG_MS_CONFIG

        +
        public static final String MIN_COMPACTION_LAG_MS_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MIN_COMPACTION_LAG_MS_DOC

        +
        public static final String MIN_COMPACTION_LAG_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MAX_COMPACTION_LAG_MS_CONFIG

        +
        public static final String MAX_COMPACTION_LAG_MS_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MAX_COMPACTION_LAG_MS_DOC

        +
        public static final String MAX_COMPACTION_LAG_MS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

public static final String MIN_CLEANABLE_DIRTY_RATIO_CONFIG

public static final String MIN_CLEANABLE_DIRTY_RATIO_DOC

public static final String CLEANUP_POLICY_CONFIG

public static final String CLEANUP_POLICY_COMPACT

public static final String CLEANUP_POLICY_DELETE

public static final String CLEANUP_POLICY_DOC

public static final String UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG

public static final String UNCLEAN_LEADER_ELECTION_ENABLE_DOC

public static final String MIN_IN_SYNC_REPLICAS_CONFIG

public static final String MIN_IN_SYNC_REPLICAS_DOC

public static final String COMPRESSION_TYPE_CONFIG

public static final String COMPRESSION_TYPE_DOC

public static final String COMPRESSION_GZIP_LEVEL_CONFIG

public static final String COMPRESSION_GZIP_LEVEL_DOC

public static final String COMPRESSION_LZ4_LEVEL_CONFIG

public static final String COMPRESSION_LZ4_LEVEL_DOC

public static final String COMPRESSION_ZSTD_LEVEL_CONFIG

public static final String COMPRESSION_ZSTD_LEVEL_DOC

public static final String PREALLOCATE_CONFIG

public static final String PREALLOCATE_DOC

public static final String MESSAGE_TIMESTAMP_TYPE_CONFIG

public static final String MESSAGE_TIMESTAMP_TYPE_DOC

public static final String MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG

public static final String MESSAGE_TIMESTAMP_BEFORE_MAX_MS_DOC

public static final String MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG

public static final String MESSAGE_TIMESTAMP_AFTER_MAX_MS_DOC

@Deprecated
public static final String MESSAGE_DOWNCONVERSION_ENABLE_CONFIG
Deprecated. Down-conversion is not possible in Apache Kafka 4.0 and newer, hence this configuration is a no-op, and it is deprecated for removal in Apache Kafka 5.0.

@Deprecated
public static final String MESSAGE_DOWNCONVERSION_ENABLE_DOC

Constructor Details

public TopicConfig()
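The constants above are the topic-level configuration keys (and their documentation strings) exposed by TopicConfig. As a hedged illustration that is not part of the generated Javadoc, the sketch below shows how these constants are commonly used with the Admin client when creating a compacted topic; the bootstrap address, topic name, partition count, and replication factor are placeholder values.

```java
import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.config.TopicConfig;

public class CreateCompactedTopic {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        try (Admin admin = Admin.create(props)) {
            // Use the TopicConfig constants instead of hard-coded config key strings.
            Map<String, String> configs = Map.of(
                TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT,
                TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.2",
                TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2");

            NewTopic topic = new NewTopic("example-compacted-topic", 3, (short) 3)
                .configs(configs);

            admin.createTopics(Collections.singleton(topic)).all().get();
        }
    }
}
```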
diff --git a/static/41/javadoc/org/apache/kafka/common/config/package-summary.html b/static/41/javadoc/org/apache/kafka/common/config/package-summary.html new file mode 100644 index 000000000..dd8519d3d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/package-summary.html @@ -0,0 +1,212 @@

org.apache.kafka.common.config (kafka 4.1.0 API)

Package org.apache.kafka.common.config

package org.apache.kafka.common.config

Provides common mechanisms for defining, parsing, validating, and documenting user-configurable parameters.
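To make the package description a little more concrete, here is a hedged sketch (not taken from the Javadoc) of how a plugin typically defines and parses its settings with ConfigDef and AbstractConfig from this package; the config names, defaults, and class name are invented for the example.

```java
import java.util.Map;

import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;

public class MyPluginConfig extends AbstractConfig {

    public static final String ENDPOINT_CONFIG = "my.plugin.endpoint";
    public static final String RETRIES_CONFIG = "my.plugin.retries";

    private static final ConfigDef CONFIG_DEF = new ConfigDef()
        // Required string setting with no default value.
        .define(ENDPOINT_CONFIG, Type.STRING, Importance.HIGH,
                "Endpoint the plugin connects to.")
        // Optional int setting with a default and a range validator.
        .define(RETRIES_CONFIG, Type.INT, 3, ConfigDef.Range.atLeast(0),
                Importance.LOW, "How many times to retry a failed call.");

    public MyPluginConfig(Map<String, ?> originals) {
        super(CONFIG_DEF, originals);
    }

    public String endpoint() {
        return getString(ENDPOINT_CONFIG);
    }

    public int retries() {
        return getInt(RETRIES_CONFIG);
    }
}
```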
diff --git a/static/41/javadoc/org/apache/kafka/common/config/package-tree.html b/static/41/javadoc/org/apache/kafka/common/config/package-tree.html new file mode 100644 index 000000000..2b9fee319 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/package-tree.html @@ -0,0 +1,137 @@

org.apache.kafka.common.config Class Hierarchy (kafka 4.1.0 API)

Hierarchy For Package org.apache.kafka.common.config

Package Hierarchies:

Class Hierarchy

Interface Hierarchy

Enum Class Hierarchy
diff --git a/static/41/javadoc/org/apache/kafka/common/config/provider/ConfigProvider.html b/static/41/javadoc/org/apache/kafka/common/config/provider/ConfigProvider.html new file mode 100644 index 000000000..63eb9830c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/provider/ConfigProvider.html @@ -0,0 +1,238 @@

ConfigProvider (kafka 4.1.0 API)

Interface ConfigProvider

All Superinterfaces:
AutoCloseable, Closeable, Configurable

All Known Implementing Classes:
DirectoryConfigProvider, EnvVarConfigProvider, FileConfigProvider

public interface ConfigProvider extends Configurable, Closeable

A provider of configuration data, which may optionally support subscriptions to configuration changes.

Implementations are required to safely support concurrent calls to any of the methods in this interface.

Kafka Connect discovers implementations of this interface using the Java ServiceLoader mechanism. To support this, implementations of this interface should also contain a service provider configuration file in META-INF/services/org.apache.kafka.common.config.provider.ConfigProvider.

Method Details

ConfigData get(String path)
Retrieves the data at the given path.
Parameters:
path - the path where the data resides
Returns:
the configuration data

ConfigData get(String path, Set<String> keys)
Retrieves the data with the given keys at the given path.
Parameters:
path - the path where the data resides
keys - the keys whose values will be retrieved
Returns:
the configuration data

default void subscribe(String path, Set<String> keys, ConfigChangeCallback callback)
Subscribes to changes for the given keys at the given path (optional operation).
Parameters:
path - the path where the data resides
keys - the keys whose values will be retrieved
callback - the callback to invoke upon change
Throws:
UnsupportedOperationException - if the subscribe operation is not supported

default void unsubscribe(String path, Set<String> keys, ConfigChangeCallback callback)
Unsubscribes to changes for the given keys at the given path (optional operation).
Parameters:
path - the path where the data resides
keys - the keys whose values will be retrieved
callback - the callback to be unsubscribed from changes
Throws:
UnsupportedOperationException - if the unsubscribe operation is not supported

default void unsubscribeAll()
Clears all subscribers (optional operation).
Throws:
UnsupportedOperationException - if the unsubscribeAll operation is not supported
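As a hedged sketch that is not part of the generated Javadoc, a minimal ConfigProvider implementation might look like the following; the class name and its in-memory backing store are hypothetical, and a provider intended for Kafka Connect would additionally ship the META-INF/services/org.apache.kafka.common.config.provider.ConfigProvider file mentioned above.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.kafka.common.config.ConfigData;
import org.apache.kafka.common.config.provider.ConfigProvider;

/** Hypothetical provider that serves values from an in-memory map keyed by "path". */
public class InMemoryConfigProvider implements ConfigProvider {

    // ConcurrentHashMap because implementations must tolerate concurrent calls.
    private final Map<String, Map<String, String>> store = new ConcurrentHashMap<>();

    @Override
    public void configure(Map<String, ?> configs) {
        // No settings needed for this toy example.
    }

    @Override
    public ConfigData get(String path) {
        return new ConfigData(store.getOrDefault(path, Map.of()));
    }

    @Override
    public ConfigData get(String path, Set<String> keys) {
        Map<String, String> all = store.getOrDefault(path, Map.of());
        Map<String, String> selected = new HashMap<>();
        for (String key : keys) {
            if (all.containsKey(key)) {
                selected.put(key, all.get(key));
            }
        }
        return new ConfigData(selected);
    }

    @Override
    public void close() {
        store.clear();
    }
}
```

Once such a provider is registered under config.providers in a broker, client, or Connect worker configuration, its values can be referenced indirectly with ${<provider-alias>:<path>:<key>} placeholders.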
diff --git a/static/41/javadoc/org/apache/kafka/common/config/provider/DirectoryConfigProvider.html b/static/41/javadoc/org/apache/kafka/common/config/provider/DirectoryConfigProvider.html new file mode 100644 index 000000000..08e4845c0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/provider/DirectoryConfigProvider.html @@ -0,0 +1,295 @@

DirectoryConfigProvider (kafka 4.1.0 API)

Class DirectoryConfigProvider

java.lang.Object
org.apache.kafka.common.config.provider.DirectoryConfigProvider

All Implemented Interfaces:
Closeable, AutoCloseable, ConfigProvider, Configurable

public class DirectoryConfigProvider extends Object implements ConfigProvider

An implementation of ConfigProvider based on a directory of files. Property keys correspond to the names of the regular (i.e. non-directory) files in a directory given by the path parameter. Property values are taken from the file contents corresponding to each key.

Constructor Details

public DirectoryConfigProvider()

Method Details

public void configure(Map<String,?> configs)
Configure this class with the given key-value pairs.
Specified by: configure in interface Configurable

public void close() throws IOException
Specified by: close in interfaces AutoCloseable and Closeable
Throws: IOException

public ConfigData get(String path)
Retrieves the data contained in regular files in the directory given by path. Non-regular files (such as directories) in the given directory are silently ignored.
Specified by: get in interface ConfigProvider
Parameters:
path - the directory where data files reside.
Returns:
the configuration data.

public ConfigData get(String path, Set<String> keys)
Retrieves the data contained in the regular files named by keys in the directory given by path. Non-regular files (such as directories) in the given directory are silently ignored.
Specified by: get in interface ConfigProvider
Parameters:
path - the directory where data files reside.
keys - the keys whose values will be retrieved.
Returns:
the configuration data.
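A hedged usage sketch, not taken from the Javadoc: the directory and file names below are invented, and in practice the provider is usually registered under config.providers and referenced through ${...} placeholders rather than called directly.

```java
import java.util.Map;
import java.util.Set;

import org.apache.kafka.common.config.ConfigData;
import org.apache.kafka.common.config.provider.DirectoryConfigProvider;

public class DirectoryConfigProviderExample {
    public static void main(String[] args) throws Exception {
        try (DirectoryConfigProvider provider = new DirectoryConfigProvider()) {
            provider.configure(Map.of());

            // Hypothetical layout: /run/secrets/db-password is a regular file
            // whose contents become the value for the key "db-password".
            ConfigData data = provider.get("/run/secrets", Set.of("db-password"));
            data.data().forEach((key, value) -> System.out.println(key));
        }
    }
}
```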
diff --git a/static/41/javadoc/org/apache/kafka/common/config/provider/EnvVarConfigProvider.html b/static/41/javadoc/org/apache/kafka/common/config/provider/EnvVarConfigProvider.html new file mode 100644 index 000000000..7a54d005d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/provider/EnvVarConfigProvider.html @@ -0,0 +1,303 @@

EnvVarConfigProvider (kafka 4.1.0 API)

Class EnvVarConfigProvider

java.lang.Object
org.apache.kafka.common.config.provider.EnvVarConfigProvider

All Implemented Interfaces:
Closeable, AutoCloseable, ConfigProvider, Configurable

public class EnvVarConfigProvider extends Object implements ConfigProvider

An implementation of ConfigProvider based on environment variables. Keys correspond to the names of the environment variables, paths are currently not being used. Using an allowlist pattern ALLOWLIST_PATTERN_CONFIG that supports regular expressions, it is possible to limit access to specific environment variables. Default allowlist pattern is ".*".

Field Details

public static final String ALLOWLIST_PATTERN_CONFIG

public static final String ALLOWLIST_PATTERN_CONFIG_DOC

Constructor Details

public EnvVarConfigProvider()

public EnvVarConfigProvider(Map<String,String> envVarsAsArgument)

Method Details

public void configure(Map<String,?> configs)
Configure this class with the given key-value pairs.
Specified by: configure in interface Configurable

public void close() throws IOException
Specified by: close in interfaces AutoCloseable and Closeable
Throws: IOException

public ConfigData get(String path)
Retrieves the data at the given path.
Specified by: get in interface ConfigProvider
Parameters:
path - unused
Returns:
returns environment variables as configuration

public ConfigData get(String path, Set<String> keys)
Retrieves the data with the given keys at the given path.
Specified by: get in interface ConfigProvider
Parameters:
path - path, not used for environment variables
keys - the keys whose values will be retrieved.
Returns:
the configuration data.
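A hedged sketch, not part of the Javadoc: it uses the map-based constructor to supply a fake environment for illustration, and restricts access with the allowlist pattern described above. The variable names and pattern are placeholders.

```java
import java.util.Map;
import java.util.Set;

import org.apache.kafka.common.config.ConfigData;
import org.apache.kafka.common.config.provider.EnvVarConfigProvider;

public class EnvVarConfigProviderExample {
    public static void main(String[] args) throws Exception {
        // The map constructor is handy for tests: it supplies the "environment"
        // explicitly instead of reading System.getenv().
        Map<String, String> fakeEnv = Map.of(
            "KAFKA_DB_PASSWORD", "secret",
            "HOME", "/home/app");

        try (EnvVarConfigProvider provider = new EnvVarConfigProvider(fakeEnv)) {
            // Restrict the provider to variables matching the allowlist pattern.
            provider.configure(Map.of(
                EnvVarConfigProvider.ALLOWLIST_PATTERN_CONFIG, "KAFKA_.*"));

            ConfigData data = provider.get("", Set.of("KAFKA_DB_PASSWORD"));
            System.out.println(data.data()); // expected: {KAFKA_DB_PASSWORD=secret}
        }
    }
}
```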
diff --git a/static/41/javadoc/org/apache/kafka/common/config/provider/FileConfigProvider.html b/static/41/javadoc/org/apache/kafka/common/config/provider/FileConfigProvider.html new file mode 100644 index 000000000..2bc7b22b9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/provider/FileConfigProvider.html @@ -0,0 +1,288 @@

FileConfigProvider (kafka 4.1.0 API)

Class FileConfigProvider

java.lang.Object
org.apache.kafka.common.config.provider.FileConfigProvider

All Implemented Interfaces:
Closeable, AutoCloseable, ConfigProvider, Configurable

public class FileConfigProvider extends Object implements ConfigProvider

An implementation of ConfigProvider that represents a Properties file. All property keys and values are stored as cleartext.

Constructor Details

public FileConfigProvider()

Method Details

public void configure(Map<String,?> configs)
Configure this class with the given key-value pairs.
Specified by: configure in interface Configurable

public ConfigData get(String path)
Retrieves the data at the given Properties file.
Specified by: get in interface ConfigProvider
Parameters:
path - the file where the data resides
Returns:
the configuration data

public ConfigData get(String path, Set<String> keys)
Retrieves the data with the given keys at the given Properties file.
Specified by: get in interface ConfigProvider
Parameters:
path - the file where the data resides
keys - the keys whose values will be retrieved
Returns:
the configuration data

public void close()
Specified by: close in interfaces AutoCloseable and Closeable
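A hedged sketch, not part of the Javadoc; the file path and property key are placeholders.

```java
import java.util.Map;
import java.util.Set;

import org.apache.kafka.common.config.ConfigData;
import org.apache.kafka.common.config.provider.FileConfigProvider;

public class FileConfigProviderExample {
    public static void main(String[] args) {
        FileConfigProvider provider = new FileConfigProvider();
        provider.configure(Map.of());

        // Assumes a plain Java properties file, e.g. /etc/kafka/credentials.properties
        // containing lines such as: db.password=secret
        ConfigData data = provider.get("/etc/kafka/credentials.properties",
                                       Set.of("db.password"));
        System.out.println(data.data().keySet());

        provider.close();
    }
}
```

In broker, client, or Connect configurations the same provider is usually registered as config.providers=file with config.providers.file.class=org.apache.kafka.common.config.provider.FileConfigProvider, after which values can be referenced as ${file:/etc/kafka/credentials.properties:db.password}.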
diff --git a/static/41/javadoc/org/apache/kafka/common/config/provider/package-summary.html b/static/41/javadoc/org/apache/kafka/common/config/provider/package-summary.html new file mode 100644 index 000000000..edf4f630b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/provider/package-summary.html @@ -0,0 +1,118 @@

org.apache.kafka.common.config.provider (kafka 4.1.0 API)

Package org.apache.kafka.common.config.provider

package org.apache.kafka.common.config.provider

Provides a pluggable interface and some implementations for late-binding in configuration values.
diff --git a/static/41/javadoc/org/apache/kafka/common/config/provider/package-tree.html b/static/41/javadoc/org/apache/kafka/common/config/provider/package-tree.html new file mode 100644 index 000000000..9118759dd --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/config/provider/package-tree.html @@ -0,0 +1,92 @@

org.apache.kafka.common.config.provider Class Hierarchy (kafka 4.1.0 API)

Hierarchy For Package org.apache.kafka.common.config.provider

Package Hierarchies:

Class Hierarchy

Interface Hierarchy
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/ApiException.html b/static/41/javadoc/org/apache/kafka/common/errors/ApiException.html new file mode 100644 index 000000000..8998fcebd --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/ApiException.html @@ -0,0 +1,217 @@

ApiException (kafka 4.1.0 API)

Class ApiException

All Implemented Interfaces:
Serializable

Direct Known Subclasses:
ApplicationRecoverableException, BrokerIdNotRegisteredException, BrokerNotAvailableException, ControllerMovedException, DelegationTokenDisabledException, DelegationTokenExpiredException, DelegationTokenNotFoundException, DelegationTokenOwnerMismatchException, DuplicateBrokerRegistrationException, DuplicateResourceException, DuplicateSequenceException, DuplicateVoterException, FeatureUpdateFailedException, FencedMemberEpochException, FencedStateEpochException, GroupIdNotFoundException, GroupMaxSizeReachedException, GroupNotEmptyException, GroupSubscribedToTopicException, InconsistentClusterIdException, InconsistentGroupProtocolException, InconsistentVoterSetException, IneligibleReplicaException, InvalidCommitOffsetSizeException, InvalidConfigurationException, InvalidFetchSizeException, InvalidGroupIdException, InvalidOffsetException, InvalidPartitionsException, InvalidPrincipalTypeException, InvalidRecordStateException, InvalidRegistrationException, InvalidRegularExpression, InvalidReplicaAssignmentException, InvalidRequestException, InvalidSessionTimeoutException, InvalidTimestampException, InvalidTxnStateException, InvalidTxnTimeoutException, InvalidUpdateVersionException, InvalidVoterKeyException, LogDirNotFoundException, MemberIdRequiredException, MismatchedEndpointTypeException, NewLeaderElectedException, NoReassignmentInProgressException, OffsetMetadataTooLarge, OffsetMovedToTieredStorageException, OperationNotAttemptedException, OutOfOrderSequenceException, PartitionAssignorException, PolicyViolationException, PositionOutOfRangeException, PrincipalDeserializationException, ReassignmentInProgressException, RebalanceInProgressException, RebootstrapRequiredException, RecordTooLargeException, ResourceNotFoundException, RetriableException, SecurityDisabledException, SnapshotNotFoundException, StaleBrokerEpochException, StaleMemberEpochException, StreamsInvalidTopologyEpochException, StreamsInvalidTopologyException, StreamsTopologyFencedException, TelemetryTooLargeException, TopicDeletionDisabledException, TopicExistsException, TransactionAbortableException, TransactionAbortedException, TransactionalIdNotFoundException, TransactionCoordinatorFencedException, UnacceptableCredentialException, UnknownControllerIdException, UnknownServerException, UnknownSubscriptionIdException, UnreleasedInstanceIdException, UnsupportedAssignorException, UnsupportedByAuthenticationException, UnsupportedCompressionTypeException, UnsupportedEndpointTypeException, VoterNotFoundException

public class ApiException extends KafkaException

Any API exception that is part of the public protocol and should be a subclass of this class and be part of this package.

Constructor Details

public ApiException(String message, Throwable cause)

public ApiException(String message)

public ApiException(Throwable cause)

public ApiException()
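Broker-side errors surface to applications as subclasses of ApiException, typically wrapped in an ExecutionException when the Admin client's futures are awaited. The helper below is a hedged, hypothetical sketch of unwrapping them; TopicExistsException is one of the subclasses listed above.

```java
import java.util.Set;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.errors.ApiException;
import org.apache.kafka.common.errors.TopicExistsException;

public class CreateTopicIgnoringExisting {
    // Hypothetical helper: create a topic, treating "already exists" as success.
    static void createIfMissing(Admin admin, NewTopic topic) throws Exception {
        try {
            admin.createTopics(Set.of(topic)).all().get();
        } catch (ExecutionException e) {
            if (e.getCause() instanceof TopicExistsException) {
                return; // another client won the race; nothing to do
            }
            if (e.getCause() instanceof ApiException) {
                // Broker-side error that is part of the public protocol.
                throw (ApiException) e.getCause();
            }
            throw e;
        }
    }
}
```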
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/ApplicationRecoverableException.html b/static/41/javadoc/org/apache/kafka/common/errors/ApplicationRecoverableException.html new file mode 100644 index 000000000..f75d2974e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/ApplicationRecoverableException.html @@ -0,0 +1,188 @@

ApplicationRecoverableException (kafka 4.1.0 API)

Class ApplicationRecoverableException

All Implemented Interfaces:
Serializable

Direct Known Subclasses:
FencedInstanceIdException, IllegalGenerationException, InvalidPidMappingException, InvalidProducerEpochException, ProducerFencedException, UnknownMemberIdException

public abstract class ApplicationRecoverableException extends ApiException

Indicates that the error is fatal to the producer, and the application needs to restart the producer after handling the error. Depending on the application, different recovery strategies (e.g., re-balancing task, restoring from checkpoints) may be employed.

Constructor Details

public ApplicationRecoverableException(String message, Throwable cause)

public ApplicationRecoverableException(String message)

public ApplicationRecoverableException(Throwable cause)

public ApplicationRecoverableException()
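A hedged sketch of the recovery pattern the description refers to; the method shape is invented for illustration, and real applications would also re-seed any in-flight state before resuming. ProducerFencedException, one of the subclasses listed above, is a typical trigger.

```java
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.ApplicationRecoverableException;

public class RecreateProducerOnFatalError {
    // Hypothetical sketch: send within a transaction and rebuild the producer
    // when an error that is fatal to the producer occurs.
    static KafkaProducer<String, String> sendTransactionally(
            KafkaProducer<String, String> producer,
            Properties producerProps,
            ProducerRecord<String, String> record) {
        try {
            producer.beginTransaction();
            producer.send(record);
            producer.commitTransaction();
            return producer;
        } catch (ApplicationRecoverableException fatal) {
            // The current producer instance can no longer be used.
            producer.close();
            KafkaProducer<String, String> fresh = new KafkaProducer<>(producerProps);
            fresh.initTransactions();
            return fresh;
        }
    }
}
```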
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/AuthenticationException.html b/static/41/javadoc/org/apache/kafka/common/errors/AuthenticationException.html new file mode 100644 index 000000000..9f72d017c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/AuthenticationException.html @@ -0,0 +1,191 @@

AuthenticationException (kafka 4.1.0 API)

Class AuthenticationException

All Implemented Interfaces:
Serializable

Direct Known Subclasses:
IllegalSaslStateException, SaslAuthenticationException, SslAuthenticationException, UnsupportedSaslMechanismException

public class AuthenticationException extends InvalidConfigurationException

This exception indicates that SASL authentication has failed. On authentication failure, clients abort the operation requested and raise one of the subclasses of this exception (see the direct known subclasses listed above).

Constructor Details

public AuthenticationException(String message)

public AuthenticationException(Throwable cause)

public AuthenticationException(String message, Throwable cause)
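For context (not part of the Javadoc), authentication failures are raised by ordinary client operations once a connection attempts SASL authentication. The hedged sketch below shows a consumer configured for SASL/PLAIN with placeholder broker address, topic, and credentials, and a catch for this exception family.

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.serialization.StringDeserializer;

public class SaslClientExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker:9093");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Example SASL/PLAIN settings; mechanism and credentials are placeholders.
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.mechanism", "PLAIN");
        props.put("sasl.jaas.config",
            "org.apache.kafka.common.security.plain.PlainLoginModule required "
            + "username=\"alice\" password=\"changeme\";");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("example-topic"));
            consumer.poll(Duration.ofSeconds(5));
        } catch (AuthenticationException e) {
            // Non-retriable: fix the credentials or the broker-side configuration.
            System.err.println("Authentication failed: " + e.getMessage());
        }
    }
}
```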
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/AuthorizationException.html b/static/41/javadoc/org/apache/kafka/common/errors/AuthorizationException.html new file mode 100644 index 000000000..a794b28c1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/AuthorizationException.html @@ -0,0 +1,171 @@

AuthorizationException (kafka 4.1.0 API)

Class AuthorizationException

All Implemented Interfaces:
Serializable

Direct Known Subclasses:
ClusterAuthorizationException, DelegationTokenAuthorizationException, GroupAuthorizationException, TopicAuthorizationException, TransactionalIdAuthorizationException

public class AuthorizationException extends InvalidConfigurationException

Constructor Details

public AuthorizationException(String message)

public AuthorizationException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/AuthorizerNotReadyException.html b/static/41/javadoc/org/apache/kafka/common/errors/AuthorizerNotReadyException.html new file mode 100644 index 000000000..ede3dca77 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/AuthorizerNotReadyException.html @@ -0,0 +1,158 @@

AuthorizerNotReadyException (kafka 4.1.0 API)

Class AuthorizerNotReadyException

All Implemented Interfaces:
Serializable

public class AuthorizerNotReadyException extends RetriableException

An exception that indicates that the authorizer is not ready to receive the request yet.

Constructor Details

public AuthorizerNotReadyException()
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/BrokerIdNotRegisteredException.html b/static/41/javadoc/org/apache/kafka/common/errors/BrokerIdNotRegisteredException.html new file mode 100644 index 000000000..70020011d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/BrokerIdNotRegisteredException.html @@ -0,0 +1,165 @@

BrokerIdNotRegisteredException (kafka 4.1.0 API)

Class BrokerIdNotRegisteredException

All Implemented Interfaces:
Serializable

public class BrokerIdNotRegisteredException extends ApiException

Constructor Details

public BrokerIdNotRegisteredException(String message)

public BrokerIdNotRegisteredException(String message, Throwable throwable)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/BrokerNotAvailableException.html b/static/41/javadoc/org/apache/kafka/common/errors/BrokerNotAvailableException.html new file mode 100644 index 000000000..1decf8059 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/BrokerNotAvailableException.html @@ -0,0 +1,165 @@

BrokerNotAvailableException (kafka 4.1.0 API)

Class BrokerNotAvailableException

All Implemented Interfaces:
Serializable

public class BrokerNotAvailableException extends ApiException

Constructor Details

public BrokerNotAvailableException(String message)

public BrokerNotAvailableException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/ClusterAuthorizationException.html b/static/41/javadoc/org/apache/kafka/common/errors/ClusterAuthorizationException.html new file mode 100644 index 000000000..0a852cc2c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/ClusterAuthorizationException.html @@ -0,0 +1,169 @@

ClusterAuthorizationException (kafka 4.1.0 API)

Class ClusterAuthorizationException

All Implemented Interfaces:
Serializable

public class ClusterAuthorizationException extends AuthorizationException

Constructor Details

public ClusterAuthorizationException(String message)

public ClusterAuthorizationException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/ConcurrentTransactionsException.html b/static/41/javadoc/org/apache/kafka/common/errors/ConcurrentTransactionsException.html new file mode 100644 index 000000000..327acd96f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/ConcurrentTransactionsException.html @@ -0,0 +1,157 @@

ConcurrentTransactionsException (kafka 4.1.0 API)

Class ConcurrentTransactionsException

All Implemented Interfaces:
Serializable

public class ConcurrentTransactionsException extends RetriableException

Constructor Details

public ConcurrentTransactionsException(String message)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/ControllerMovedException.html b/static/41/javadoc/org/apache/kafka/common/errors/ControllerMovedException.html new file mode 100644 index 000000000..9deb26901 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/ControllerMovedException.html @@ -0,0 +1,165 @@

ControllerMovedException (kafka 4.1.0 API)

Class ControllerMovedException

All Implemented Interfaces:
Serializable

public class ControllerMovedException extends ApiException

Constructor Details

public ControllerMovedException(String message)

public ControllerMovedException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/CoordinatorLoadInProgressException.html b/static/41/javadoc/org/apache/kafka/common/errors/CoordinatorLoadInProgressException.html new file mode 100644 index 000000000..bcb6bb44c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/CoordinatorLoadInProgressException.html @@ -0,0 +1,173 @@

CoordinatorLoadInProgressException (kafka 4.1.0 API)

Class CoordinatorLoadInProgressException

All Implemented Interfaces:
Serializable

public class CoordinatorLoadInProgressException extends RetriableException

In the context of the group coordinator, the broker returns this error code for any coordinator request if it is still loading the group metadata (e.g. after a leader change for that group metadata topic partition).

In the context of the transactional coordinator, this error will be returned if there is a pending transactional request with the same transactional id, or if the transaction cache is currently being populated from the transaction log.

Constructor Details

public CoordinatorLoadInProgressException(String message)

public CoordinatorLoadInProgressException(String message, Throwable cause)
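Because this class extends RetriableException, callers generally retry after a short backoff. The generic helper below is a hedged, hypothetical sketch; the helper name, attempt count, and delay are arbitrary.

```java
import java.util.concurrent.Callable;

import org.apache.kafka.common.errors.RetriableException;

public final class Retries {
    // Hypothetical helper: retry a call a few times when a transient
    // (RetriableException) error such as CoordinatorLoadInProgressException occurs.
    public static <T> T withRetries(Callable<T> call, int maxAttempts, long backoffMs)
            throws Exception {
        RetriableException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return call.call();
            } catch (RetriableException e) {
                last = e;
                Thread.sleep(backoffMs);
            }
        }
        throw last;
    }
}
```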
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/CoordinatorNotAvailableException.html b/static/41/javadoc/org/apache/kafka/common/errors/CoordinatorNotAvailableException.html new file mode 100644 index 000000000..7bc79c740 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/CoordinatorNotAvailableException.html @@ -0,0 +1,203 @@

CoordinatorNotAvailableException (kafka 4.1.0 API)

Class CoordinatorNotAvailableException

All Implemented Interfaces:
Serializable

public class CoordinatorNotAvailableException extends RefreshRetriableException

In the context of the group coordinator, the broker returns this error code for metadata or offset commit requests if the group metadata topic has not been created yet.

In the context of the transactional coordinator, this error will be returned if the underlying transactional log is under replicated or if an append to the log times out.

Constructor Details

public CoordinatorNotAvailableException(String message)

public CoordinatorNotAvailableException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/CorruptRecordException.html b/static/41/javadoc/org/apache/kafka/common/errors/CorruptRecordException.html new file mode 100644 index 000000000..ca8cafb02 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/CorruptRecordException.html @@ -0,0 +1,185 @@

CorruptRecordException (kafka 4.1.0 API)

Class CorruptRecordException

All Implemented Interfaces:
Serializable

public class CorruptRecordException extends RetriableException

This exception indicates a record has failed its internal CRC check; this generally indicates network or disk corruption.

Constructor Details

public CorruptRecordException()

public CorruptRecordException(String message)

public CorruptRecordException(Throwable cause)

public CorruptRecordException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenAuthorizationException.html b/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenAuthorizationException.html new file mode 100644 index 000000000..2fa59dc07 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenAuthorizationException.html @@ -0,0 +1,169 @@

DelegationTokenAuthorizationException (kafka 4.1.0 API)

Class DelegationTokenAuthorizationException

All Implemented Interfaces:
Serializable

public class DelegationTokenAuthorizationException extends AuthorizationException

Constructor Details

public DelegationTokenAuthorizationException(String message)

public DelegationTokenAuthorizationException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenDisabledException.html b/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenDisabledException.html new file mode 100644 index 000000000..5f677561c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenDisabledException.html @@ -0,0 +1,165 @@

DelegationTokenDisabledException (kafka 4.1.0 API)

Class DelegationTokenDisabledException

All Implemented Interfaces:
Serializable

public class DelegationTokenDisabledException extends ApiException

Constructor Details

public DelegationTokenDisabledException(String message)

public DelegationTokenDisabledException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenExpiredException.html b/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenExpiredException.html new file mode 100644 index 000000000..a5a34e0fb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenExpiredException.html @@ -0,0 +1,165 @@

DelegationTokenExpiredException (kafka 4.1.0 API)

Class DelegationTokenExpiredException

All Implemented Interfaces:
Serializable

public class DelegationTokenExpiredException extends ApiException

Constructor Details

public DelegationTokenExpiredException(String message)

public DelegationTokenExpiredException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenNotFoundException.html b/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenNotFoundException.html new file mode 100644 index 000000000..b0a678249 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenNotFoundException.html @@ -0,0 +1,165 @@

DelegationTokenNotFoundException (kafka 4.1.0 API)

Class DelegationTokenNotFoundException

All Implemented Interfaces:
Serializable

public class DelegationTokenNotFoundException extends ApiException

Constructor Details

public DelegationTokenNotFoundException(String message)

public DelegationTokenNotFoundException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenOwnerMismatchException.html b/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenOwnerMismatchException.html new file mode 100644 index 000000000..299004999 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/DelegationTokenOwnerMismatchException.html @@ -0,0 +1,165 @@

DelegationTokenOwnerMismatchException (kafka 4.1.0 API)

Class DelegationTokenOwnerMismatchException

All Implemented Interfaces:
Serializable

public class DelegationTokenOwnerMismatchException extends ApiException

Constructor Details

public DelegationTokenOwnerMismatchException(String message)

public DelegationTokenOwnerMismatchException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/DisconnectException.html b/static/41/javadoc/org/apache/kafka/common/errors/DisconnectException.html new file mode 100644 index 000000000..5c7745467 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/DisconnectException.html @@ -0,0 +1,213 @@

DisconnectException (kafka 4.1.0 API)

Class DisconnectException

All Implemented Interfaces:
Serializable

public class DisconnectException extends RetriableException

Server disconnected before a request could be completed.

Constructor Details

public DisconnectException()

public DisconnectException(String message, Throwable cause)

public DisconnectException(String message)

public DisconnectException(Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/DuplicateBrokerRegistrationException.html b/static/41/javadoc/org/apache/kafka/common/errors/DuplicateBrokerRegistrationException.html new file mode 100644 index 000000000..7cfee4d12 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/DuplicateBrokerRegistrationException.html @@ -0,0 +1,165 @@

DuplicateBrokerRegistrationException (kafka 4.1.0 API)

Class DuplicateBrokerRegistrationException

All Implemented Interfaces:
Serializable

public class DuplicateBrokerRegistrationException extends ApiException

Constructor Details

public DuplicateBrokerRegistrationException(String message)

public DuplicateBrokerRegistrationException(String message, Throwable throwable)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/DuplicateResourceException.html b/static/41/javadoc/org/apache/kafka/common/errors/DuplicateResourceException.html new file mode 100644 index 000000000..cfd12314f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/DuplicateResourceException.html @@ -0,0 +1,253 @@

DuplicateResourceException (kafka 4.1.0 API)

Class DuplicateResourceException

All Implemented Interfaces:
Serializable

public class DuplicateResourceException extends ApiException

Exception thrown due to a request that illegally refers to the same resource twice (for example, trying to both create and delete the same SCRAM credential for a particular user in a single request).

Constructor Details

public DuplicateResourceException(String message)
Parameters:
message - the exception's message

public DuplicateResourceException(String message, Throwable cause)
Parameters:
message - the exception's message
cause - the exception's cause

public DuplicateResourceException(String resource, String message)
Parameters:
resource - the (potentially null) resource that was referred to twice
message - the exception's message

public DuplicateResourceException(String resource, String message, Throwable cause)
Parameters:
resource - the (potentially null) resource that was referred to twice
message - the exception's message
cause - the exception's cause

Method Details

public String resource()
Returns:
the (potentially null) resource that was referred to twice
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/DuplicateSequenceException.html b/static/41/javadoc/org/apache/kafka/common/errors/DuplicateSequenceException.html new file mode 100644 index 000000000..23bcba0b0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/DuplicateSequenceException.html @@ -0,0 +1,155 @@

DuplicateSequenceException (kafka 4.1.0 API)

Class DuplicateSequenceException

All Implemented Interfaces:
Serializable

public class DuplicateSequenceException extends ApiException

Constructor Details

public DuplicateSequenceException(String message)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/DuplicateVoterException.html b/static/41/javadoc/org/apache/kafka/common/errors/DuplicateVoterException.html new file mode 100644 index 000000000..79d4a47bd --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/DuplicateVoterException.html @@ -0,0 +1,165 @@

DuplicateVoterException (kafka 4.1.0 API)

Class DuplicateVoterException

All Implemented Interfaces:
Serializable

public class DuplicateVoterException extends ApiException

Constructor Details

public DuplicateVoterException(String message)

public DuplicateVoterException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/ElectionNotNeededException.html b/static/41/javadoc/org/apache/kafka/common/errors/ElectionNotNeededException.html new file mode 100644 index 000000000..004bba275 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/ElectionNotNeededException.html @@ -0,0 +1,171 @@

ElectionNotNeededException (kafka 4.1.0 API)

Class ElectionNotNeededException

All Implemented Interfaces:
Serializable

public class ElectionNotNeededException extends InvalidMetadataException

Constructor Details

public ElectionNotNeededException(String message)

public ElectionNotNeededException(String message, Throwable cause)
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/errors/EligibleLeadersNotAvailableException.html b/static/41/javadoc/org/apache/kafka/common/errors/EligibleLeadersNotAvailableException.html new file mode 100644 index 000000000..cc33428da --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/EligibleLeadersNotAvailableException.html @@ -0,0 +1,171 @@ + + + + +EligibleLeadersNotAvailableException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class EligibleLeadersNotAvailableException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class EligibleLeadersNotAvailableException +extends InvalidMetadataException
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        EligibleLeadersNotAvailableException

        +
        public EligibleLeadersNotAvailableException(String message)
        +
        +
      • +
      • +
        +

        EligibleLeadersNotAvailableException

        +
        public EligibleLeadersNotAvailableException(String message, + Throwable cause)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/errors/FeatureUpdateFailedException.html b/static/41/javadoc/org/apache/kafka/common/errors/FeatureUpdateFailedException.html new file mode 100644 index 000000000..06a67cb02 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/FeatureUpdateFailedException.html @@ -0,0 +1,165 @@ + + + + +FeatureUpdateFailedException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class FeatureUpdateFailedException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class FeatureUpdateFailedException +extends ApiException
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        FeatureUpdateFailedException

        +
        public FeatureUpdateFailedException(String message)
        +
        +
      • +
      • +
        +

        FeatureUpdateFailedException

        +
        public FeatureUpdateFailedException(String message, + Throwable cause)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/errors/FencedInstanceIdException.html b/static/41/javadoc/org/apache/kafka/common/errors/FencedInstanceIdException.html new file mode 100644 index 000000000..873e022f6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/FencedInstanceIdException.html @@ -0,0 +1,167 @@ + + + + +FencedInstanceIdException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class FencedInstanceIdException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class FencedInstanceIdException +extends ApplicationRecoverableException
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        FencedInstanceIdException

        +
        public FencedInstanceIdException(String message)
        +
        +
      • +
      • +
        +

        FencedInstanceIdException

        +
        public FencedInstanceIdException(String message, + Throwable cause)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/FencedLeaderEpochException.html (new file)

    Class FencedLeaderEpochException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class FencedLeaderEpochException extends InvalidMetadataException

    The request contained a leader epoch which is smaller than that on the broker that received the
    request. This can happen when an operation is attempted before a pending metadata update has been
    received. Clients will typically refresh metadata before retrying.

    Constructor Details
      • public FencedLeaderEpochException(String message)
      • public FencedLeaderEpochException(String message, Throwable cause)
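    Because this exception is retriable, a correctly configured producer normally absorbs it by
    refreshing metadata and retrying instead of surfacing it to the application. A minimal sketch,
    assuming a standard Java producer; the bootstrap address and topic name are placeholders:

        import java.util.Properties;
        import org.apache.kafka.clients.producer.KafkaProducer;
        import org.apache.kafka.clients.producer.ProducerRecord;

        public class RetryingProducerSketch {
            public static void main(String[] args) {
                Properties props = new Properties();
                props.put("bootstrap.servers", "localhost:9092"); // placeholder address
                props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
                props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
                // Retriable errors such as a fenced leader epoch trigger a metadata refresh
                // and are retried until delivery.timeout.ms expires.
                props.put("retries", Integer.MAX_VALUE);
                props.put("delivery.timeout.ms", 120000);

                try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                    producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
                }
            }
        }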
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/FencedMemberEpochException.html (new file)

    Class FencedMemberEpochException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class FencedMemberEpochException extends ApiException

    Constructor Details
      • public FencedMemberEpochException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/FencedStateEpochException.html (new file)

    Class FencedStateEpochException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class FencedStateEpochException extends ApiException

    Thrown when the share coordinator rejected the request because the share-group state epoch did not match.

    Constructor Details
      • public FencedStateEpochException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/FetchSessionIdNotFoundException.html (new file)

    Class FetchSessionIdNotFoundException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class FetchSessionIdNotFoundException extends RetriableException

    Constructor Details
      • public FetchSessionIdNotFoundException()
      • public FetchSessionIdNotFoundException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/FetchSessionTopicIdException.html (new file)

    Class FetchSessionTopicIdException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class FetchSessionTopicIdException extends RetriableException

    Constructor Details
      • public FetchSessionTopicIdException(String message)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/GroupAuthorizationException.html (new file)

    Class GroupAuthorizationException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class GroupAuthorizationException extends AuthorizationException

    Constructor Details
      • public GroupAuthorizationException(String message, String groupId)
      • public GroupAuthorizationException(String message)

    Method Details
      • public String groupId()
        Return the group ID that failed authorization. May be null if it is not known
        in the context the exception was raised in.
        Returns: nullable groupId
      • public static GroupAuthorizationException forGroupId(String groupId)
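    A minimal sketch of inspecting the failing group when this exception escapes a consumer call;
    the bootstrap address, group id and topic are placeholders, and the deserializers are the
    standard string ones:

        import java.time.Duration;
        import java.util.List;
        import java.util.Properties;
        import org.apache.kafka.clients.consumer.KafkaConsumer;
        import org.apache.kafka.common.errors.GroupAuthorizationException;

        public class GroupAuthorizationSketch {
            public static void main(String[] args) {
                Properties props = new Properties();
                props.put("bootstrap.servers", "localhost:9092"); // placeholder address
                props.put("group.id", "demo-group");              // placeholder group
                props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
                props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

                try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                    consumer.subscribe(List.of("demo-topic"));
                    consumer.poll(Duration.ofSeconds(1));
                } catch (GroupAuthorizationException e) {
                    // groupId() is nullable: the group may not be known in the
                    // context where the exception was raised.
                    System.err.println("Not authorized for group: " + e.groupId());
                }
            }
        }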
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/GroupIdNotFoundException.html (new file)

    Class GroupIdNotFoundException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class GroupIdNotFoundException extends ApiException

    Constructor Details
      • public GroupIdNotFoundException(String message)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/GroupMaxSizeReachedException.html (new file)

    Class GroupMaxSizeReachedException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class GroupMaxSizeReachedException extends ApiException

    Indicates that a group is already at its configured maximum capacity and cannot accommodate more members.

    Constructor Details
      • public GroupMaxSizeReachedException(String message)
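    The size limit itself is enforced by the broker (the group.max.size broker setting, assuming the
    KIP-389 behaviour); a member rejected for this reason should treat the error as fatal for its
    group membership. A hedged sketch of reacting to it on the consumer side:

        import java.time.Duration;
        import org.apache.kafka.clients.consumer.Consumer;
        import org.apache.kafka.common.errors.GroupMaxSizeReachedException;

        public class GroupFullSketch {
            // The error is treated as fatal for this member, so the consumer is
            // closed rather than retried.
            static void pollOnce(Consumer<String, String> consumer) {
                try {
                    consumer.poll(Duration.ofSeconds(1));
                } catch (GroupMaxSizeReachedException e) {
                    System.err.println("Group is at its maximum size: " + e.getMessage());
                    consumer.close();
                }
            }
        }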
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/GroupNotEmptyException.html (new file)

    Class GroupNotEmptyException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class GroupNotEmptyException extends ApiException

    Constructor Details
      • public GroupNotEmptyException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/GroupSubscribedToTopicException.html (new file)

    Class GroupSubscribedToTopicException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class GroupSubscribedToTopicException extends ApiException

    Constructor Details
      • public GroupSubscribedToTopicException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/IllegalGenerationException.html (new file)

    Class IllegalGenerationException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class IllegalGenerationException extends ApplicationRecoverableException

    Constructor Details
      • public IllegalGenerationException()
      • public IllegalGenerationException(String message, Throwable cause)
      • public IllegalGenerationException(String message)
      • public IllegalGenerationException(Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/IllegalSaslStateException.html (new file)

    Class IllegalSaslStateException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class IllegalSaslStateException extends AuthenticationException

    This exception indicates unexpected requests prior to SASL authentication. This could be due to
    misconfigured security, e.g. if PLAINTEXT protocol is used to connect to a SASL endpoint.

    Constructor Details
      • public IllegalSaslStateException(String message)
      • public IllegalSaslStateException(String message, Throwable cause)
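    A common trigger is pointing a PLAINTEXT-configured client at a SASL listener. A minimal sketch of
    client properties for a SASL_PLAINTEXT listener; the listener address, mechanism, username and
    password are placeholders:

        import java.util.Properties;

        public class SaslClientConfigSketch {
            public static Properties saslProps() {
                Properties props = new Properties();
                props.put("bootstrap.servers", "broker:9093");     // placeholder SASL listener
                props.put("security.protocol", "SASL_PLAINTEXT");  // not plain PLAINTEXT
                props.put("sasl.mechanism", "PLAIN");
                props.put("sasl.jaas.config",
                    "org.apache.kafka.common.security.plain.PlainLoginModule required "
                    + "username=\"client\" password=\"client-secret\";");
                return props;
            }
        }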
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InconsistentClusterIdException.html (new file)

    Class InconsistentClusterIdException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InconsistentClusterIdException extends ApiException

    Constructor Details
      • public InconsistentClusterIdException(String message)
      • public InconsistentClusterIdException(String message, Throwable throwable)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InconsistentGroupProtocolException.html (new file)

    Class InconsistentGroupProtocolException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InconsistentGroupProtocolException extends ApiException

    Constructor Details
      • public InconsistentGroupProtocolException(String message, Throwable cause)
      • public InconsistentGroupProtocolException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InconsistentTopicIdException.html (new file)

    Class InconsistentTopicIdException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InconsistentTopicIdException extends InvalidMetadataException

    Constructor Details
      • public InconsistentTopicIdException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InconsistentVoterSetException.html (new file)

    Class InconsistentVoterSetException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InconsistentVoterSetException extends ApiException

    Constructor Details
      • public InconsistentVoterSetException(String s)
      • public InconsistentVoterSetException(String message, Throwable cause)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/IneligibleReplicaException.html (new file)

    Class IneligibleReplicaException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class IneligibleReplicaException extends ApiException

    Constructor Details
      • public IneligibleReplicaException(String message)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InterruptException.html (new file)

    Class InterruptException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InterruptException extends KafkaException

    An unchecked wrapper for InterruptedException.

    Constructor Details
      • public InterruptException(InterruptedException cause)
      • public InterruptException(String message, InterruptedException cause)
      • public InterruptException(String message)
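    Being unchecked, this exception can escape blocking client calls (poll, send, flush, close)
    without being declared. A minimal sketch of handling it while keeping the thread's interrupt
    status visible to callers; the pollOnce runnable stands in for any blocking client call:

        import org.apache.kafka.common.errors.InterruptException;

        public class InterruptHandlingSketch {
            static void runLoop(Runnable pollOnce) {
                try {
                    while (!Thread.currentThread().isInterrupted()) {
                        pollOnce.run(); // stands in for a blocking Kafka client call
                    }
                } catch (InterruptException e) {
                    // Preserve the interrupt so surrounding code can shut down cleanly.
                    Thread.currentThread().interrupt();
                }
            }
        }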
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidCommitOffsetSizeException.html (new file)

    Class InvalidCommitOffsetSizeException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidCommitOffsetSizeException extends ApiException

    Constructor Details
      • public InvalidCommitOffsetSizeException(String message, Throwable cause)
      • public InvalidCommitOffsetSizeException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidConfigurationException.html (new file)

    Class InvalidConfigurationException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable
    Direct Known Subclasses: AuthenticationException, AuthorizationException, InvalidRecordException,
    InvalidReplicationFactorException, InvalidRequiredAcksException, InvalidTopicException,
    RecordBatchTooLargeException, UnsupportedForMessageFormatException, UnsupportedVersionException

    public class InvalidConfigurationException extends ApiException

    Constructor Details
      • public InvalidConfigurationException(String message, Throwable cause)
      • public InvalidConfigurationException(String message)
      • public InvalidConfigurationException(Throwable cause)
      • public InvalidConfigurationException()

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidFetchSessionEpochException.html (new file)

    Class InvalidFetchSessionEpochException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidFetchSessionEpochException extends RetriableException

    Constructor Details
      • public InvalidFetchSessionEpochException()
      • public InvalidFetchSessionEpochException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidFetchSizeException.html (new file)

    Class InvalidFetchSizeException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidFetchSizeException extends ApiException

    Constructor Details
      • public InvalidFetchSizeException(String message)
      • public InvalidFetchSizeException(String message, Throwable cause)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidGroupIdException.html (new file)

    Class InvalidGroupIdException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidGroupIdException extends ApiException

    Constructor Details
      • public InvalidGroupIdException(String message, Throwable cause)
      • public InvalidGroupIdException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidMetadataException.html (new file)

    Class InvalidMetadataException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable
    Direct Known Subclasses: ElectionNotNeededException, EligibleLeadersNotAvailableException,
    FencedLeaderEpochException, InconsistentTopicIdException, KafkaStorageException,
    LeaderNotAvailableException, ListenerNotFoundException, NetworkException,
    NotLeaderOrFollowerException, PreferredLeaderNotAvailableException, ReplicaNotAvailableException,
    UnknownTopicIdException, UnknownTopicOrPartitionException

    public abstract class InvalidMetadataException extends RefreshRetriableException

    An exception that may indicate the client's metadata is out of date.
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidOffsetException.html (new file)

    Class InvalidOffsetException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable
    Direct Known Subclasses: OffsetOutOfRangeException

    public class InvalidOffsetException extends ApiException

    Thrown when the offset for a set of partitions is invalid (either undefined or out of range),
    and no reset policy has been configured.

    Constructor Details
      • public InvalidOffsetException(String message)
      • public InvalidOffsetException(String message, Throwable cause)
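    The reset policy is configured on the consumer: with "earliest" or "latest", an undefined or
    out-of-range offset is reset automatically instead of surfacing as an error, while "none" leaves
    it to the application. A minimal sketch of such a configuration (placeholder address and group):

        import java.util.Properties;

        public class OffsetResetConfigSketch {
            public static Properties consumerProps() {
                Properties props = new Properties();
                props.put("bootstrap.servers", "localhost:9092"); // placeholder address
                props.put("group.id", "demo-group");              // placeholder group
                props.put("auto.offset.reset", "earliest");       // or "latest" / "none"
                props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
                props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
                return props;
            }
        }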
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidPartitionsException.html (new file)

    Class InvalidPartitionsException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidPartitionsException extends ApiException

    Constructor Details
      • public InvalidPartitionsException(String message)
      • public InvalidPartitionsException(String message, Throwable cause)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidPidMappingException.html (new file)

    Class InvalidPidMappingException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidPidMappingException extends ApplicationRecoverableException

    Constructor Details
      • public InvalidPidMappingException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidPrincipalTypeException.html (new file)

    Class InvalidPrincipalTypeException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidPrincipalTypeException extends ApiException

    Constructor Details
      • public InvalidPrincipalTypeException(String message)
      • public InvalidPrincipalTypeException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidProducerEpochException.html (new file)

    Class InvalidProducerEpochException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidProducerEpochException extends ApplicationRecoverableException

    This exception indicates that the produce request sent to the partition leader contains a
    non-matching producer epoch. When encountering this exception, the user should abort the ongoing
    transaction by calling KafkaProducer#abortTransaction, which would try to send an InitProducerId
    request and reinitialize the producer under the hood.

    Constructor Details
      • public InvalidProducerEpochException(String message)
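    A minimal sketch of the recommended handling inside a transactional send path; the bootstrap
    address, transactional id and topic are placeholders:

        import java.util.Properties;
        import org.apache.kafka.clients.producer.KafkaProducer;
        import org.apache.kafka.clients.producer.ProducerRecord;
        import org.apache.kafka.common.errors.InvalidProducerEpochException;

        public class AbortOnEpochMismatchSketch {
            public static void main(String[] args) {
                Properties props = new Properties();
                props.put("bootstrap.servers", "localhost:9092"); // placeholder address
                props.put("transactional.id", "demo-txn-id");     // placeholder id
                props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
                props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

                try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                    producer.initTransactions();
                    producer.beginTransaction();
                    try {
                        producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
                        producer.commitTransaction();
                    } catch (InvalidProducerEpochException e) {
                        // The producer epoch no longer matches: abort the ongoing transaction.
                        producer.abortTransaction();
                    }
                }
            }
        }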
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidRecordStateException.html (new file)

    Class InvalidRecordStateException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidRecordStateException extends ApiException

    Thrown when the acknowledgement of delivery of a record could not be completed because the record
    state is invalid.

    Constructor Details
      • public InvalidRecordStateException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidRegistrationException.html (new file)

    Class InvalidRegistrationException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidRegistrationException extends ApiException

    Thrown when a broker registration request is considered invalid by the controller.

    Constructor Details
      • public InvalidRegistrationException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidRegularExpression.html (new file)

    Class InvalidRegularExpression (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidRegularExpression extends ApiException

    Thrown when a regular expression received in a request is not valid.

    Constructor Details
      • public InvalidRegularExpression(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidReplicaAssignmentException.html (new file)

    Class InvalidReplicaAssignmentException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidReplicaAssignmentException extends ApiException

    Constructor Details
      • public InvalidReplicaAssignmentException(String message)
      • public InvalidReplicaAssignmentException(String message, Throwable cause)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidReplicationFactorException.html (new file)

    Class InvalidReplicationFactorException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidReplicationFactorException extends InvalidConfigurationException

    Constructor Details
      • public InvalidReplicationFactorException(String message)
      • public InvalidReplicationFactorException(String message, Throwable cause)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidRequestException.html (new file)

    Class InvalidRequestException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidRequestException extends ApiException

    Thrown when a request breaks basic wire protocol rules. This most likely occurs because the
    request was malformed by the client library or because the message was sent to an incompatible broker.

    Constructor Details
      • public InvalidRequestException(String message)
      • public InvalidRequestException(String message, Throwable cause)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidRequiredAcksException.html (new file)

    Class InvalidRequiredAcksException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidRequiredAcksException extends InvalidConfigurationException

    Constructor Details
      • public InvalidRequiredAcksException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidSessionTimeoutException.html (new file)

    Class InvalidSessionTimeoutException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidSessionTimeoutException extends ApiException

    Constructor Details
      • public InvalidSessionTimeoutException(String message, Throwable cause)
      • public InvalidSessionTimeoutException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidShareSessionEpochException.html (new file)

    Class InvalidShareSessionEpochException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidShareSessionEpochException extends RetriableException

    Thrown when the share session epoch is invalid.

    Constructor Details
      • public InvalidShareSessionEpochException(String message)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidTimestampException.html (new file)

    Class InvalidTimestampException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidTimestampException extends ApiException

    Indicates that the timestamp of a record is invalid.

    Constructor Details
      • public InvalidTimestampException(String message)
      • public InvalidTimestampException(String message, Throwable cause)
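    Record timestamps can be supplied explicitly when the record is created, and the broker rejects
    values outside its configured bounds. A minimal sketch using the producer record constructor that
    takes a timestamp; the topic, key and value are placeholders:

        import org.apache.kafka.clients.producer.ProducerRecord;

        public class ExplicitTimestampSketch {
            public static ProducerRecord<String, String> record(long timestampMs) {
                // Partition is left null so the partitioner chooses one; the explicit
                // timestamp is validated broker-side.
                return new ProducerRecord<>("demo-topic", null, timestampMs, "key", "value");
            }
        }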
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidTopicException.html (new file)

    Class InvalidTopicException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidTopicException extends InvalidConfigurationException

    The client has attempted to perform an operation on an invalid topic. For example, the topic name
    may be too long or contain invalid characters. This exception is not retriable because the
    operation won't suddenly become valid.

    Constructor Details
      • public InvalidTopicException()
      • public InvalidTopicException(String message, Throwable cause)
      • public InvalidTopicException(String message)
      • public InvalidTopicException(Throwable cause)
      • public InvalidTopicException(Set<String> invalidTopics)
      • public InvalidTopicException(String message, Set<String> invalidTopics)

    Method Details
      • public Set<String> invalidTopics()
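    A minimal sketch of surfacing the offending names when topic creation is rejected; the bootstrap
    address and topic name are placeholders, and the admin client delivers the error wrapped in an
    ExecutionException:

        import java.util.List;
        import java.util.Properties;
        import java.util.concurrent.ExecutionException;
        import org.apache.kafka.clients.admin.Admin;
        import org.apache.kafka.clients.admin.NewTopic;
        import org.apache.kafka.common.errors.InvalidTopicException;

        public class InvalidTopicSketch {
            public static void main(String[] args) throws InterruptedException {
                Properties props = new Properties();
                props.put("bootstrap.servers", "localhost:9092"); // placeholder address

                try (Admin admin = Admin.create(props)) {
                    NewTopic topic = new NewTopic("bad topic name!", 1, (short) 1); // invalid characters
                    try {
                        admin.createTopics(List.of(topic)).all().get();
                    } catch (ExecutionException e) {
                        if (e.getCause() instanceof InvalidTopicException ite) {
                            // invalidTopics() lists the offending names when they are known.
                            System.err.println("Rejected topics: " + ite.invalidTopics());
                        }
                    }
                }
            }
        }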
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidTxnStateException.html (new file)

    Class InvalidTxnStateException (kafka 4.1.0 API)
    All Implemented Interfaces: Serializable

    public class InvalidTxnStateException extends ApiException

    Constructor Details
      • public InvalidTxnStateException(String message)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidTxnTimeoutException.html b/static/41/javadoc/org/apache/kafka/common/errors/InvalidTxnTimeoutException.html new file mode 100644 index 000000000..ce2e0897f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/InvalidTxnTimeoutException.html @@ -0,0 +1,167 @@
    InvalidTxnTimeoutException (kafka 4.1.0 API)

    Class InvalidTxnTimeoutException
    All Implemented Interfaces: Serializable

    public class InvalidTxnTimeoutException extends ApiException

    The transaction coordinator returns this error code if the timeout received via the InitProducerIdRequest is larger than the `transaction.max.timeout.ms` config value.

    Constructor Details
        public InvalidTxnTimeoutException(String message, Throwable cause)
        public InvalidTxnTimeoutException(String message)
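A minimal sketch of how this rejection is usually hit from a client, assuming a broker whose transaction.max.timeout.ms is at its default (15 minutes); the bootstrap address, transactional id, and the way the error is unwrapped below are illustrative rather than taken from the Javadoc:

```java
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.InvalidTxnTimeoutException;
import org.apache.kafka.common.serialization.StringSerializer;

public class TxnTimeoutSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "demo-txn");
        // Asking for a client-side transaction timeout above the broker's
        // transaction.max.timeout.ms is rejected by the transaction coordinator.
        props.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 2 * 60 * 60 * 1000); // 2 hours

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.initTransactions();
        } catch (KafkaException e) {
            Throwable cause = (e instanceof InvalidTxnTimeoutException) ? e : e.getCause();
            if (cause instanceof InvalidTxnTimeoutException) {
                System.err.println("Lower transaction.timeout.ms to at most the broker's "
                        + "transaction.max.timeout.ms: " + cause.getMessage());
            } else {
                throw e;
            }
        }
    }
}
```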
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidUpdateVersionException.html b/static/41/javadoc/org/apache/kafka/common/errors/InvalidUpdateVersionException.html new file mode 100644 index 000000000..e6377da99 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/InvalidUpdateVersionException.html @@ -0,0 +1,165 @@
    InvalidUpdateVersionException (kafka 4.1.0 API)

    Class InvalidUpdateVersionException
    All Implemented Interfaces: Serializable

    public class InvalidUpdateVersionException extends ApiException

    Constructor Details
        public InvalidUpdateVersionException(String message)
        public InvalidUpdateVersionException(String message, Throwable throwable)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/InvalidVoterKeyException.html b/static/41/javadoc/org/apache/kafka/common/errors/InvalidVoterKeyException.html new file mode 100644 index 000000000..a94d5bc33 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/InvalidVoterKeyException.html @@ -0,0 +1,165 @@
    InvalidVoterKeyException (kafka 4.1.0 API)

    Class InvalidVoterKeyException
    All Implemented Interfaces: Serializable

    public class InvalidVoterKeyException extends ApiException

    Constructor Details
        public InvalidVoterKeyException(String s)
        public InvalidVoterKeyException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/KafkaStorageException.html b/static/41/javadoc/org/apache/kafka/common/errors/KafkaStorageException.html new file mode 100644 index 000000000..cea8f1a64 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/KafkaStorageException.html @@ -0,0 +1,196 @@
    KafkaStorageException (kafka 4.1.0 API)

    Class KafkaStorageException
    All Implemented Interfaces: Serializable

    public class KafkaStorageException extends InvalidMetadataException

    Miscellaneous disk-related IOException occurred when handling a request. The client should request a metadata update and retry if the response shows KafkaStorageException.

    Here are the guidelines on how to handle KafkaStorageException and IOException:
    1) If the server has not finished loading logs, IOException does not need to be converted to KafkaStorageException.
    2) After the server has finished loading logs, IOException should be caught and trigger LogDirFailureChannel.maybeAddOfflineLogDir(). Then the IOException should either be swallowed and logged, or be converted and re-thrown as KafkaStorageException.
    3) It is preferred for IOException to be caught in Log rather than in ReplicaManager or LogSegment.

    Constructor Details
        public KafkaStorageException()
        public KafkaStorageException(String message)
        public KafkaStorageException(Throwable cause)
        public KafkaStorageException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/LeaderNotAvailableException.html b/static/41/javadoc/org/apache/kafka/common/errors/LeaderNotAvailableException.html new file mode 100644 index 000000000..128620aef --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/LeaderNotAvailableException.html @@ -0,0 +1,173 @@
    LeaderNotAvailableException (kafka 4.1.0 API)

    Class LeaderNotAvailableException
    All Implemented Interfaces: Serializable

    public class LeaderNotAvailableException extends InvalidMetadataException

    There is no currently available leader for the given partition (either because a leadership election is in progress or because all replicas are down).

    Constructor Details
        public LeaderNotAvailableException(String message)
        public LeaderNotAvailableException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/ListenerNotFoundException.html b/static/41/javadoc/org/apache/kafka/common/errors/ListenerNotFoundException.html new file mode 100644 index 000000000..1b3cabc02 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/ListenerNotFoundException.html @@ -0,0 +1,176 @@
    ListenerNotFoundException (kafka 4.1.0 API)

    Class ListenerNotFoundException
    All Implemented Interfaces: Serializable

    public class ListenerNotFoundException extends InvalidMetadataException

    The leader does not have an endpoint corresponding to the listener on which metadata was requested. This could indicate a broker configuration error or a transient error when listeners are updated dynamically and client requests are processed before all brokers have updated their listeners. This is currently used only for missing listeners on leader brokers, but may be used for followers in future.

    Constructor Details
        public ListenerNotFoundException(String message)
        public ListenerNotFoundException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/LogDirNotFoundException.html b/static/41/javadoc/org/apache/kafka/common/errors/LogDirNotFoundException.html new file mode 100644 index 000000000..b6a2fa886 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/LogDirNotFoundException.html @@ -0,0 +1,174 @@
    LogDirNotFoundException (kafka 4.1.0 API)

    Class LogDirNotFoundException
    All Implemented Interfaces: Serializable

    public class LogDirNotFoundException extends ApiException

    Thrown when a request is made for a log directory that is not present on the broker.

    Constructor Details
        public LogDirNotFoundException(String message)
        public LogDirNotFoundException(String message, Throwable cause)
        public LogDirNotFoundException(Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/MemberIdRequiredException.html b/static/41/javadoc/org/apache/kafka/common/errors/MemberIdRequiredException.html new file mode 100644 index 000000000..7604a49ac --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/MemberIdRequiredException.html @@ -0,0 +1,165 @@
    MemberIdRequiredException (kafka 4.1.0 API)

    Class MemberIdRequiredException
    All Implemented Interfaces: Serializable

    public class MemberIdRequiredException extends ApiException

    Constructor Details
        public MemberIdRequiredException(String message)
        public MemberIdRequiredException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/MismatchedEndpointTypeException.html b/static/41/javadoc/org/apache/kafka/common/errors/MismatchedEndpointTypeException.html new file mode 100644 index 000000000..5f8fd4b53 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/MismatchedEndpointTypeException.html @@ -0,0 +1,155 @@
    MismatchedEndpointTypeException (kafka 4.1.0 API)

    Class MismatchedEndpointTypeException
    All Implemented Interfaces: Serializable

    public class MismatchedEndpointTypeException extends ApiException

    Constructor Details
        public MismatchedEndpointTypeException(String message)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/NetworkException.html b/static/41/javadoc/org/apache/kafka/common/errors/NetworkException.html new file mode 100644 index 000000000..a16ff0b83 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/NetworkException.html @@ -0,0 +1,189 @@
    NetworkException (kafka 4.1.0 API)

    Class NetworkException
    All Implemented Interfaces: Serializable

    public class NetworkException extends InvalidMetadataException

    A misc. network-related IOException occurred when making a request. This could be because the client's metadata is out of date and it is making a request to a node that is now dead.

    Constructor Details
        public NetworkException()
        public NetworkException(String message, Throwable cause)
        public NetworkException(String message)
        public NetworkException(Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/NewLeaderElectedException.html b/static/41/javadoc/org/apache/kafka/common/errors/NewLeaderElectedException.html new file mode 100644 index 000000000..32e7f04a3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/NewLeaderElectedException.html @@ -0,0 +1,155 @@
    NewLeaderElectedException (kafka 4.1.0 API)

    Class NewLeaderElectedException
    All Implemented Interfaces: Serializable

    public class NewLeaderElectedException extends ApiException

    Constructor Details
        public NewLeaderElectedException(String message)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/NoReassignmentInProgressException.html b/static/41/javadoc/org/apache/kafka/common/errors/NoReassignmentInProgressException.html new file mode 100644 index 000000000..add540be8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/NoReassignmentInProgressException.html @@ -0,0 +1,166 @@
    NoReassignmentInProgressException (kafka 4.1.0 API)

    Class NoReassignmentInProgressException
    All Implemented Interfaces: Serializable

    public class NoReassignmentInProgressException extends ApiException

    Thrown if a reassignment cannot be cancelled because none is in progress.

    Constructor Details
        public NoReassignmentInProgressException(String message)
        public NoReassignmentInProgressException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/NotControllerException.html b/static/41/javadoc/org/apache/kafka/common/errors/NotControllerException.html new file mode 100644 index 000000000..e2c4dbfc6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/NotControllerException.html @@ -0,0 +1,167 @@
    NotControllerException (kafka 4.1.0 API)

    Class NotControllerException
    All Implemented Interfaces: Serializable

    public class NotControllerException extends RetriableException

    Constructor Details
        public NotControllerException(String message)
        public NotControllerException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/NotCoordinatorException.html b/static/41/javadoc/org/apache/kafka/common/errors/NotCoordinatorException.html new file mode 100644 index 000000000..4e5f77c52 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/NotCoordinatorException.html @@ -0,0 +1,174 @@
    NotCoordinatorException (kafka 4.1.0 API)

    Class NotCoordinatorException
    All Implemented Interfaces: Serializable

    public class NotCoordinatorException extends RefreshRetriableException

    In the context of the group coordinator, the broker returns this error code if it receives an offset fetch or commit request for a group it's not the coordinator of.

    In the context of the transactional coordinator, it returns this error when it receives a transactional request with a transactionalId the coordinator doesn't own.

    Constructor Details
        public NotCoordinatorException(String message)
        public NotCoordinatorException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/NotEnoughReplicasAfterAppendException.html b/static/41/javadoc/org/apache/kafka/common/errors/NotEnoughReplicasAfterAppendException.html new file mode 100644 index 000000000..8425fceef --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/NotEnoughReplicasAfterAppendException.html @@ -0,0 +1,159 @@
    NotEnoughReplicasAfterAppendException (kafka 4.1.0 API)

    Class NotEnoughReplicasAfterAppendException
    All Implemented Interfaces: Serializable

    public class NotEnoughReplicasAfterAppendException extends RetriableException

    Number of insync replicas for the partition is lower than min.insync.replicas. This exception is raised when the low ISR size is discovered *after* the message was already appended to the log. Producer retries will cause duplicates.

    Constructor Details
        public NotEnoughReplicasAfterAppendException(String message)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/NotEnoughReplicasException.html b/static/41/javadoc/org/apache/kafka/common/errors/NotEnoughReplicasException.html new file mode 100644 index 000000000..e83e629c9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/NotEnoughReplicasException.html @@ -0,0 +1,184 @@
    NotEnoughReplicasException (kafka 4.1.0 API)

    Class NotEnoughReplicasException
    All Implemented Interfaces: Serializable

    public class NotEnoughReplicasException extends RetriableException

    Number of insync replicas for the partition is lower than min.insync.replicas.

    Constructor Details
        public NotEnoughReplicasException()
        public NotEnoughReplicasException(String message, Throwable cause)
        public NotEnoughReplicasException(String message)
        public NotEnoughReplicasException(Throwable cause)
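This error only comes into play when the producer asks for full acknowledgement. A sketch under assumed settings (topic "orders" with min.insync.replicas greater than the number of live replicas, made-up bootstrap address) shows it surfacing after the producer's own retries give up:

```java
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.NotEnoughReplicasException;
import org.apache.kafka.common.serialization.StringSerializer;

public class MinIsrSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.ACKS_CONFIG, "all"); // acks=all is what makes min.insync.replicas matter

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("orders", "k", "v")).get();
        } catch (ExecutionException e) {
            if (e.getCause() instanceof NotEnoughReplicasException) {
                // Retriable: the write was rejected before being appended, so retrying
                // (or simply letting the producer's built-in retries run) is safe.
                System.err.println("ISR below min.insync.replicas: " + e.getCause().getMessage());
            }
        }
    }
}
```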
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/NotLeaderOrFollowerException.html b/static/41/javadoc/org/apache/kafka/common/errors/NotLeaderOrFollowerException.html new file mode 100644 index 000000000..7c88cdaac --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/NotLeaderOrFollowerException.html @@ -0,0 +1,193 @@
    NotLeaderOrFollowerException (kafka 4.1.0 API)

    Class NotLeaderOrFollowerException
    All Implemented Interfaces: Serializable

    public class NotLeaderOrFollowerException extends InvalidMetadataException

    Broker returns this error if a request could not be processed because the broker is not the leader or follower for a topic partition. This could be a transient exception during leader elections and reassignments. For `Produce` and other requests which are intended only for the leader, this exception indicates that the broker is not the current leader. For consumer `Fetch` requests which may be satisfied by a leader or follower, this exception indicates that the broker is not a replica of the topic partition.

    Constructor Details
        public NotLeaderOrFollowerException()
        public NotLeaderOrFollowerException(String message)
        public NotLeaderOrFollowerException(Throwable cause)
        public NotLeaderOrFollowerException(String message, Throwable cause)
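Because this is an InvalidMetadataException, the producer normally refreshes metadata and retries it away on its own. A hedged sketch (made-up bootstrap address and topic) of the usual stance on the client side:

```java
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.NotLeaderOrFollowerException;
import org.apache.kafka.common.serialization.StringSerializer;

public class LeaderChangeSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Default retries (bounded by delivery.timeout.ms) let the producer refresh
        // metadata and ride out a leader election without application involvement.

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("orders", "k", "v"), (metadata, exception) -> {
                if (exception instanceof NotLeaderOrFollowerException) {
                    // Normally retried away; it only lands here if retries were disabled or exhausted.
                    System.err.println("Request hit a non-leader broker: " + exception.getMessage());
                }
            });
            producer.flush();
        }
    }
}
```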
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/OffsetMetadataTooLarge.html b/static/41/javadoc/org/apache/kafka/common/errors/OffsetMetadataTooLarge.html new file mode 100644 index 000000000..d6a47af9b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/OffsetMetadataTooLarge.html @@ -0,0 +1,182 @@
    OffsetMetadataTooLarge (kafka 4.1.0 API)

    Class OffsetMetadataTooLarge
    All Implemented Interfaces: Serializable

    public class OffsetMetadataTooLarge extends ApiException

    The client has tried to save its offset with associated metadata larger than the maximum size allowed by the server.

    Constructor Details
        public OffsetMetadataTooLarge()
        public OffsetMetadataTooLarge(String message)
        public OffsetMetadataTooLarge(Throwable cause)
        public OffsetMetadataTooLarge(String message, Throwable cause)
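A sketch of how a consumer can run into this on commit, assuming the broker's offset.metadata.max.bytes is at its default of 4096 (the bootstrap address, group id, and topic below are illustrative):

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.OffsetMetadataTooLarge;
import org.apache.kafka.common.serialization.StringDeserializer;

public class OffsetMetadataSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "metadata-demo");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("orders", 0);
            consumer.assign(List.of(tp));
            String hugeNote = "x".repeat(10_000); // exceeds the 4096-byte default limit
            try {
                consumer.commitSync(Map.of(tp, new OffsetAndMetadata(42L, hugeNote)));
            } catch (OffsetMetadataTooLarge e) {
                // Shrink the metadata string or raise offset.metadata.max.bytes on the broker.
                System.err.println("Commit rejected: " + e.getMessage());
            }
        }
    }
}
```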
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/OffsetMovedToTieredStorageException.html b/static/41/javadoc/org/apache/kafka/common/errors/OffsetMovedToTieredStorageException.html new file mode 100644 index 000000000..239484bdf --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/OffsetMovedToTieredStorageException.html @@ -0,0 +1,165 @@
    OffsetMovedToTieredStorageException (kafka 4.1.0 API)

    Class OffsetMovedToTieredStorageException
    All Implemented Interfaces: Serializable

    public class OffsetMovedToTieredStorageException extends ApiException

    Constructor Details
        public OffsetMovedToTieredStorageException(String message)
        public OffsetMovedToTieredStorageException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/OffsetNotAvailableException.html b/static/41/javadoc/org/apache/kafka/common/errors/OffsetNotAvailableException.html new file mode 100644 index 000000000..fc8975566 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/OffsetNotAvailableException.html @@ -0,0 +1,159 @@
    OffsetNotAvailableException (kafka 4.1.0 API)

    Class OffsetNotAvailableException
    All Implemented Interfaces: Serializable

    public class OffsetNotAvailableException extends RetriableException

    Indicates that the leader is not able to guarantee monotonically increasing offsets due to the high watermark lagging behind the epoch start offset after a recent leader election.

    Constructor Details
        public OffsetNotAvailableException(String message)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/OffsetOutOfRangeException.html b/static/41/javadoc/org/apache/kafka/common/errors/OffsetOutOfRangeException.html new file mode 100644 index 000000000..a3b220eb1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/OffsetOutOfRangeException.html @@ -0,0 +1,169 @@
    OffsetOutOfRangeException (kafka 4.1.0 API)

    Class OffsetOutOfRangeException
    All Implemented Interfaces: Serializable

    public class OffsetOutOfRangeException extends InvalidOffsetException

    No reset policy has been defined, and the offsets for these partitions are either larger or smaller than the range of offsets the server has for the given partition.

    Constructor Details
        public OffsetOutOfRangeException(String message)
        public OffsetOutOfRangeException(String message, Throwable cause)
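On the consumer side this condition shows up when auto.offset.reset=none; the consumer then surfaces it as the same-named OffsetOutOfRangeException in the org.apache.kafka.clients.consumer package rather than silently resetting. A sketch (made-up bootstrap address, group, and topic) that handles it by choosing a position explicitly:

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;
import org.apache.kafka.common.serialization.StringDeserializer;

public class NoResetPolicySketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "audit");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // With auto.offset.reset=none the consumer refuses to guess and raises instead.
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("orders"));
            while (true) {
                try {
                    consumer.poll(Duration.ofSeconds(1)).forEach(r -> System.out.println(r.value()));
                } catch (OffsetOutOfRangeException e) {
                    // Committed offsets fell outside the log (e.g. retention kicked in):
                    // decide explicitly where to resume instead of relying on a reset policy.
                    consumer.seekToBeginning(consumer.assignment());
                }
            }
        }
    }
}
```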
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/OperationNotAttemptedException.html b/static/41/javadoc/org/apache/kafka/common/errors/OperationNotAttemptedException.html new file mode 100644 index 000000000..6903cbc34 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/OperationNotAttemptedException.html @@ -0,0 +1,157 @@
    OperationNotAttemptedException (kafka 4.1.0 API)

    Class OperationNotAttemptedException
    All Implemented Interfaces: Serializable

    public class OperationNotAttemptedException extends ApiException

    Indicates that the broker did not attempt to execute this operation. This may happen for batched RPCs where some operations in the batch failed, causing the broker to respond without trying the rest.

    Constructor Details
        public OperationNotAttemptedException(String message)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/OutOfOrderSequenceException.html b/static/41/javadoc/org/apache/kafka/common/errors/OutOfOrderSequenceException.html new file mode 100644 index 000000000..7cfbe7726 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/OutOfOrderSequenceException.html @@ -0,0 +1,165 @@
    OutOfOrderSequenceException (kafka 4.1.0 API)

    Class OutOfOrderSequenceException
    All Implemented Interfaces: Serializable
    Direct Known Subclasses: UnknownProducerIdException

    public class OutOfOrderSequenceException extends ApiException

    This exception indicates that the broker received an unexpected sequence number from the producer, which means that data may have been lost. If the producer is configured for idempotence only (i.e. if enable.idempotence is set and no transactional.id is configured), it is possible to continue sending with the same producer instance, but doing so risks reordering of sent records. For transactional producers, this is a fatal error and you should close the producer.

    Constructor Details
        public OutOfOrderSequenceException(String msg)
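A sketch of the idempotence-only case described above (bootstrap address and topic are made up): the send callback checks for the exception and the application decides whether continuing, at the risk of reordering, is acceptable.

```java
import java.util.Properties;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.OutOfOrderSequenceException;
import org.apache.kafka.common.serialization.StringSerializer;

public class SequenceErrorSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true); // idempotence only, no transactional.id

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        Callback callback = (metadata, exception) -> {
            if (exception instanceof OutOfOrderSequenceException) {
                // Idempotence-only producer: continuing is allowed but may reorder records.
                // With a transactional.id this would be fatal and the producer must be closed.
                System.err.println("Possible data loss on the broker: " + exception.getMessage());
            }
        };
        producer.send(new ProducerRecord<>("orders", "k", "v"), callback);
        producer.flush();
        producer.close();
    }
}
```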
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/PolicyViolationException.html b/static/41/javadoc/org/apache/kafka/common/errors/PolicyViolationException.html new file mode 100644 index 000000000..df82d2a3e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/PolicyViolationException.html @@ -0,0 +1,166 @@
    PolicyViolationException (kafka 4.1.0 API)

    Class PolicyViolationException
    All Implemented Interfaces: Serializable

    public class PolicyViolationException extends ApiException

    Exception thrown if a create topics request does not satisfy the configured policy for a topic.

    Constructor Details
        public PolicyViolationException(String message)
        public PolicyViolationException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/PositionOutOfRangeException.html b/static/41/javadoc/org/apache/kafka/common/errors/PositionOutOfRangeException.html new file mode 100644 index 000000000..aff838596 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/PositionOutOfRangeException.html @@ -0,0 +1,165 @@
    PositionOutOfRangeException (kafka 4.1.0 API)

    Class PositionOutOfRangeException
    All Implemented Interfaces: Serializable

    public class PositionOutOfRangeException extends ApiException

    Constructor Details
        public PositionOutOfRangeException(String s)
        public PositionOutOfRangeException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/PreferredLeaderNotAvailableException.html b/static/41/javadoc/org/apache/kafka/common/errors/PreferredLeaderNotAvailableException.html new file mode 100644 index 000000000..b35e42a23 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/PreferredLeaderNotAvailableException.html @@ -0,0 +1,171 @@
    PreferredLeaderNotAvailableException (kafka 4.1.0 API)

    Class PreferredLeaderNotAvailableException
    All Implemented Interfaces: Serializable

    public class PreferredLeaderNotAvailableException extends InvalidMetadataException

    Constructor Details
        public PreferredLeaderNotAvailableException(String message)
        public PreferredLeaderNotAvailableException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/PrincipalDeserializationException.html b/static/41/javadoc/org/apache/kafka/common/errors/PrincipalDeserializationException.html new file mode 100644 index 000000000..1118c9295 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/PrincipalDeserializationException.html @@ -0,0 +1,166 @@
    PrincipalDeserializationException (kafka 4.1.0 API)

    Class PrincipalDeserializationException
    All Implemented Interfaces: Serializable

    public class PrincipalDeserializationException extends ApiException

    Exception used to indicate a kafka principal deserialization failure during request forwarding.

    Constructor Details
        public PrincipalDeserializationException(String message)
        public PrincipalDeserializationException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/ProducerFencedException.html b/static/41/javadoc/org/apache/kafka/common/errors/ProducerFencedException.html new file mode 100644 index 000000000..12cc27983 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/ProducerFencedException.html @@ -0,0 +1,161 @@
    ProducerFencedException (kafka 4.1.0 API)

    Class ProducerFencedException
    All Implemented Interfaces: Serializable

    public class ProducerFencedException extends ApplicationRecoverableException

    This fatal exception indicates that another producer with the same transactional.id has been started. It is only possible to have one producer instance with a transactional.id at any given time, and the latest one to be started "fences" the previous instances so that they can no longer make transactional requests. When you encounter this exception, you must close the producer instance.

    Constructor Details
        public ProducerFencedException(String msg)
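A minimal transactional-producer sketch showing the rule above (close on fencing, abort and retry for other recoverable errors); the bootstrap address, transactional id, and topic are placeholders:

```java
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.StringSerializer;

public class FencingSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "orders-writer-1");

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        producer.initTransactions();
        try {
            producer.beginTransaction();
            producer.send(new ProducerRecord<>("orders", "k", "v"));
            producer.commitTransaction();
        } catch (ProducerFencedException e) {
            // A newer producer with the same transactional.id took over: do not retry, just close.
            producer.close();
        } catch (KafkaException e) {
            // Other errors can be aborted and retried with the same producer instance.
            producer.abortTransaction();
        }
    }
}
```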
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/ReassignmentInProgressException.html b/static/41/javadoc/org/apache/kafka/common/errors/ReassignmentInProgressException.html new file mode 100644 index 000000000..8171227d5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/ReassignmentInProgressException.html @@ -0,0 +1,166 @@
    ReassignmentInProgressException (kafka 4.1.0 API)

    Class ReassignmentInProgressException
    All Implemented Interfaces: Serializable

    public class ReassignmentInProgressException extends ApiException

    Thrown if a request cannot be completed because a partition reassignment is in progress.

    Constructor Details
        public ReassignmentInProgressException(String msg)
        public ReassignmentInProgressException(String msg, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/RebalanceInProgressException.html b/static/41/javadoc/org/apache/kafka/common/errors/RebalanceInProgressException.html new file mode 100644 index 000000000..3df066910 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/RebalanceInProgressException.html @@ -0,0 +1,181 @@
    RebalanceInProgressException (kafka 4.1.0 API)

    Class RebalanceInProgressException
    All Implemented Interfaces: Serializable

    public class RebalanceInProgressException extends ApiException

    Constructor Details
        public RebalanceInProgressException()
        public RebalanceInProgressException(String message, Throwable cause)
        public RebalanceInProgressException(String message)
        public RebalanceInProgressException(Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/RebootstrapRequiredException.html b/static/41/javadoc/org/apache/kafka/common/errors/RebootstrapRequiredException.html new file mode 100644 index 000000000..baf1a49fc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/RebootstrapRequiredException.html @@ -0,0 +1,165 @@
    RebootstrapRequiredException (kafka 4.1.0 API)

    Class RebootstrapRequiredException
    All Implemented Interfaces: Serializable

    public class RebootstrapRequiredException extends ApiException

    Constructor Details
        public RebootstrapRequiredException(String message)
        public RebootstrapRequiredException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/RecordBatchTooLargeException.html b/static/41/javadoc/org/apache/kafka/common/errors/RecordBatchTooLargeException.html new file mode 100644 index 000000000..73eff37fe --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/RecordBatchTooLargeException.html @@ -0,0 +1,184 @@
    RecordBatchTooLargeException (kafka 4.1.0 API)

    Class RecordBatchTooLargeException
    All Implemented Interfaces: Serializable

    public class RecordBatchTooLargeException extends InvalidConfigurationException

    This record batch is larger than the maximum allowable size.

    Constructor Details
        public RecordBatchTooLargeException()
        public RecordBatchTooLargeException(String message, Throwable cause)
        public RecordBatchTooLargeException(String message)
        public RecordBatchTooLargeException(Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/RecordDeserializationException.DeserializationExceptionOrigin.html b/static/41/javadoc/org/apache/kafka/common/errors/RecordDeserializationException.DeserializationExceptionOrigin.html new file mode 100644 index 000000000..d97874b29 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/RecordDeserializationException.DeserializationExceptionOrigin.html @@ -0,0 +1,221 @@
    RecordDeserializationException.DeserializationExceptionOrigin (kafka 4.1.0 API)

    Enum Class RecordDeserializationException.DeserializationExceptionOrigin

    java.lang.Object
        java.lang.Enum<RecordDeserializationException.DeserializationExceptionOrigin>
            org.apache.kafka.common.errors.RecordDeserializationException.DeserializationExceptionOrigin

    All Implemented Interfaces: Serializable, Comparable<RecordDeserializationException.DeserializationExceptionOrigin>, Constable
    Enclosing class: RecordDeserializationException

    public static enum RecordDeserializationException.DeserializationExceptionOrigin extends Enum<RecordDeserializationException.DeserializationExceptionOrigin>
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/RecordDeserializationException.html b/static/41/javadoc/org/apache/kafka/common/errors/RecordDeserializationException.html new file mode 100644 index 000000000..f2ee07a39 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/RecordDeserializationException.html @@ -0,0 +1,303 @@
    RecordDeserializationException (kafka 4.1.0 API)

    Class RecordDeserializationException
    All Implemented Interfaces: Serializable

    public class RecordDeserializationException extends SerializationException

    This exception is raised for any error that occurs while deserializing records received by the consumer using the configured Deserializer.
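The common handling pattern is to skip the record that cannot be decoded ("poison pill") and keep consuming. The sketch below assumes the class's topicPartition() and offset() accessors and uses made-up connection details:

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.RecordDeserializationException;
import org.apache.kafka.common.serialization.StringDeserializer;

public class PoisonPillSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "orders-reader");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("orders"));
            while (true) {
                try {
                    consumer.poll(Duration.ofSeconds(1)).forEach(r -> System.out.println(r.value()));
                } catch (RecordDeserializationException e) {
                    // Log the bad record's coordinates, then step past it and continue.
                    System.err.println("Skipping undecodable record at " + e.topicPartition() + "@" + e.offset());
                    consumer.seek(e.topicPartition(), e.offset() + 1);
                }
            }
        }
    }
}
```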
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/RecordTooLargeException.html b/static/41/javadoc/org/apache/kafka/common/errors/RecordTooLargeException.html new file mode 100644 index 000000000..b29845b6e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/RecordTooLargeException.html @@ -0,0 +1,223 @@
    RecordTooLargeException (kafka 4.1.0 API)

    Class RecordTooLargeException
    All Implemented Interfaces: Serializable

    public class RecordTooLargeException extends ApiException

    This record is larger than the maximum allowable size.

    Constructor Details
        public RecordTooLargeException()
        public RecordTooLargeException(String message, Throwable cause)
        public RecordTooLargeException(String message)
        public RecordTooLargeException(Throwable cause)
        public RecordTooLargeException(String message, Map<TopicPartition,Long> recordTooLargePartitions)
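A producer-side sketch of hitting the size limit (the bootstrap address, topic, and the 1 MiB cap are illustrative; the broker additionally enforces message.max.bytes / max.message.bytes). Depending on the client version the error is thrown from send() itself or delivered through the returned future, so both paths are handled:

```java
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.common.serialization.StringSerializer;

public class RecordSizeSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 1024 * 1024); // client-side cap

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            String oversized = "x".repeat(2 * 1024 * 1024);
            producer.send(new ProducerRecord<>("orders", "k", oversized)).get();
        } catch (RecordTooLargeException e) {
            // Some client versions reject the record synchronously in send().
            System.err.println("Split or compress the payload: " + e.getMessage());
        } catch (ExecutionException e) {
            if (e.getCause() instanceof RecordTooLargeException) {
                // Others fail the returned future instead.
                System.err.println("Split or compress the payload: " + e.getCause().getMessage());
            }
        }
    }
}
```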
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/RefreshRetriableException.html b/static/41/javadoc/org/apache/kafka/common/errors/RefreshRetriableException.html new file mode 100644 index 000000000..f57df2b5d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/RefreshRetriableException.html @@ -0,0 +1,190 @@
    RefreshRetriableException (kafka 4.1.0 API)

    Class RefreshRetriableException
    All Implemented Interfaces: Serializable
    Direct Known Subclasses: CoordinatorNotAvailableException, InvalidMetadataException, NotCoordinatorException

    public abstract class RefreshRetriableException extends RetriableException

    Indicates that an operation failed due to outdated or invalid metadata, requiring a refresh (e.g., refreshing producer metadata) before retrying the request. The request can be modified or updated with fresh metadata before being retried.

    Constructor Details
        public RefreshRetriableException(String message, Throwable cause)
        public RefreshRetriableException(String message)
        public RefreshRetriableException(Throwable cause)
        public RefreshRetriableException()
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/ReplicaNotAvailableException.html b/static/41/javadoc/org/apache/kafka/common/errors/ReplicaNotAvailableException.html new file mode 100644 index 000000000..68adc8f9e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/ReplicaNotAvailableException.html @@ -0,0 +1,183 @@
    ReplicaNotAvailableException (kafka 4.1.0 API)

    Class ReplicaNotAvailableException
    All Implemented Interfaces: Serializable

    public class ReplicaNotAvailableException extends InvalidMetadataException

    The replica is not available for the requested topic partition. This may be a transient exception during reassignments. From version 2.6 onwards, Fetch requests and other requests intended only for the leader or follower of the topic partition return NotLeaderOrFollowerException if the broker is not a replica of the partition.

    Constructor Details
        public ReplicaNotAvailableException(String message)
        public ReplicaNotAvailableException(String message, Throwable cause)
        public ReplicaNotAvailableException(Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/ResourceNotFoundException.html b/static/41/javadoc/org/apache/kafka/common/errors/ResourceNotFoundException.html new file mode 100644 index 000000000..44fe2eb4e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/ResourceNotFoundException.html @@ -0,0 +1,252 @@
    ResourceNotFoundException (kafka 4.1.0 API)

    Class ResourceNotFoundException
    All Implemented Interfaces: Serializable

    public class ResourceNotFoundException extends ApiException

    Exception thrown due to a request for a resource that does not exist.

    Constructor Details
        public ResourceNotFoundException(String message)
            Parameters: message - the exception's message
        public ResourceNotFoundException(String message, Throwable cause)
            Parameters: message - the exception's message; cause - the exception's cause
        public ResourceNotFoundException(String resource, String message)
            Parameters: resource - the (potentially null) resource that was not found; message - the exception's message
        public ResourceNotFoundException(String resource, String message, Throwable cause)
            Parameters: resource - the (potentially null) resource that was not found; message - the exception's message; cause - the exception's cause

    Method Details
        public String resource()
            Returns: the (potentially null) resource that was not found
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/RetriableException.html b/static/41/javadoc/org/apache/kafka/common/errors/RetriableException.html new file mode 100644 index 000000000..d2eecb8de --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/RetriableException.html @@ -0,0 +1,186 @@
    RetriableException (kafka 4.1.0 API)

    Class RetriableException
    All Implemented Interfaces: Serializable
    Direct Known Subclasses: AuthorizerNotReadyException, ConcurrentTransactionsException, CoordinatorLoadInProgressException, CorruptRecordException, DisconnectException, FetchSessionIdNotFoundException, FetchSessionTopicIdException, InvalidFetchSessionEpochException, InvalidShareSessionEpochException, NotControllerException, NotEnoughReplicasAfterAppendException, NotEnoughReplicasException, OffsetNotAvailableException, RefreshRetriableException, RetriableCommitFailedException, ShareSessionLimitReachedException, ShareSessionNotFoundException, ThrottlingQuotaExceededException, TimeoutException, UnknownLeaderEpochException, UnstableOffsetCommitException

    public abstract class RetriableException extends ApiException

    A retriable exception is a transient exception that if retried may succeed.

    Constructor Details
        public RetriableException(String message, Throwable cause)
        public RetriableException(String message)
        public RetriableException(Throwable cause)
        public RetriableException()
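Because every subclass listed above shares this base type, application code that wants its own retry policy on top of the clients' built-in retries can key off RetriableException alone. A small generic helper, as a sketch (the attempt count and backoff are arbitrary):

```java
import java.time.Duration;
import java.util.concurrent.Callable;

import org.apache.kafka.common.errors.RetriableException;

public final class RetrySketch {
    // Retry a Kafka call a bounded number of times, backing off between attempts,
    // and give up immediately on anything that is not a RetriableException.
    public static <T> T withRetries(Callable<T> call, int maxAttempts, Duration backoff) throws Exception {
        if (maxAttempts < 1) {
            throw new IllegalArgumentException("maxAttempts must be >= 1");
        }
        RetriableException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return call.call();
            } catch (RetriableException e) {
                last = e;
                Thread.sleep(backoff.toMillis());
            }
        }
        throw last;
    }
}
```

For example, a call such as consumer.position(tp), which can fail with a retriable TimeoutException, could be wrapped as withRetries(() -> consumer.position(tp), 5, Duration.ofMillis(200)).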
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/SaslAuthenticationException.html b/static/41/javadoc/org/apache/kafka/common/errors/SaslAuthenticationException.html new file mode 100644 index 000000000..5015eb20c

Class SaslAuthenticationException

All Implemented Interfaces: Serializable
public class SaslAuthenticationException extends AuthenticationException

This exception indicates that SASL authentication has failed. The error message in the exception indicates the actual cause of failure.

SASL authentication failures typically indicate invalid credentials, but could also include other failures specific to the SASL mechanism used for authentication.

Note: If SaslServer.evaluateResponse(byte[]) throws this exception during authentication, the message from the exception will be sent to clients in the SaslAuthenticate response. Custom SaslServer implementations may throw this exception in order to provide custom error messages to clients, but should take care not to include any security-critical information in the message that should not be leaked to unauthenticated clients.

Constructors: SaslAuthenticationException(String message); SaslAuthenticationException(String message, Throwable cause)
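Illustrative usage (not part of the generated Javadoc): a hedged sketch of the pattern described above, where a credential check inside a custom SASL server path throws SaslAuthenticationException with a deliberately vague, client-safe message. The verify and lookupAndCompare helpers and their data source are hypothetical.

    import org.apache.kafka.common.errors.SaslAuthenticationException;

    public class CredentialCheck {
        // Hypothetical check used from a custom SaslServer implementation. The message is
        // returned to the client, so it avoids saying which part of the credential was wrong.
        static void verify(String username, char[] password) {
            boolean valid = lookupAndCompare(username, password); // hypothetical credential-store lookup
            if (!valid) {
                throw new SaslAuthenticationException("Authentication failed: invalid username or password");
            }
        }

        private static boolean lookupAndCompare(String username, char[] password) {
            return false; // placeholder; a real implementation would consult a credential store
        }
    }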
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/SecurityDisabledException.html b/static/41/javadoc/org/apache/kafka/common/errors/SecurityDisabledException.html new file mode 100644 index 000000000..89c0ad504

Class SecurityDisabledException

All Implemented Interfaces: Serializable
public class SecurityDisabledException extends ApiException

An error indicating that security is disabled on the broker.

Constructors: SecurityDisabledException(String message); SecurityDisabledException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/SerializationException.html b/static/41/javadoc/org/apache/kafka/common/errors/SerializationException.html new file mode 100644 index 000000000..782bc9e6b

Class SerializationException

All Implemented Interfaces: Serializable
Direct Known Subclasses: RecordDeserializationException

public class SerializationException extends KafkaException

Any exception during serialization in the producer.

Constructors: SerializationException(String message, Throwable cause); SerializationException(String message); SerializationException(Throwable cause); SerializationException()
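Illustrative usage (not part of the generated Javadoc): a minimal custom Serializer that wraps any failure in a SerializationException, which is the form the producer expects serialization errors to take. The Invoice type and its "id,amount" encoding are hypothetical application details.

    import java.nio.charset.StandardCharsets;
    import org.apache.kafka.common.errors.SerializationException;
    import org.apache.kafka.common.serialization.Serializer;

    // Hypothetical application type, serialized as a simple "id,amountCents" string.
    record Invoice(String id, long amountCents) {}

    class InvoiceSerializer implements Serializer<Invoice> {
        @Override
        public byte[] serialize(String topic, Invoice data) {
            if (data == null) {
                return null; // null payloads (tombstones) pass through untouched
            }
            try {
                return (data.id() + "," + data.amountCents()).getBytes(StandardCharsets.UTF_8);
            } catch (RuntimeException e) {
                // Surface the failure in the form the clients library expects.
                throw new SerializationException("Failed to serialize invoice " + data.id(), e);
            }
        }
    }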
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/ShareSessionLimitReachedException.html b/static/41/javadoc/org/apache/kafka/common/errors/ShareSessionLimitReachedException.html new file mode 100644 index 000000000..a30b75fc0

Class ShareSessionLimitReachedException

All Implemented Interfaces: Serializable
public class ShareSessionLimitReachedException extends RetriableException

Indicates that a new share session could not be opened because the limit of share sessions has been reached.

Constructors: ShareSessionLimitReachedException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/ShareSessionNotFoundException.html b/static/41/javadoc/org/apache/kafka/common/errors/ShareSessionNotFoundException.html new file mode 100644 index 000000000..4787b3e06

Class ShareSessionNotFoundException

All Implemented Interfaces: Serializable
public class ShareSessionNotFoundException extends RetriableException

Thrown when the share session was not found.

Constructors: ShareSessionNotFoundException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/SnapshotNotFoundException.html b/static/41/javadoc/org/apache/kafka/common/errors/SnapshotNotFoundException.html new file mode 100644 index 000000000..85150cf38

Class SnapshotNotFoundException

All Implemented Interfaces: Serializable
public class SnapshotNotFoundException extends ApiException

Constructors: SnapshotNotFoundException(String s); SnapshotNotFoundException(String message, Throwable cause)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/SslAuthenticationException.html b/static/41/javadoc/org/apache/kafka/common/errors/SslAuthenticationException.html new file mode 100644 index 000000000..dc639139f

Class SslAuthenticationException

All Implemented Interfaces: Serializable
public class SslAuthenticationException extends AuthenticationException

This exception indicates that the SSL handshake has failed. See Throwable.getCause() for the SSLException that caused this failure.

SSL handshake failures in clients may indicate client authentication failure due to untrusted certificates if the server is configured to request client certificates. Handshake failures could also indicate misconfigured security, including protocol/cipher suite mismatch, server certificate authentication failure, or server host name verification failure.

Constructors: SslAuthenticationException(String message); SslAuthenticationException(String message, Throwable cause)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/StaleBrokerEpochException.html b/static/41/javadoc/org/apache/kafka/common/errors/StaleBrokerEpochException.html new file mode 100644 index 000000000..4695f7ea9

Class StaleBrokerEpochException

All Implemented Interfaces: Serializable
public class StaleBrokerEpochException extends ApiException

Constructors: StaleBrokerEpochException(String message); StaleBrokerEpochException(String message, Throwable cause)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/StaleMemberEpochException.html b/static/41/javadoc/org/apache/kafka/common/errors/StaleMemberEpochException.html new file mode 100644 index 000000000..500ea3ac2

Class StaleMemberEpochException

All Implemented Interfaces: Serializable
public class StaleMemberEpochException extends ApiException

The StaleMemberEpochException is used in the context of the new consumer group protocol (KIP-848). This error is returned in the OffsetCommit/Fetch APIs when the member epoch received does not match the current member epoch.

Constructors: StaleMemberEpochException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/StreamsInvalidTopologyEpochException.html b/static/41/javadoc/org/apache/kafka/common/errors/StreamsInvalidTopologyEpochException.html new file mode 100644 index 000000000..0170b2439

Class StreamsInvalidTopologyEpochException

All Implemented Interfaces: Serializable
public class StreamsInvalidTopologyEpochException extends ApiException

Constructors: StreamsInvalidTopologyEpochException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/StreamsInvalidTopologyException.html b/static/41/javadoc/org/apache/kafka/common/errors/StreamsInvalidTopologyException.html new file mode 100644 index 000000000..0fdf86896

Class StreamsInvalidTopologyException

All Implemented Interfaces: Serializable
public class StreamsInvalidTopologyException extends ApiException

Constructors: StreamsInvalidTopologyException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/StreamsTopologyFencedException.html b/static/41/javadoc/org/apache/kafka/common/errors/StreamsTopologyFencedException.html new file mode 100644 index 000000000..54475460c

Class StreamsTopologyFencedException

All Implemented Interfaces: Serializable
public class StreamsTopologyFencedException extends ApiException

Constructors: StreamsTopologyFencedException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/TelemetryTooLargeException.html b/static/41/javadoc/org/apache/kafka/common/errors/TelemetryTooLargeException.html new file mode 100644 index 000000000..bc28fae03

Class TelemetryTooLargeException

All Implemented Interfaces: Serializable
public class TelemetryTooLargeException extends ApiException

This exception indicates that the size of the telemetry metrics data is too large.

Constructors: TelemetryTooLargeException(String message)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/ThrottlingQuotaExceededException.html b/static/41/javadoc/org/apache/kafka/common/errors/ThrottlingQuotaExceededException.html new file mode 100644 index 000000000..3e9fb6ff8

Class ThrottlingQuotaExceededException

All Implemented Interfaces: Serializable
public class ThrottlingQuotaExceededException extends RetriableException

Exception thrown if an operation on a resource exceeds the throttling quota.

Constructors: ThrottlingQuotaExceededException(String message); ThrottlingQuotaExceededException(int throttleTimeMs, String message)

Methods: public int throttleTimeMs()
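Illustrative usage (not part of the generated Javadoc): a sketch of honoring throttleTimeMs() when topic creation is throttled, assuming an existing Admin client. The topic name, partition count, replication factor, and minimum wait are placeholders.

    import java.util.List;
    import java.util.concurrent.ExecutionException;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.NewTopic;
    import org.apache.kafka.common.errors.ThrottlingQuotaExceededException;

    public class CreateTopicWithQuotaBackoff {
        static void createTopic(Admin admin, String name) throws InterruptedException, ExecutionException {
            NewTopic topic = new NewTopic(name, 3, (short) 3); // placeholder sizing
            while (true) {
                try {
                    admin.createTopics(List.of(topic)).all().get();
                    return;
                } catch (ExecutionException e) {
                    if (e.getCause() instanceof ThrottlingQuotaExceededException throttled) {
                        // Wait at least as long as the broker-reported throttle time before retrying.
                        Thread.sleep(Math.max(throttled.throttleTimeMs(), 100));
                    } else {
                        throw e;
                    }
                }
            }
        }
    }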
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/TimeoutException.html b/static/41/javadoc/org/apache/kafka/common/errors/TimeoutException.html new file mode 100644 index 000000000..066ccdd42

Class TimeoutException

All Implemented Interfaces: Serializable
Direct Known Subclasses: BufferExhaustedException

public class TimeoutException extends RetriableException

Indicates that a request timed out.

Constructors: TimeoutException(); TimeoutException(String message, Throwable cause); TimeoutException(String message); TimeoutException(Throwable cause)
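Illustrative usage (not part of the generated Javadoc): a sketch of how a synchronous send might distinguish a retriable TimeoutException from other failures. The topic name and record contents are placeholders.

    import java.util.concurrent.ExecutionException;
    import org.apache.kafka.clients.producer.Producer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.errors.TimeoutException;

    public class SendWithTimeoutHandling {
        static void sendOnce(Producer<String, String> producer) throws InterruptedException {
            ProducerRecord<String, String> record = new ProducerRecord<>("demo-topic", "key", "value");
            try {
                producer.send(record).get(); // block for the result of this single send
            } catch (ExecutionException e) {
                if (e.getCause() instanceof TimeoutException) {
                    // The request timed out; since this is retriable, the caller may try again.
                    System.err.println("Send timed out, consider retrying: " + e.getCause().getMessage());
                } else {
                    throw new RuntimeException("Send failed with a non-retriable error", e.getCause());
                }
            }
        }
    }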
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/TopicAuthorizationException.html b/static/41/javadoc/org/apache/kafka/common/errors/TopicAuthorizationException.html new file mode 100644 index 000000000..9387f91ff

Class TopicAuthorizationException

All Implemented Interfaces: Serializable
public class TopicAuthorizationException extends AuthorizationException

Constructors: TopicAuthorizationException(String message, Set<String> unauthorizedTopics); TopicAuthorizationException(Set<String> unauthorizedTopics); TopicAuthorizationException(String message)

Methods:

public Set<String> unauthorizedTopics()
Get the set of topics which failed authorization. May be empty if the set is not known in the context the exception was raised in.
Returns: possibly empty set of unauthorized topics
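Illustrative usage (not part of the generated Javadoc): a sketch of catching this exception around a consumer poll and reporting the topics that failed authorization. The poll timeout is a placeholder.

    import java.time.Duration;
    import org.apache.kafka.clients.consumer.Consumer;
    import org.apache.kafka.common.errors.TopicAuthorizationException;

    public class PollWithAuthorizationCheck {
        static void pollOnce(Consumer<String, String> consumer) {
            try {
                consumer.poll(Duration.ofMillis(500));
            } catch (TopicAuthorizationException e) {
                // unauthorizedTopics() may be empty if the failing topics are not known here.
                System.err.println("Not authorized to read topics: " + e.unauthorizedTopics());
            }
        }
    }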
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/TopicDeletionDisabledException.html b/static/41/javadoc/org/apache/kafka/common/errors/TopicDeletionDisabledException.html new file mode 100644 index 000000000..6c058b623

Class TopicDeletionDisabledException

All Implemented Interfaces: Serializable
public class TopicDeletionDisabledException extends ApiException

Constructors: TopicDeletionDisabledException(); TopicDeletionDisabledException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/TopicExistsException.html b/static/41/javadoc/org/apache/kafka/common/errors/TopicExistsException.html new file mode 100644 index 000000000..522183c40

Class TopicExistsException

All Implemented Interfaces: Serializable
public class TopicExistsException extends ApiException

Constructors: TopicExistsException(String message); TopicExistsException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/TransactionAbortableException.html b/static/41/javadoc/org/apache/kafka/common/errors/TransactionAbortableException.html new file mode 100644 index 000000000..21c4e46b6

Class TransactionAbortableException

All Implemented Interfaces: Serializable
public class TransactionAbortableException extends ApiException

Constructors: TransactionAbortableException(String message, Throwable cause); TransactionAbortableException(String message)
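Illustrative usage (not part of the generated Javadoc, and not prescribed by it): one way an application using the transactional producer might react to this exception, namely aborting the current transaction and letting the caller retry in a new one. The topic, key, value, and overall flow are assumptions for illustration only.

    import org.apache.kafka.clients.producer.Producer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.errors.TransactionAbortableException;

    public class TransactionalSendSketch {
        // Hedged sketch: on TransactionAbortableException, abort the in-flight transaction
        // and report failure so the caller can decide whether to retry in a fresh transaction.
        static boolean trySendInTransaction(Producer<String, String> producer) {
            producer.beginTransaction();
            try {
                producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
                producer.commitTransaction();
                return true;
            } catch (TransactionAbortableException e) {
                producer.abortTransaction();
                return false; // caller may retry the whole batch in a new transaction
            }
        }
    }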
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/TransactionAbortedException.html b/static/41/javadoc/org/apache/kafka/common/errors/TransactionAbortedException.html new file mode 100644 index 000000000..274cf5de0

Class TransactionAbortedException

All Implemented Interfaces: Serializable
public class TransactionAbortedException extends ApiException

This is the exception thrown when we are aborting any undrained batches during a transaction which is aborted without any underlying cause, which likely means that the user chose to abort.

Constructors: TransactionAbortedException(String message, Throwable cause); TransactionAbortedException(String message); TransactionAbortedException()

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/TransactionCoordinatorFencedException.html b/static/41/javadoc/org/apache/kafka/common/errors/TransactionCoordinatorFencedException.html new file mode 100644 index 000000000..decde5b82

Class TransactionCoordinatorFencedException

All Implemented Interfaces: Serializable
public class TransactionCoordinatorFencedException extends ApiException

Constructors: TransactionCoordinatorFencedException(String message); TransactionCoordinatorFencedException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/TransactionalIdAuthorizationException.html b/static/41/javadoc/org/apache/kafka/common/errors/TransactionalIdAuthorizationException.html new file mode 100644 index 000000000..e6a6c7a76

Class TransactionalIdAuthorizationException

All Implemented Interfaces: Serializable
public class TransactionalIdAuthorizationException extends AuthorizationException

Constructors: TransactionalIdAuthorizationException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/TransactionalIdNotFoundException.html b/static/41/javadoc/org/apache/kafka/common/errors/TransactionalIdNotFoundException.html new file mode 100644 index 000000000..3bcd1916e

Class TransactionalIdNotFoundException

All Implemented Interfaces: Serializable
public class TransactionalIdNotFoundException extends ApiException

Constructors: TransactionalIdNotFoundException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnacceptableCredentialException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnacceptableCredentialException.html new file mode 100644 index 000000000..55cc02c4d

Class UnacceptableCredentialException

All Implemented Interfaces: Serializable
public class UnacceptableCredentialException extends ApiException

Exception thrown when attempting to define a credential that does not meet the criteria for acceptability (for example, attempting to create a SCRAM credential with an empty username or password or too few/many iterations).

Constructors:

public UnacceptableCredentialException(String message)
Parameters:
message - the exception's message

public UnacceptableCredentialException(String message, Throwable cause)
Parameters:
message - the exception's message
cause - the exception's cause
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnknownControllerIdException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnknownControllerIdException.html new file mode 100644 index 000000000..8f6266b4d

Class UnknownControllerIdException

All Implemented Interfaces: Serializable
public class UnknownControllerIdException extends ApiException

Constructors: UnknownControllerIdException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnknownLeaderEpochException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnknownLeaderEpochException.html new file mode 100644 index 000000000..e2015b89d

Class UnknownLeaderEpochException

All Implemented Interfaces: Serializable
public class UnknownLeaderEpochException extends RetriableException

The request contained a leader epoch which is larger than that on the broker that received the request. This can happen if the client observes a metadata update before it has been propagated to all brokers. Clients need not refresh metadata before retrying.

Constructors: UnknownLeaderEpochException(String message); UnknownLeaderEpochException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnknownMemberIdException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnknownMemberIdException.html new file mode 100644 index 000000000..c682033dc

Class UnknownMemberIdException

All Implemented Interfaces: Serializable
public class UnknownMemberIdException extends ApplicationRecoverableException

Constructors: UnknownMemberIdException(); UnknownMemberIdException(String message, Throwable cause); UnknownMemberIdException(String message); UnknownMemberIdException(Throwable cause)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnknownProducerIdException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnknownProducerIdException.html new file mode 100644 index 000000000..eca0233a3

Class UnknownProducerIdException

All Implemented Interfaces: Serializable
public class UnknownProducerIdException extends OutOfOrderSequenceException

This exception is raised by the broker if it could not locate the producer metadata associated with the producerId in question. This could happen if, for instance, the producer's records were deleted because their retention time had elapsed. Once the last records of the producerId are removed, the producer's metadata is removed from the broker, and future appends by the producer will return this exception.

Constructors: UnknownProducerIdException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnknownServerException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnknownServerException.html new file mode 100644 index 000000000..a4e358b5e

Class UnknownServerException

All Implemented Interfaces: Serializable
public class UnknownServerException extends ApiException

An error occurred on the server for which the client doesn't have a corresponding error code. This is generally an unexpected error.

Constructors: UnknownServerException(); UnknownServerException(String message); UnknownServerException(Throwable cause); UnknownServerException(String message, Throwable cause)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnknownSubscriptionIdException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnknownSubscriptionIdException.html new file mode 100644 index 000000000..89c613a15

Class UnknownSubscriptionIdException

All Implemented Interfaces: Serializable
public class UnknownSubscriptionIdException extends ApiException

This exception indicates that the client sent an invalid or outdated SubscriptionId.

Constructors: UnknownSubscriptionIdException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnknownTopicIdException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnknownTopicIdException.html new file mode 100644 index 000000000..49f0e9e42

Class UnknownTopicIdException

All Implemented Interfaces: Serializable
public class UnknownTopicIdException extends InvalidMetadataException

Constructors: UnknownTopicIdException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnknownTopicOrPartitionException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnknownTopicOrPartitionException.html new file mode 100644 index 000000000..cb80fc815

Class UnknownTopicOrPartitionException

All Implemented Interfaces: Serializable
public class UnknownTopicOrPartitionException extends InvalidMetadataException

This topic/partition doesn't exist. This exception is used in contexts where a topic doesn't seem to exist based on possibly stale metadata. This exception is retriable because the topic or partition might subsequently be created.

Constructors: UnknownTopicOrPartitionException(); UnknownTopicOrPartitionException(String message); UnknownTopicOrPartitionException(Throwable throwable); UnknownTopicOrPartitionException(String message, Throwable throwable)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnreleasedInstanceIdException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnreleasedInstanceIdException.html new file mode 100644 index 000000000..f7882d2f2

Class UnreleasedInstanceIdException

All Implemented Interfaces: Serializable
public class UnreleasedInstanceIdException extends ApiException

Constructors: UnreleasedInstanceIdException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnstableOffsetCommitException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnstableOffsetCommitException.html new file mode 100644 index 000000000..bbac11004

Class UnstableOffsetCommitException

All Implemented Interfaces: Serializable
public class UnstableOffsetCommitException extends RetriableException

Exception thrown when there are unstable offsets for the requested topic partitions.

Constructors: UnstableOffsetCommitException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnsupportedAssignorException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnsupportedAssignorException.html new file mode 100644 index 000000000..80661e4c9

Class UnsupportedAssignorException

All Implemented Interfaces: Serializable
public class UnsupportedAssignorException extends ApiException

Constructors: UnsupportedAssignorException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnsupportedByAuthenticationException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnsupportedByAuthenticationException.html new file mode 100644 index 000000000..18bc4d661

Class UnsupportedByAuthenticationException

All Implemented Interfaces: Serializable
public class UnsupportedByAuthenticationException extends ApiException

Authentication mechanism does not support the requested function.

Constructors: UnsupportedByAuthenticationException(String message); UnsupportedByAuthenticationException(String message, Throwable cause)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnsupportedCompressionTypeException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnsupportedCompressionTypeException.html new file mode 100644 index 000000000..f10c45228

Class UnsupportedCompressionTypeException

All Implemented Interfaces: Serializable
public class UnsupportedCompressionTypeException extends ApiException

The requesting client does not support the compression type of the given partition.

Constructors: UnsupportedCompressionTypeException(String message); UnsupportedCompressionTypeException(String message, Throwable cause)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnsupportedEndpointTypeException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnsupportedEndpointTypeException.html new file mode 100644 index 000000000..b244a762f

Class UnsupportedEndpointTypeException

All Implemented Interfaces: Serializable
public class UnsupportedEndpointTypeException extends ApiException

Constructors: UnsupportedEndpointTypeException(String message)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnsupportedForMessageFormatException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnsupportedForMessageFormatException.html new file mode 100644 index 000000000..5f48e7971

Class UnsupportedForMessageFormatException

All Implemented Interfaces: Serializable
public class UnsupportedForMessageFormatException extends InvalidConfigurationException

The message format version does not support the requested function. For example, if idempotence is requested and the topic is using a message format older than 0.11.0.0, then this error will be returned.

Constructors: UnsupportedForMessageFormatException(String message); UnsupportedForMessageFormatException(String message, Throwable cause)

diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnsupportedSaslMechanismException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnsupportedSaslMechanismException.html new file mode 100644 index 000000000..6b7905473

Class UnsupportedSaslMechanismException

All Implemented Interfaces: Serializable
public class UnsupportedSaslMechanismException extends AuthenticationException

This exception indicates that the SASL mechanism requested by the client is not enabled on the broker.

Constructors: UnsupportedSaslMechanismException(String message); UnsupportedSaslMechanismException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/UnsupportedVersionException.html b/static/41/javadoc/org/apache/kafka/common/errors/UnsupportedVersionException.html new file mode 100644 index 000000000..ccb2c1581

Class UnsupportedVersionException

All Implemented Interfaces: Serializable
public class UnsupportedVersionException extends InvalidConfigurationException

Indicates that a request API or version needed by the client is not supported by the broker. This is typically a fatal error as Kafka clients will downgrade request versions as needed except in cases where a needed feature is not available in old versions. Fatal errors can generally only be handled by closing the client instance, although in some cases it may be possible to continue without relying on the underlying feature. For example, when the producer is used with idempotence enabled, this error is fatal since the producer does not support reverting to weaker semantics. On the other hand, if this error is raised from KafkaConsumer.offsetsForTimes(Map), it would be possible to revert to alternative logic to set the consumer's position.

Constructors: UnsupportedVersionException(String message, Throwable cause); UnsupportedVersionException(String message)
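Illustrative usage (not part of the generated Javadoc): a sketch of the fallback mentioned above, where offsetsForTimes fails against an older broker and the consumer reverts to seeking to the beginning. The choice of fallback and the target timestamp are assumptions for illustration.

    import java.time.Instant;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;
    import org.apache.kafka.clients.consumer.Consumer;
    import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.errors.UnsupportedVersionException;

    public class SeekByTimestampWithFallback {
        static void seek(Consumer<String, String> consumer, Set<TopicPartition> partitions, Instant target) {
            try {
                Map<TopicPartition, Long> query = new HashMap<>();
                partitions.forEach(tp -> query.put(tp, target.toEpochMilli()));
                Map<TopicPartition, OffsetAndTimestamp> offsets = consumer.offsetsForTimes(query);
                offsets.forEach((tp, ot) -> {
                    if (ot != null) consumer.seek(tp, ot.offset()); // null means no offset at or after target
                });
            } catch (UnsupportedVersionException e) {
                // Broker too old to look up offsets by timestamp; fall back to the earliest offsets.
                consumer.seekToBeginning(partitions);
            }
        }
    }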
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/VoterNotFoundException.html b/static/41/javadoc/org/apache/kafka/common/errors/VoterNotFoundException.html new file mode 100644 index 000000000..09364fe79

Class VoterNotFoundException

All Implemented Interfaces: Serializable
public class VoterNotFoundException extends ApiException

Constructors: VoterNotFoundException(String message); VoterNotFoundException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/WakeupException.html b/static/41/javadoc/org/apache/kafka/common/errors/WakeupException.html new file mode 100644 index 000000000..3f5b0fdff --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/WakeupException.html @@ -0,0 +1,154 @@

WakeupException (kafka 4.1.0 API)

Class WakeupException

All Implemented Interfaces: Serializable

public class WakeupException extends KafkaException

Exception used to indicate preemption of a blocking operation by an external thread. For example,
KafkaConsumer.wakeup() can be used to break out of an active KafkaConsumer.poll(java.time.Duration),
which would raise an instance of this exception.

Constructor Details
  WakeupException()
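A common shutdown pattern built on this behaviour, as a hedged sketch (class, field and topic handling
are placeholders; only wakeup(), poll() and WakeupException come from the API above):

    import java.time.Duration;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.errors.WakeupException;

    public class PollLoop {
        private final KafkaConsumer<String, String> consumer;
        private volatile boolean closing = false;

        PollLoop(KafkaConsumer<String, String> consumer) {
            this.consumer = consumer;
        }

        void run() {
            try {
                while (!closing) {
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                    records.forEach(r -> System.out.println(r.value()));
                }
            } catch (WakeupException e) {
                // Expected when shutdown() is called from another thread; rethrow otherwise.
                if (!closing) throw e;
            } finally {
                consumer.close();
            }
        }

        void shutdown() {
            closing = true;
            consumer.wakeup(); // breaks out of a blocking poll() with a WakeupException
        }
    }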
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/package-summary.html b/static/41/javadoc/org/apache/kafka/common/errors/package-summary.html new file mode 100644 index 000000000..2cc19ab18 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/package-summary.html @@ -0,0 +1,579 @@

org.apache.kafka.common.errors (kafka 4.1.0 API)

Package org.apache.kafka.common.errors

package org.apache.kafka.common.errors

Provides common exception classes.
diff --git a/static/41/javadoc/org/apache/kafka/common/errors/package-tree.html b/static/41/javadoc/org/apache/kafka/common/errors/package-tree.html new file mode 100644 index 000000000..e7d36f900 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/errors/package-tree.html @@ -0,0 +1,282 @@

org.apache.kafka.common.errors Class Hierarchy (kafka 4.1.0 API)

Hierarchy For Package org.apache.kafka.common.errors

Class Hierarchy

Enum Class Hierarchy
diff --git a/static/41/javadoc/org/apache/kafka/common/header/Header.html b/static/41/javadoc/org/apache/kafka/common/header/Header.html new file mode 100644 index 000000000..a88d33c7d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/header/Header.html @@ -0,0 +1,133 @@

Header (kafka 4.1.0 API)

Interface Header

public interface Header

Method Summary
  String key()
  byte[] value()
diff --git a/static/41/javadoc/org/apache/kafka/common/header/Headers.html b/static/41/javadoc/org/apache/kafka/common/header/Headers.html new file mode 100644 index 000000000..69b20ecc6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/header/Headers.html @@ -0,0 +1,241 @@

Headers (kafka 4.1.0 API)

Interface Headers

All Superinterfaces: Iterable<Header>

public interface Headers extends Iterable<Header>

Method Summary
  Headers add(String key, byte[] value) - Creates and adds a header, to the end, returning if the operation succeeded.
  Headers add(Header header) - Adds a header (key inside), to the end, returning if the operation succeeded.
  Iterable<Header> headers(String key) - Returns all headers for the given key, in the order they were added in, if present; if no headers are present an empty iterable is returned.
  Header lastHeader(String key) - Returns just one (the very last) header for the given key, or null if not present.
  Headers remove(String key) - Removes all headers for the given key, returning if the operation succeeded.
  Header[] toArray() - Returns all headers as an array, in the order they were added in; mutating this array will not affect the Headers; if no headers are present an empty array is returned.

Methods inherited from interface java.lang.Iterable: forEach, iterator, spliterator

Method Details
  Both add overloads and remove(String key) return this instance of the Headers once the operation is
  applied, and throw IllegalStateException if headers are in a read-only state.
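A hedged illustration of working with the Headers of a record; the header keys and values here
("trace-id", "origin") are made up, and the record is assumed to be created elsewhere:

    import java.nio.charset.StandardCharsets;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.header.Header;
    import org.apache.kafka.common.header.Headers;

    public class HeadersExample {
        static void tagRecord(ProducerRecord<String, String> record) {
            Headers headers = record.headers();
            // add() appends to the end and returns this, so calls can be chained
            headers.add("trace-id", "abc123".getBytes(StandardCharsets.UTF_8))
                   .add("origin", "web".getBytes(StandardCharsets.UTF_8));

            // lastHeader() returns the most recently added value for a key, or null
            Header trace = headers.lastHeader("trace-id");
            if (trace != null)
                System.out.println(new String(trace.value(), StandardCharsets.UTF_8));

            // headers(key) iterates all values for a key in insertion order
            for (Header h : headers.headers("origin"))
                System.out.println(h.key() + "=" + new String(h.value(), StandardCharsets.UTF_8));
        }
    }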
diff --git a/static/41/javadoc/org/apache/kafka/common/header/package-summary.html b/static/41/javadoc/org/apache/kafka/common/header/package-summary.html new file mode 100644 index 000000000..474e70e59 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/header/package-summary.html @@ -0,0 +1,100 @@

org.apache.kafka.common.header (kafka 4.1.0 API)

Package org.apache.kafka.common.header

package org.apache.kafka.common.header

Provides API for application-defined metadata attached to Kafka records.

Interfaces: Header, Headers
diff --git a/static/41/javadoc/org/apache/kafka/common/header/package-tree.html b/static/41/javadoc/org/apache/kafka/common/header/package-tree.html new file mode 100644 index 000000000..609befbc4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/header/package-tree.html @@ -0,0 +1,72 @@

org.apache.kafka.common.header Class Hierarchy (kafka 4.1.0 API)

Hierarchy For Package org.apache.kafka.common.header

Interface Hierarchy
  org.apache.kafka.common.header.Header
  java.lang.Iterable<T>
    org.apache.kafka.common.header.Headers
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/CompoundStat.NamedMeasurable.html b/static/41/javadoc/org/apache/kafka/common/metrics/CompoundStat.NamedMeasurable.html new file mode 100644 index 000000000..c9e2c2c51 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/CompoundStat.NamedMeasurable.html @@ -0,0 +1,173 @@

CompoundStat.NamedMeasurable (kafka 4.1.0 API)

Class CompoundStat.NamedMeasurable

java.lang.Object
  org.apache.kafka.common.metrics.CompoundStat.NamedMeasurable

Enclosing interface: CompoundStat

public static class CompoundStat.NamedMeasurable extends Object
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/CompoundStat.html b/static/41/javadoc/org/apache/kafka/common/metrics/CompoundStat.html new file mode 100644 index 000000000..54219f4fe --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/CompoundStat.html @@ -0,0 +1,153 @@

CompoundStat (kafka 4.1.0 API)

Interface CompoundStat

All Superinterfaces: Stat
All Known Implementing Classes: Frequencies, Meter, Percentiles

public interface CompoundStat extends Stat

A compound stat is a stat where a single measurement and associated data structure feeds many metrics.
This is the example for a histogram which has many associated percentiles.

Nested Class Summary
  static class CompoundStat.NamedMeasurable

Methods inherited from interface org.apache.kafka.common.metrics.Stat: record
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/Gauge.html b/static/41/javadoc/org/apache/kafka/common/metrics/Gauge.html new file mode 100644 index 000000000..d4d02f4f0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/Gauge.html @@ -0,0 +1,145 @@

Gauge (kafka 4.1.0 API)

Interface Gauge<T>

All Superinterfaces: MetricValueProvider<T>

Functional Interface: This is a functional interface and can therefore be used as the assignment target
for a lambda expression or method reference.

@FunctionalInterface
public interface Gauge<T> extends MetricValueProvider<T>

A gauge metric is an instantaneous reading of a particular value.

Method Details
  T value(MetricConfig config, long now) - Returns the current value associated with this gauge.
    config - The configuration for this metric
    now - The POSIX time in milliseconds the measurement is being taken
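Since Gauge is a functional interface, a lambda can be registered directly on a Metrics registry (the
registry class is documented later on this page set). A minimal sketch; the metric and group names are
illustrative:

    import java.util.concurrent.ConcurrentLinkedQueue;
    import org.apache.kafka.common.MetricName;
    import org.apache.kafka.common.metrics.Gauge;
    import org.apache.kafka.common.metrics.Metrics;

    public class GaugeExample {
        public static void main(String[] args) {
            ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
            try (Metrics metrics = new Metrics()) {
                MetricName name = metrics.metricName("queue-size", "example-metrics",
                        "Instantaneous number of queued items");
                // The lambda is invoked with the metric config and the current POSIX time
                metrics.addMetric(name, (Gauge<Integer>) (config, now) -> queue.size());

                queue.add("a");
                System.out.println(metrics.metric(name).metricValue()); // prints 1
            }
        }
    }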
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/JmxReporter.html b/static/41/javadoc/org/apache/kafka/common/metrics/JmxReporter.html new file mode 100644 index 000000000..d7645d484 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/JmxReporter.html @@ -0,0 +1,468 @@

JmxReporter (kafka 4.1.0 API)

Class JmxReporter

java.lang.Object
  org.apache.kafka.common.metrics.JmxReporter

All Implemented Interfaces: AutoCloseable, Configurable, MetricsReporter, Reconfigurable

public class JmxReporter extends Object implements MetricsReporter

Register metrics in JMX as dynamic mbeans based on the metric names.

Constructor Details
  JmxReporter()

Method Details
  void configure(Map<String,?> configs) - Configure this class with the given key-value pairs.
  Set<String> reconfigurableConfigs() - Returns the names of configs that may be reconfigured.
  void validateReconfiguration(Map<String,?> configs) throws ConfigException - Validates the provided configuration. The provided map contains all configs, including any reconfigurable configs that may be different from the initial configuration. Reconfiguration will not be performed if this method throws any exception. The exception message from ConfigException will be returned to the client in the AlterConfigs response.
  void reconfigure(Map<String,?> configs) - Reconfigures this instance with the given key-value pairs. The provided map contains all configs, including any reconfigurable configs that may have changed since the object was initially configured using Configurable.configure(Map). This method will only be invoked if the configs have passed validation using Reconfigurable.validateReconfiguration(Map).
  void init(List<KafkaMetric> metrics) - Called when the reporter is first registered, to initially register all existing metrics.
  boolean containsMbean(String mbeanName)
  void metricChange(KafkaMetric metric) - Called whenever a metric is updated or added.
  void metricRemoval(KafkaMetric metric) - Called whenever a metric is removed.
  void close() - Called when the metrics repository is closed.
  static Predicate<String> compilePredicate(Map<String,?> configs)
  void contextChange(MetricsContext metricsContext) - Sets the context labels for the service or library exposing metrics. This will be called before MetricsReporter.init(List) and may be called anytime after that.
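A minimal sketch of wiring the reporter into a Metrics registry so each metric shows up as an MBean
attribute; the sensor and metric names are placeholders, and Avg comes from
org.apache.kafka.common.metrics.stats:

    import org.apache.kafka.common.metrics.JmxReporter;
    import org.apache.kafka.common.metrics.Metrics;
    import org.apache.kafka.common.metrics.Sensor;
    import org.apache.kafka.common.metrics.stats.Avg;

    public class JmxReporterExample {
        public static void main(String[] args) {
            try (Metrics metrics = new Metrics()) {
                // Register the reporter; metrics become dynamic MBeans named after the metric group
                metrics.addReporter(new JmxReporter());

                Sensor sensor = metrics.sensor("request-latency");
                sensor.add(metrics.metricName("latency-avg", "example-metrics"), new Avg());
                sensor.record(42.0);
            }
        }
    }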
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/KafkaMetric.html b/static/41/javadoc/org/apache/kafka/common/metrics/KafkaMetric.html new file mode 100644 index 000000000..5b5f5ae77 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/KafkaMetric.html @@ -0,0 +1,279 @@

KafkaMetric (kafka 4.1.0 API)

Class KafkaMetric

java.lang.Object
  org.apache.kafka.common.metrics.KafkaMetric

All Implemented Interfaces: Metric

public final class KafkaMetric extends Object implements Metric

Constructor Details
  KafkaMetric(Object lock, MetricName metricName, MetricValueProvider<?> valueProvider, MetricConfig config, org.apache.kafka.common.utils.Time time)
    Create a metric to monitor an object that implements MetricValueProvider.
    lock - The lock used to prevent race condition
    metricName - The name of the metric
    valueProvider - The metric value provider associated with this metric
    config - The configuration of the metric
    time - The time instance to use with the metrics

Method Details
  MetricConfig config() - Get the configuration of this metric. This is supposed to be used by server only.
  MetricName metricName() - Get the metric name.
  Object metricValue() - Take the metric and return the value, which could be a Measurable or a Gauge; throws IllegalStateException if the underlying metric is not a Measurable or a Gauge.
  boolean isMeasurable() - Determines if the metric value provider is of type Measurable.
  Measurable measurable() - Get the underlying metric provider, which should be a Measurable; throws IllegalStateException if the underlying metric is not a Measurable.
  void config(MetricConfig config) - Set the metric config. This is supposed to be used by server only.
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/KafkaMetricsContext.html b/static/41/javadoc/org/apache/kafka/common/metrics/KafkaMetricsContext.html new file mode 100644 index 000000000..804b34144 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/KafkaMetricsContext.html @@ -0,0 +1,209 @@

KafkaMetricsContext (kafka 4.1.0 API)

Class KafkaMetricsContext

java.lang.Object
  org.apache.kafka.common.metrics.KafkaMetricsContext

All Implemented Interfaces: MetricsContext

public class KafkaMetricsContext extends Object implements MetricsContext

An implementation of MetricsContext, it encapsulates required metrics context properties for Kafka services and clients.

Constructor Details
  KafkaMetricsContext(String namespace) - Create a MetricsContext with a namespace and no service or client properties. namespace is the value for the _namespace key.
  KafkaMetricsContext(String namespace, Map<String,?> contextLabels) - Create a MetricsContext with a namespace and service or client properties. contextLabels are additional entries to add to the context; values will be converted to string using Object.toString().

Method Details
  Map<String,String> contextLabels() - Returns the labels for this metrics context; never null but possibly empty.
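A hedged sketch of passing a context into a Metrics registry; the namespace "my.app" and the label are
placeholders, and org.apache.kafka.common.utils.Time.SYSTEM is the time implementation that appears in
the constructor signatures above:

    import java.util.List;
    import java.util.Map;
    import org.apache.kafka.common.metrics.JmxReporter;
    import org.apache.kafka.common.metrics.KafkaMetricsContext;
    import org.apache.kafka.common.metrics.MetricConfig;
    import org.apache.kafka.common.metrics.Metrics;
    import org.apache.kafka.common.metrics.MetricsContext;
    import org.apache.kafka.common.metrics.MetricsReporter;
    import org.apache.kafka.common.utils.Time;

    public class MetricsContextExample {
        public static void main(String[] args) {
            // _namespace becomes the prefix the JmxReporter uses for MBean names
            MetricsContext context = new KafkaMetricsContext("my.app",
                    Map.of("service.id", "example-1"));
            List<MetricsReporter> reporters = List.of(new JmxReporter());
            try (Metrics metrics = new Metrics(new MetricConfig(), reporters, Time.SYSTEM, context)) {
                System.out.println(context.contextLabels());
            }
        }
    }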
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/Measurable.html b/static/41/javadoc/org/apache/kafka/common/metrics/Measurable.html new file mode 100644 index 000000000..329165750 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/Measurable.html @@ -0,0 +1,150 @@

Measurable (kafka 4.1.0 API)

Interface Measurable

All Superinterfaces: MetricValueProvider<Double>
All Known Subinterfaces: MeasurableStat
All Known Implementing Classes: Avg, CumulativeCount, CumulativeSum, Frequencies, Max, Min, Percentiles, Rate, SampledStat, SimpleRate, TokenBucket, Value, WindowedCount, WindowedSum

public interface Measurable extends MetricValueProvider<Double>

A measurable quantity that can be registered as a metric.

Method Details
  double measure(MetricConfig config, long now) - Measure this quantity and return the result as a double.
    config - The configuration for this metric
    now - The POSIX time in milliseconds the measurement is being taken
    Returns: The measured value
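Because Measurable has a single abstract method, it can also be supplied as a lambda to expose an
existing value as a metric. A minimal sketch under made-up names:

    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.kafka.common.MetricName;
    import org.apache.kafka.common.metrics.Measurable;
    import org.apache.kafka.common.metrics.Metrics;

    public class MeasurableExample {
        public static void main(String[] args) {
            AtomicLong bytesInFlight = new AtomicLong();
            try (Metrics metrics = new Metrics()) {
                MetricName name = metrics.metricName("bytes-in-flight", "example-metrics");
                // Effectively (config, now) -> double
                Measurable measurable = (config, now) -> bytesInFlight.get();
                metrics.addMetric(name, measurable);

                bytesInFlight.addAndGet(1024);
                System.out.println(metrics.metric(name).metricValue()); // prints 1024.0
            }
        }
    }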
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/MeasurableStat.html b/static/41/javadoc/org/apache/kafka/common/metrics/MeasurableStat.html new file mode 100644 index 000000000..4ec02874e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/MeasurableStat.html @@ -0,0 +1,107 @@

MeasurableStat (kafka 4.1.0 API)

Interface MeasurableStat

All Superinterfaces: Measurable, MetricValueProvider<Double>, Stat
All Known Implementing Classes: Avg, CumulativeCount, CumulativeSum, Frequencies, Max, Min, Percentiles, Rate, SampledStat, SimpleRate, TokenBucket, Value, WindowedCount, WindowedSum

public interface MeasurableStat extends Stat, Measurable

A MeasurableStat is a Stat that is also Measurable (i.e. can produce a single floating point value).
This is the interface used for most of the simple statistics such as Avg, Max, CumulativeCount, etc.

Methods inherited from interface org.apache.kafka.common.metrics.Measurable: measure
Methods inherited from interface org.apache.kafka.common.metrics.Stat: record
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/MetricConfig.html b/static/41/javadoc/org/apache/kafka/common/metrics/MetricConfig.html new file mode 100644 index 000000000..60d5be573 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/MetricConfig.html @@ -0,0 +1,297 @@

MetricConfig (kafka 4.1.0 API)

Class MetricConfig

java.lang.Object
  org.apache.kafka.common.metrics.MetricConfig

public class MetricConfig extends Object

Configuration values for metrics.

Field Details
  public static final int DEFAULT_NUM_SAMPLES

Constructor Details
  MetricConfig()
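The method details are not shown in this extract, so the following sketch assumes the fluent setters
(samples, timeWindow, recordLevel, tags) found in the Kafka clients library; the values are arbitrary:

    import java.util.Map;
    import java.util.concurrent.TimeUnit;
    import org.apache.kafka.common.metrics.MetricConfig;
    import org.apache.kafka.common.metrics.Metrics;
    import org.apache.kafka.common.metrics.Sensor;

    public class MetricConfigExample {
        public static void main(String[] args) {
            // Assumed fluent API: each setter returns the same MetricConfig instance
            MetricConfig config = new MetricConfig()
                    .samples(2)
                    .timeWindow(30, TimeUnit.SECONDS)
                    .recordLevel(Sensor.RecordingLevel.DEBUG)
                    .tags(Map.of("app", "example"));
            try (Metrics metrics = new Metrics(config)) {
                System.out.println(metrics.config().samples());
            }
        }
    }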
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/MetricValueProvider.html b/static/41/javadoc/org/apache/kafka/common/metrics/MetricValueProvider.html new file mode 100644 index 000000000..d737ab3be --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/MetricValueProvider.html @@ -0,0 +1,94 @@

MetricValueProvider (kafka 4.1.0 API)

Interface MetricValueProvider<T>

All Known Subinterfaces: Gauge<T>, Measurable, MeasurableStat
All Known Implementing Classes: Avg, CumulativeCount, CumulativeSum, Frequencies, Max, Min, Percentiles, Rate, SampledStat, SimpleRate, TokenBucket, Value, WindowedCount, WindowedSum

public interface MetricValueProvider<T>

Super-interface for Measurable or Gauge that provides metric values. In the future for Java8 and above,
Gauge.value(MetricConfig, long) will be moved to this interface with a default implementation in
Measurable that returns Measurable.measure(MetricConfig, long).
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/Metrics.html b/static/41/javadoc/org/apache/kafka/common/metrics/Metrics.html new file mode 100644 index 000000000..683fdb316 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/Metrics.html @@ -0,0 +1,946 @@

Metrics (kafka 4.1.0 API)

Class Metrics

java.lang.Object
  org.apache.kafka.common.metrics.Metrics

All Implemented Interfaces: Closeable, AutoCloseable

public final class Metrics extends Object implements Closeable

A registry of sensors and metrics. A metric is a named, numerical measurement. A sensor is a handle to
record numerical measurements as they occur. Each Sensor has zero or more associated metrics. For example
a Sensor might represent message sizes and we might associate with this sensor a metric for the average,
maximum, or other statistics computed off the sequence of message sizes that are recorded by the sensor.

Usage looks something like this:

    // set up metrics:
    Metrics metrics = new Metrics(); // this is the global repository of metrics and sensors
    Sensor sensor = metrics.sensor("message-sizes");
    MetricName metricName = new MetricName("message-size-avg", "producer-metrics");
    sensor.add(metricName, new Avg());
    metricName = new MetricName("message-size-max", "producer-metrics");
    sensor.add(metricName, new Max());

    // as messages are sent we record the sizes
    sensor.record(messageSize);

Constructor Details
  Metrics() - Create a metrics repository with no metric reporters and default configuration. Expiration of Sensors is disabled.
  Metrics(org.apache.kafka.common.utils.Time time) - Create a metrics repository with no metric reporters and default configuration. Expiration of Sensors is disabled.
  Metrics(MetricConfig defaultConfig, org.apache.kafka.common.utils.Time time) - Create a metrics repository with no metric reporters and the given default configuration. Expiration of Sensors is disabled.
  Metrics(MetricConfig defaultConfig) - Create a metrics repository with no reporters and the given default config, used for any metric that doesn't override its own config. Expiration of Sensors is disabled.
  Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, org.apache.kafka.common.utils.Time time) - Create a metrics repository with a default config and the given metric reporters. Expiration of Sensors is disabled.
  Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, org.apache.kafka.common.utils.Time time, MetricsContext metricsContext) - Create a metrics repository with a default config, metric reporters and a metrics context to initialize the reporters with. Expiration of Sensors is disabled.
  Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, org.apache.kafka.common.utils.Time time, boolean enableExpiration) - Create a metrics repository with a default config, the given metric reporters and the ability to expire eligible sensors; enableExpiration is true if the metrics instance can garbage collect inactive sensors, false otherwise.
  Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, org.apache.kafka.common.utils.Time time, boolean enableExpiration, MetricsContext metricsContext) - As above, plus the metricsContext to initialize metrics reporters with.

Method Details
  MetricName metricName(String name, String group, String description, Map<String,String> tags) - Create a MetricName with the given name, group, description and tags, plus default tags specified in the metric configuration. A tag in tags takes precedence if the same tag key is specified in the default metric configuration.
  MetricName metricName(String name, String group, String description) - Create a MetricName with the given name, group, description, and default tags specified in the metric configuration.
  MetricName metricName(String name, String group) - Create a MetricName with the given name, group and default tags specified in the metric configuration.
  MetricName metricName(String name, String group, String description, String... keyValue) - Create a MetricName with the given name, group, description, and keyValue as tags (must come in pairs), plus default tags specified in the metric configuration. A tag in keyValue takes precedence if the same tag key is specified in the default metric configuration.
  MetricName metricName(String name, String group, Map<String,String> tags) - Create a MetricName with the given name, group and tags, plus default tags specified in the metric configuration.
  static String toHtmlTable(String domain, Iterable<MetricNameTemplate> allMetrics) - Use the specified domain and metric name templates to generate an HTML table documenting the metrics. A separate table section is generated for each of the MBeans and the associated attributes. The MBean names are lexicographically sorted to determine the order of these sections, so the order depends on the tags in each MetricNameTemplate. Neither argument may be null; the returned string is never null.
  MetricConfig config()
  Sensor getSensor(String name) - Get the sensor with the given name, or null if no such sensor exists.
  Sensor sensor(String name) - Get or create a sensor with the given unique name and no parent sensors, with a default recording level of INFO. Overloads accept a Sensor.RecordingLevel, parent Sensors (which receive every value recorded with this sensor), a default MetricConfig for metrics that don't have their own config, and an inactiveSensorExpirationTimeSeconds after which a sensor with no recorded values becomes eligible for removal.
  void removeSensor(String name) - Remove a sensor (if it exists), associated metrics and its children.
  void addMetric(MetricName metricName, Measurable measurable) and addMetric(MetricName metricName, MetricConfig config, Measurable measurable) - Add a metric to monitor an object that implements Measurable, without associating it with any sensor. These overloads are kept for binary compatibility and behave like the MetricValueProvider overloads below.
  void addMetric(MetricName metricName, MetricConfig config, MetricValueProvider<?> metricValueProvider) and addMetric(MetricName metricName, MetricValueProvider<?> metricValueProvider) - Add a metric to monitor an object that implements MetricValueProvider, not associated with any sensor. This is a way to expose existing values as metrics; the user is expected to add any additional synchronization to update and access metric values, if required. The overload taking a MetricConfig throws IllegalArgumentException if a metric with the same name already exists.
  KafkaMetric addMetricIfAbsent(MetricName metricName, MetricConfig config, MetricValueProvider<?> metricValueProvider) - Create or get an existing metric to monitor an object that implements MetricValueProvider; takes care of synchronisation while updating/accessing metrics from concurrent threads. Returns the existing KafkaMetric if already registered or else a newly created one.
  KafkaMetric removeMetric(MetricName metricName) - Remove a metric if it exists and return it, or null otherwise. If a metric is removed, `metricRemoval` will be invoked for each reporter.
  void addReporter(MetricsReporter reporter) - Add a MetricReporter.
  void removeReporter(MetricsReporter reporter) - Remove a MetricReporter.
  Map<MetricName,KafkaMetric> metrics() - Get all the metrics currently maintained, indexed by metricName.
  List<MetricsReporter> reporters()
  KafkaMetric metric(MetricName metricName)
  MetricName metricInstance(MetricNameTemplate template, String... keyValue)
  MetricName metricInstance(MetricNameTemplate template, Map<String,String> tags)
  void close() - Close this metrics repository.
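A self-contained variant of the usage sketch above, using the metricName() helper and reading values
back through the metrics() map; the sensor and metric names are the javadoc's illustrative ones:

    import java.util.Map;
    import org.apache.kafka.common.MetricName;
    import org.apache.kafka.common.metrics.KafkaMetric;
    import org.apache.kafka.common.metrics.Metrics;
    import org.apache.kafka.common.metrics.Sensor;
    import org.apache.kafka.common.metrics.stats.Avg;
    import org.apache.kafka.common.metrics.stats.Max;

    public class MetricsExample {
        public static void main(String[] args) {
            try (Metrics metrics = new Metrics()) {
                Sensor sizes = metrics.sensor("message-sizes");
                sizes.add(metrics.metricName("message-size-avg", "producer-metrics"), new Avg());
                sizes.add(metrics.metricName("message-size-max", "producer-metrics"), new Max());

                // as messages are sent, record their sizes
                sizes.record(100);
                sizes.record(300);

                // metrics() exposes every registered metric keyed by MetricName
                for (Map.Entry<MetricName, KafkaMetric> entry : metrics.metrics().entrySet())
                    System.out.println(entry.getKey().name() + " = " + entry.getValue().metricValue());
            }
        }
    }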
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/MetricsContext.html b/static/41/javadoc/org/apache/kafka/common/metrics/MetricsContext.html new file mode 100644 index 000000000..a5b99b4af --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/MetricsContext.html @@ -0,0 +1,192 @@

MetricsContext (kafka 4.1.0 API)

Interface MetricsContext

All Known Implementing Classes: KafkaMetricsContext

public interface MetricsContext

MetricsContext encapsulates additional contextLabels about metrics exposed via a MetricsReporter.
The contextLabels() map provides the following information:
  in all components: a _namespace field indicating the component exposing metrics, e.g. kafka.server,
    kafka.consumer. The JmxReporter uses this as a prefix for MBean names.
  for clients and streams libraries: any freeform fields passed in via client properties in the form of
    metrics.context.<key>=<value>
  for kafka brokers: kafka.broker.id, kafka.cluster.id
  for connect workers: connect.kafka.cluster.id, connect.group.id

Method Details
  Map<String,String> contextLabels() - Returns the labels for this metrics context; never null but possibly empty.
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/MetricsReporter.html b/static/41/javadoc/org/apache/kafka/common/metrics/MetricsReporter.html new file mode 100644 index 000000000..646b33861 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/MetricsReporter.html @@ -0,0 +1,273 @@ + + + + +MetricsReporter (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface MetricsReporter

    +
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Configurable, Reconfigurable
    +
    +
    +
    All Known Implementing Classes:
    +
    JmxReporter
    +
    +
    +
    public interface MetricsReporter extends Reconfigurable, AutoCloseable

    A plugin interface to allow things to listen as new metrics are created so they can be reported.

    Implement ClusterResourceListener to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information.
    +
    +
    +
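A minimal sketch of a reporter that only logs what it sees; the class name is illustrative. configure comes from the inherited Configurable interface, and a reporter like this would typically be registered through the metric.reporters configuration.

```java
import java.util.List;
import java.util.Map;

import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.MetricsContext;
import org.apache.kafka.common.metrics.MetricsReporter;

public class LoggingMetricsReporter implements MetricsReporter {
    @Override
    public void configure(Map<String, ?> configs) {
        // Called once with the client/broker configuration (inherited from Configurable).
    }

    @Override
    public void contextChange(MetricsContext metricsContext) {
        // Called before init(List) with the labels describing the exposing component.
        System.out.println("context labels: " + metricsContext.contextLabels());
    }

    @Override
    public void init(List<KafkaMetric> metrics) {
        // All metrics that already exist when the reporter is registered.
        metrics.forEach(m -> System.out.println("existing metric: " + m.metricName()));
    }

    @Override
    public void metricChange(KafkaMetric metric) {
        System.out.println("added/updated: " + metric.metricName());
    }

    @Override
    public void metricRemoval(KafkaMetric metric) {
        System.out.println("removed: " + metric.metricName());
    }

    @Override
    public void close() {
        // Nothing to release in this sketch.
    }
}
```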
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      Called when the metrics repository is closed.
      +
      +
      default void
      +
      contextChange(MetricsContext metricsContext)
      +
      +
      Sets the context labels for the service or library exposing metrics.
      +
      +
      void
      +
      init(List<KafkaMetric> metrics)
      +
      +
      This is called when the reporter is first registered to initially register all existing metrics
      +
      +
      void
      + +
      +
      This is called whenever a metric is updated or added
      +
      +
      void
      + +
      +
      This is called whenever a metric is removed
      +
      +
      default Set<String>
      + +
      +
      Returns the names of configs that may be reconfigured.
      +
      +
      default void
      +
      reconfigure(Map<String,?> configs)
      +
      +
      Reconfigures this instance with the given key-value pairs.
      +
      +
      default void
      + +
      +
      Validates the provided configuration.
      +
      +
      +
      +
      +
      +

      Methods inherited from interface org.apache.kafka.common.Configurable

      +configure
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        init

        +
        void init(List<KafkaMetric> metrics)
        +
        This is called when the reporter is first registered to initially register all existing metrics
        +
        +
        Parameters:
        +
        metrics - All currently existing metrics
        +
        +
        +
      • +
      • +
        +

        metricChange

        +
        void metricChange(KafkaMetric metric)
        +
        This is called whenever a metric is updated or added
        +
        +
        Parameters:
        +
        metric - The metric that has been added or changed
        +
        +
        +
      • +
      • +
        +

        metricRemoval

        +
        void metricRemoval(KafkaMetric metric)
        +
        This is called whenever a metric is removed
        +
        +
        Parameters:
        +
        metric - The metric that has been removed
        +
        +
        +
      • +
      • +
        +

        close

        +
        void close()
        +
        Called when the metrics repository is closed.
        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        +
        +
      • +
      • +
        +

        reconfigurableConfigs

        +
        default Set<String> reconfigurableConfigs()
        +
        Description copied from interface: Reconfigurable
        +
        Returns the names of configs that may be reconfigured.
        +
        +
        Specified by:
        +
        reconfigurableConfigs in interface Reconfigurable
        +
        +
        +
      • +
      • +
        +

        validateReconfiguration

        +
        default void validateReconfiguration(Map<String,?> configs) + throws ConfigException
        +
        Description copied from interface: Reconfigurable
        +
        Validates the provided configuration. The provided map contains all configs including any reconfigurable configs that may be different from the initial configuration. Reconfiguration will not be performed if this method throws any exception.
        +
        +
        Specified by:
        +
        validateReconfiguration in interface Reconfigurable
        +
        Throws:
        +
        ConfigException - if the provided configs are not valid. The exception + message from ConfigException will be returned to the client in + the AlterConfigs response.
        +
        +
        +
      • +
      • +
        +

        reconfigure

        +
        default void reconfigure(Map<String,?> configs)
        +
        Description copied from interface: Reconfigurable
        +
        Reconfigures this instance with the given key-value pairs. The provided + map contains all configs including any reconfigurable configs that + may have changed since the object was initially configured using + Configurable.configure(Map). This method will only be invoked if + the configs have passed validation using Reconfigurable.validateReconfiguration(Map).
        +
        +
        Specified by:
        +
        reconfigure in interface Reconfigurable
        +
        +
        +
      • +
      • +
        +

        contextChange

        +
        default void contextChange(MetricsContext metricsContext)
        +
        Sets the context labels for the service or library exposing metrics. This will be called before init(List) and may be called anytime after that.
        +
        +
        Parameters:
        +
        metricsContext - the metric context
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/Monitorable.html b/static/41/javadoc/org/apache/kafka/common/metrics/Monitorable.html new file mode 100644 index 000000000..3f4c32fa3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/Monitorable.html @@ -0,0 +1,131 @@ + + + + +Monitorable (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Monitorable

    +
    +
    +
    +
    public interface Monitorable
    +
    Plugins can implement this interface to register their own metrics.
    +
    +
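A minimal sketch of a plugin that uses the callback below to register its own metrics; the class and metric names are illustrative, and a real plugin would also implement whatever plugin interface the component expects.

```java
import java.util.LinkedHashMap;

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Monitorable;
import org.apache.kafka.common.metrics.PluginMetrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.CumulativeCount;

public class MonitoredPlugin implements Monitorable {
    private Sensor requests;

    @Override
    public void withPluginMetrics(PluginMetrics metrics) {
        // Register a sensor and a metric; both are removed automatically when the plugin closes.
        requests = metrics.addSensor("requests");
        MetricName name = metrics.metricName("request-count",
                "Number of requests handled by this plugin", new LinkedHashMap<>());
        requests.add(name, new CumulativeCount());
    }

    public void handleRequest() {
        if (requests != null) {
            requests.record();
        }
    }
}
```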
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      Provides a PluginMetrics instance from the component that instantiates the plugin.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        withPluginMetrics

        +
        void withPluginMetrics(PluginMetrics metrics)
        +
        Provides a PluginMetrics instance from the component that instantiates the plugin. PluginMetrics can be used by the plugin to register and unregister metrics at any point in their lifecycle prior to their close method being called. Any metrics registered will be automatically removed when the plugin is closed.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/PluginMetrics.html b/static/41/javadoc/org/apache/kafka/common/metrics/PluginMetrics.html new file mode 100644 index 000000000..2df810d0c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/PluginMetrics.html @@ -0,0 +1,221 @@ + + + + +PluginMetrics (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface PluginMetrics

    +
    +
    +
    +
    public interface PluginMetrics
    +
    This allows plugins to register metrics and sensors. Any metrics registered by the plugin are automatically removed when the plugin is closed.
    +
    +
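Building on the Monitorable sketch above, the following illustrative example exposes an existing value through addMetric using a Gauge (a MetricValueProvider); the class, metric name and the queue field are assumptions for illustration.

```java
import java.util.LinkedHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Gauge;
import org.apache.kafka.common.metrics.PluginMetrics;

public class QueueDepthMetrics {
    private final ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();

    // Called by the plugin (see the Monitorable sketch above) with the PluginMetrics
    // instance provided by the component that instantiated it.
    public void register(PluginMetrics metrics) {
        MetricName name = metrics.metricName("queue-depth",
                "Number of pending entries in the plugin's queue", new LinkedHashMap<>());
        // Gauge is a MetricValueProvider, so it can be passed to addMetric directly.
        metrics.addMetric(name, (Gauge<Integer>) (config, now) -> queue.size());
    }
}
```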
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        metricName

        +
        MetricName metricName(String name, + String description, + LinkedHashMap<String,String> tags)
        +
        Create a MetricName with the given name, description and tags. The group will be set to "plugins" + Tags to uniquely identify the plugins are automatically added to the provided tags
        +
        +
        Parameters:
        +
        name - The name of the metric
        +
        description - A human-readable description to include in the metric
        +
        tags - Additional tags for the metric
        +
        Throws:
        +
        IllegalArgumentException - if any of the tag names collide with the default tags for the plugin
        +
        +
        +
      • +
      • +
        +

        addMetric

        +
        void addMetric(MetricName metricName, + MetricValueProvider<?> metricValueProvider)
        +
        Add a metric to monitor an object that implements MetricValueProvider. This metric won't be associated with any + sensor. This is a way to expose existing values as metrics.
        +
        +
        Parameters:
        +
        metricName - The name of the metric
        +
        metricValueProvider - The metric value provider associated with this metric
        +
        Throws:
        +
        IllegalArgumentException - if a metric with same name already exists
        +
        +
        +
      • +
      • +
        +

        removeMetric

        +
        void removeMetric(MetricName metricName)
        +
        Remove a metric if it exists.
        +
        +
        Parameters:
        +
        metricName - The name of the metric
        +
        Throws:
        +
        IllegalArgumentException - if a metric with this name does not exist
        +
        +
        +
      • +
      • +
        +

        addSensor

        +
        Sensor addSensor(String name)
        +
        Create a Sensor with the given unique name. The name must only be unique for the plugin, so different + plugins can use the same names.
        +
        +
        Parameters:
        +
        name - The sensor name
        +
        Returns:
        +
        The sensor
        +
        Throws:
        +
        IllegalArgumentException - if a sensor with same name already exists for this plugin
        +
        +
        +
      • +
      • +
        +

        removeSensor

        +
        void removeSensor(String name)
        +
        Remove a Sensor and its associated metrics.
        +
        +
        Parameters:
        +
        name - The name of the sensor to be removed
        +
        Throws:
        +
        IllegalArgumentException - if a sensor with this name does not exist
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/Quota.html b/static/41/javadoc/org/apache/kafka/common/metrics/Quota.html new file mode 100644 index 000000000..27649b10c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/Quota.html @@ -0,0 +1,236 @@ + + + + +Quota (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Quota

    +
    +
    java.lang.Object +
    org.apache.kafka.common.metrics.Quota
    +
    +
    +
    +
    public final class Quota extends Object

    An upper or lower bound for metrics
    +
    +
    + +
    +
    +
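A minimal sketch of attaching a quota to a sensor metric; it assumes the MetricConfig.quota setter from this package and uses illustrative metric names. Recording values that push the Rate beyond the bound raises QuotaViolationException.

```java
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Quota;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Rate;

public class QuotaExample {
    public static void main(String[] args) {
        try (Metrics metrics = new Metrics()) {
            MetricName name = metrics.metricName("request-rate", "example-metrics",
                    "Requests per second");
            // Quota.upperBound(5.0): the metric value must stay at or below 5.0.
            MetricConfig config = new MetricConfig().quota(Quota.upperBound(5.0));

            Sensor requests = metrics.sensor("requests");
            requests.add(name, new Rate(), config);
            // Each record() is checked against the quota; exceeding the bound
            // raises QuotaViolationException (see that class below).
            requests.record();
        }
    }
}
```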
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Quota

        +
        public Quota(double bound, + boolean upper)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        upperBound

        +
        public static Quota upperBound(double upperBound)
        +
        +
      • +
      • +
        +

        lowerBound

        +
        public static Quota lowerBound(double lowerBound)
        +
        +
      • +
      • +
        +

        isUpperBound

        +
        public boolean isUpperBound()
        +
        +
      • +
      • +
        +

        bound

        +
        public double bound()
        +
        +
      • +
      • +
        +

        acceptable

        +
        public boolean acceptable(double value)
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object obj)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/QuotaViolationException.html b/static/41/javadoc/org/apache/kafka/common/metrics/QuotaViolationException.html new file mode 100644 index 000000000..3c55ae788 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/QuotaViolationException.html @@ -0,0 +1,230 @@ + + + + +QuotaViolationException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class QuotaViolationException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class QuotaViolationException extends KafkaException

    Thrown when a sensor records a value that causes a metric to go outside the bounds configured as its quota
    +
    +
    See Also:
    +
    + +
    +
    +
    +
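A short sketch of handling the exception when recording; the helper method name is illustrative.

```java
import org.apache.kafka.common.metrics.QuotaViolationException;
import org.apache.kafka.common.metrics.Sensor;

public class QuotaHandling {
    // Record a value and report how far the offending metric went past its bound.
    static void recordWithQuotaCheck(Sensor sensor, double value) {
        try {
            sensor.record(value);
        } catch (QuotaViolationException e) {
            System.err.printf("quota violated for %s: value=%f bound=%f%n",
                    e.metric().metricName(), e.value(), e.bound());
        }
    }
}
```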
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        QuotaViolationException

        +
        public QuotaViolationException(KafkaMetric metric, + double value, + double bound)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        metric

        +
        public KafkaMetric metric()
        +
        +
      • +
      • +
        +

        value

        +
        public double value()
        +
        +
      • +
      • +
        +

        bound

        +
        public double bound()
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Throwable
        +
        +
        +
      • +
      • +
        +

        fillInStackTrace

        +
        public Throwable fillInStackTrace()
        +
        +
        Overrides:
        +
        fillInStackTrace in class Throwable
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/Sensor.RecordingLevel.html b/static/41/javadoc/org/apache/kafka/common/metrics/Sensor.RecordingLevel.html new file mode 100644 index 000000000..fe2bb84b5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/Sensor.RecordingLevel.html @@ -0,0 +1,312 @@ + + + + +Sensor.RecordingLevel (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class Sensor.RecordingLevel

    +
    +
    java.lang.Object +
    java.lang.Enum<Sensor.RecordingLevel> +
    org.apache.kafka.common.metrics.Sensor.RecordingLevel
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<Sensor.RecordingLevel>, Constable
    +
    +
    +
    Enclosing class:
    +
    Sensor
    +
    +
    +
    public static enum Sensor.RecordingLevel +extends Enum<Sensor.RecordingLevel>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        MAX_RECORDING_LEVEL_KEY

        +
        public static final int MAX_RECORDING_LEVEL_KEY
        +
        +
      • +
      • +
        +

        name

        +
        public final String name
        +
        an english description of the api--this is for debugging and can change
        +
        +
      • +
      • +
        +

        id

        +
        public final short id
        +
        the permanent and immutable id of an API--this can't change ever
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static Sensor.RecordingLevel[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static Sensor.RecordingLevel valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        forId

        +
        public static Sensor.RecordingLevel forId(int id)
        +
        +
      • +
      • +
        +

        forName

        +
        public static Sensor.RecordingLevel forName(String name)
        +
        Case insensitive lookup by recording level name
        +
        +
      • +
      • +
        +

        shouldRecord

        +
        public boolean shouldRecord(int configId)
        +
        +
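A small sketch of the lookup and filtering behaviour documented above, assuming the usual INFO and DEBUG constants; forName is case insensitive and shouldRecord is given the id of the configured level.

```java
import org.apache.kafka.common.metrics.Sensor;

public class RecordingLevelExample {
    public static void main(String[] args) {
        // forName is case insensitive, so "info" and "INFO" resolve to the same constant.
        Sensor.RecordingLevel configured = Sensor.RecordingLevel.forName("info");

        // shouldRecord takes the id of the *configured* level and answers whether a
        // sensor declared at this level would record.
        System.out.println(Sensor.RecordingLevel.DEBUG.shouldRecord(configured.id)); // false
        System.out.println(Sensor.RecordingLevel.INFO.shouldRecord(configured.id));  // true
    }
}
```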
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/Sensor.html b/static/41/javadoc/org/apache/kafka/common/metrics/Sensor.html new file mode 100644 index 000000000..361b3a82a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/Sensor.html @@ -0,0 +1,380 @@ + + + + +Sensor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Sensor

    +
    +
    java.lang.Object +
    org.apache.kafka.common.metrics.Sensor
    +
    +
    +
    +
    public final class Sensor extends Object

    A sensor applies a continuous sequence of numerical values to a set of associated metrics. For example, a sensor on message size would record a sequence of message sizes using the record(double) API and would maintain a set of metrics about request sizes such as the average or max.
    +
    +
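Following the message-size example in the description, a minimal sketch (metric names and group are illustrative):

```java
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;

public class SensorExample {
    public static void main(String[] args) {
        try (Metrics metrics = new Metrics()) {
            Sensor messageSizes = metrics.sensor("message-sizes");
            messageSizes.add(metrics.metricName("message-size-avg", "example-metrics",
                    "Average message size in bytes"), new Avg());
            messageSizes.add(metrics.metricName("message-size-max", "example-metrics",
                    "Maximum message size in bytes"), new Max());

            // Each record() updates every stat registered on the sensor.
            messageSizes.record(512.0);
            messageSizes.record(1024.0);
        }
    }
}
```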
    +
      + +
    • +
      +

      Nested Class Summary

      +
      Nested Classes
      +
      +
      Modifier and Type
      +
      Class
      +
      Description
      +
      static enum 
      + +
       
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      +
      add(MetricName metricName, + MeasurableStat stat)
      +
      +
      Register a metric with this sensor
      +
      +
      boolean
      +
      add(MetricName metricName, + MeasurableStat stat, + MetricConfig config)
      +
      +
      Register a metric with this sensor
      +
      +
      boolean
      + +
      +
      Register a compound statistic with this sensor with no config override
      +
      +
      boolean
      +
      add(CompoundStat stat, + MetricConfig config)
      +
      +
      Register a compound statistic with this sensor which yields multiple measurable quantities (like a histogram)
      +
      +
      void
      + +
      +
      Check if we have violated our quota for any metric that has a configured quota
      +
      +
      void
      +
      checkQuotas(long timeMs)
      +
       
      +
      boolean
      + +
      +
      Return true if the Sensor is eligible for removal due to inactivity.
      +
      +
      boolean
      + +
      +
      Return if metrics were registered with this sensor.
      +
      + + +
      +
      The name this sensor is registered with.
      +
      +
      void
      + +
      +
      Record an occurrence, this is just short-hand for record(1.0)
      +
      +
      void
      +
      record(double value)
      +
      +
      Record a value with this sensor
      +
      +
      void
      +
      record(double value, + long timeMs)
      +
      +
      Record a value at a known time.
      +
      +
      void
      +
      record(double value, + long timeMs, + boolean checkQuotas)
      +
      +
      Record a value at a known time.
      +
      +
      boolean
      + +
       
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        name

        +
        public String name()
        +
        The name this sensor is registered with. This name will be unique among all registered sensors.
        +
        +
      • +
      • +
        +

        shouldRecord

        +
        public boolean shouldRecord()
        +
        +
        Returns:
        +
        true if the sensor's record level indicates that the metric will be recorded, false otherwise
        +
        +
        +
      • +
      • +
        +

        record

        +
        public void record()
        +
        Record an occurrence, this is just short-hand for record(1.0)
        +
        +
      • +
      • +
        +

        record

        +
        public void record(double value)
        +
        Record a value with this sensor
        +
        +
        Parameters:
        +
        value - The value to record
        +
        Throws:
        +
        QuotaViolationException - if recording this value moves a metric beyond its configured maximum or minimum + bound
        +
        +
        +
      • +
      • +
        +

        record

        +
        public void record(double value, + long timeMs)
        +
        Record a value at a known time. This method is slightly faster than record(double) since it will reuse + the time stamp.
        +
        +
        Parameters:
        +
        value - The value we are recording
        +
        timeMs - The current POSIX time in milliseconds
        +
        Throws:
        +
        QuotaViolationException - if recording this value moves a metric beyond its configured maximum or minimum + bound
        +
        +
        +
      • +
      • +
        +

        record

        +
        public void record(double value, + long timeMs, + boolean checkQuotas)
        +
        Record a value at a known time. This method is slightly faster than record(double) since it will reuse + the time stamp.
        +
        +
        Parameters:
        +
        value - The value we are recording
        +
        timeMs - The current POSIX time in milliseconds
        +
        checkQuotas - Indicate if quota must be enforced or not
        +
        Throws:
        +
        QuotaViolationException - if recording this value moves a metric beyond its configured maximum or minimum + bound
        +
        +
        +
      • +
      • +
        +

        checkQuotas

        +
        public void checkQuotas()
        +
        Check if we have violated our quota for any metric that has a configured quota
        +
        +
      • +
      • +
        +

        checkQuotas

        +
        public void checkQuotas(long timeMs)
        +
        +
      • +
      • +
        +

        add

        +
        public boolean add(CompoundStat stat)
        +
        Register a compound statistic with this sensor with no config override
        +
        +
        Parameters:
        +
        stat - The stat to register
        +
        Returns:
        +
        true if stat is added to sensor, false if sensor is expired
        +
        +
        +
      • +
      • +
        +

        add

        +
        public boolean add(CompoundStat stat, + MetricConfig config)
        +
        Register a compound statistic with this sensor which yields multiple measurable quantities (like a histogram)
        +
        +
        Parameters:
        +
        stat - The stat to register
        +
        config - The configuration for this stat. If null then the stat will use the default configuration for this + sensor.
        +
        Returns:
        +
        true if stat is added to sensor, false if sensor is expired
        +
        +
        +
      • +
      • +
        +

        add

        +
        public boolean add(MetricName metricName, + MeasurableStat stat)
        +
        Register a metric with this sensor
        +
        +
        Parameters:
        +
        metricName - The name of the metric
        +
        stat - The statistic to keep
        +
        Returns:
        +
        true if metric is added to sensor, false if sensor is expired
        +
        +
        +
      • +
      • +
        +

        add

        +
        public boolean add(MetricName metricName, + MeasurableStat stat, + MetricConfig config)
        +
        Register a metric with this sensor
        +
        +
        Parameters:
        +
        metricName - The name of the metric
        +
        stat - The statistic to keep
        +
        config - A special configuration for this metric. If null use the sensor default configuration.
        +
        Returns:
        +
        true if metric is added to sensor, false if sensor is expired
        +
        +
        +
      • +
      • +
        +

        hasMetrics

        +
        public boolean hasMetrics()
        +
        Return if metrics were registered with this sensor.
        +
        +
        Returns:
        +
        true if metrics were registered, false otherwise
        +
        +
        +
      • +
      • +
        +

        hasExpired

        +
        public boolean hasExpired()
        +
        Return true if the Sensor is eligible for removal due to inactivity; false otherwise.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/Stat.html b/static/41/javadoc/org/apache/kafka/common/metrics/Stat.html new file mode 100644 index 000000000..4af38adc3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/Stat.html @@ -0,0 +1,146 @@ + + + + +Stat (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Stat

    +
    +
    +
    +
    All Known Subinterfaces:
    +
    CompoundStat, MeasurableStat
    +
    +
    +
    All Known Implementing Classes:
    +
    Avg, CumulativeCount, CumulativeSum, Frequencies, Max, Meter, Min, Percentiles, Rate, SampledStat, SimpleRate, TokenBucket, Value, WindowedCount, WindowedSum
    +
    +
    +
    public interface Stat
    +
    A Stat is a quantity such as average, max, etc that is computed off the stream of updates to a sensor
    +
    +
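To show how record fits together with Measurable, here is a sketch of a custom MeasurableStat that simply reports the last recorded value; the built-in Value stat plays a similar role, so this is purely illustrative.

```java
import org.apache.kafka.common.metrics.MeasurableStat;
import org.apache.kafka.common.metrics.MetricConfig;

// A minimal custom stat that reports the last recorded value.
public class LastValue implements MeasurableStat {
    private volatile double last = Double.NaN;

    @Override
    public void record(MetricConfig config, double value, long timeMs) {
        last = value; // Stat.record: called for every value recorded on the sensor
    }

    @Override
    public double measure(MetricConfig config, long now) {
        return last; // Measurable.measure: read out when the metric is queried
    }
}
```

It can then be registered like any other stat, e.g. sensor.add(metricName, new LastValue()).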
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      +
      record(MetricConfig config, + double value, + long timeMs)
      +
      +
      Record the given value
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        record

        +
        void record(MetricConfig config, + double value, + long timeMs)
        +
        Record the given value
        +
        +
        Parameters:
        +
        config - The configuration to use for this metric
        +
        value - The value to record
        +
        timeMs - The POSIX time in milliseconds this value occurred
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/package-summary.html b/static/41/javadoc/org/apache/kafka/common/metrics/package-summary.html new file mode 100644 index 000000000..c52c41502 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/package-summary.html @@ -0,0 +1,182 @@ + + + + +org.apache.kafka.common.metrics (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.common.metrics

    +
    +
    +
    package org.apache.kafka.common.metrics
    +
    +
    Provides the API used by Kafka clients to emit metrics which are then exposed using the MetricsReporter interface.
    +
    +
    +
      +
    • + +
    • +
    • +
      +
      +
      +
      +
      Class
      +
      Description
      + +
      +
      A compound stat is a stat where a single measurement and associated data structure feeds many metrics.
      +
      + +
       
      + +
      +
      A gauge metric is an instantaneous reading of a particular value.
      +
      + +
      +
      Register metrics in JMX as dynamic mbeans based on the metric names
      +
      + +
       
      + +
      +
      An implementation of MetricsContext, it encapsulates required metrics context properties for Kafka services and clients
      +
      + +
      +
      A measurable quantity that can be registered as a metric
      +
      + +
      +
      A MeasurableStat is a Stat that is also Measurable (i.e.
      +
      + +
      +
      Configuration values for metrics
      +
      + +
      +
      A registry of sensors and metrics.
      +
      + +
      +
      MetricsContext encapsulates additional contextLabels about metrics exposed via a + MetricsReporter
      +
      + +
      +
      A plugin interface to allow things to listen as new metrics are created so they can be reported.
      +
      + +
      +
      Super-interface for Measurable or Gauge that provides + metric values.
      +
      + +
      +
      Plugins can implement this interface to register their own metrics.
      +
      + +
      +
      This allows plugins to register metrics and sensors.
      +
      + +
      +
      An upper or lower bound for metrics
      +
      + +
      +
      Thrown when a sensor records a value that causes a metric to go outside the bounds configured as its quota
      +
      + +
      +
      A sensor applies a continuous sequence of numerical values to a set of associated metrics.
      +
      + +
       
      + +
      +
      A Stat is a quantity such as average, max, etc that is computed off the stream of updates to a sensor
      +
      +
      +
      +
      +
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/package-tree.html b/static/41/javadoc/org/apache/kafka/common/metrics/package-tree.html new file mode 100644 index 000000000..5876bce65 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/package-tree.html @@ -0,0 +1,147 @@ + + + + +org.apache.kafka.common.metrics Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.common.metrics

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +

    Enum Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Avg.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Avg.html new file mode 100644 index 000000000..c1a787013 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Avg.html @@ -0,0 +1,176 @@ + + + + +Avg (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + + +
    java.lang.Object +
    org.apache.kafka.common.metrics.stats.SampledStat +
    org.apache.kafka.common.metrics.stats.Avg
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Measurable, MeasurableStat, MetricValueProvider<Double>, Stat
    +
    +
    +
    public class Avg extends SampledStat

    A SampledStat that maintains a simple average over its samples.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Avg

        +
        public Avg()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        combine

        +
        public double combine(List<org.apache.kafka.common.metrics.stats.SampledStat.Sample> samples, + MetricConfig config, + long now)
        +
        +
        Specified by:
        +
        combine in class SampledStat
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/CumulativeCount.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/CumulativeCount.html new file mode 100644 index 000000000..dd42607f8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/CumulativeCount.html @@ -0,0 +1,191 @@ + + + + +CumulativeCount (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class CumulativeCount

    +
    +
    java.lang.Object +
    org.apache.kafka.common.metrics.stats.CumulativeSum +
    org.apache.kafka.common.metrics.stats.CumulativeCount
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Measurable, MeasurableStat, MetricValueProvider<Double>, Stat
    +
    +
    +
    public class CumulativeCount extends CumulativeSum

    A non-sampled version of WindowedCount maintained over all time. This is a special kind of CumulativeSum that always records 1 instead of the provided value. In other words, it counts the number of record(MetricConfig, double, long) invocations, instead of summing the recorded values.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        CumulativeCount

        +
        public CumulativeCount()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        record

        +
        public void record(MetricConfig config, + double value, + long timeMs)
        +
        Description copied from interface: Stat
        +
        Record the given value
        +
        +
        Specified by:
        +
        record in interface Stat
        +
        Overrides:
        +
        record in class CumulativeSum
        +
        Parameters:
        +
        config - The configuration to use for this metric
        +
        value - The value to record
        +
        timeMs - The POSIX time in milliseconds this value occurred
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/CumulativeSum.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/CumulativeSum.html new file mode 100644 index 000000000..479a77356 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/CumulativeSum.html @@ -0,0 +1,232 @@ + + + + +CumulativeSum (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class CumulativeSum

    +
    +
    java.lang.Object +
    org.apache.kafka.common.metrics.stats.CumulativeSum
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Measurable, MeasurableStat, MetricValueProvider<Double>, Stat
    +
    +
    +
    Direct Known Subclasses:
    +
    CumulativeCount
    +
    +
    +
    public class CumulativeSum extends Object implements MeasurableStat

    A non-sampled cumulative total maintained over all time. This is a non-sampled version of WindowedSum. See also CumulativeCount if you just want to increment the value by 1 on each recording.
    +
    +
    + +
    +
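A small sketch contrasting CumulativeSum with CumulativeCount on the same sensor; metric names are illustrative.

```java
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.CumulativeCount;
import org.apache.kafka.common.metrics.stats.CumulativeSum;

public class CumulativeExample {
    public static void main(String[] args) {
        try (Metrics metrics = new Metrics()) {
            Sensor bytesSent = metrics.sensor("bytes-sent");
            // CumulativeSum adds up the recorded values: 100 + 250 = 350.
            bytesSent.add(metrics.metricName("bytes-sent-total", "example-metrics"),
                    new CumulativeSum());
            // CumulativeCount records 1 per invocation: 2 after the two calls below.
            bytesSent.add(metrics.metricName("send-calls-total", "example-metrics"),
                    new CumulativeCount());

            bytesSent.record(100.0);
            bytesSent.record(250.0);
        }
    }
}
```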
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        CumulativeSum

        +
        public CumulativeSum()
        +
        +
      • +
      • +
        +

        CumulativeSum

        +
        public CumulativeSum(double value)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        record

        +
        public void record(MetricConfig config, + double value, + long now)
        +
        Description copied from interface: Stat
        +
        Record the given value
        +
        +
        Specified by:
        +
        record in interface Stat
        +
        Parameters:
        +
        config - The configuration to use for this metric
        +
        value - The value to record
        +
        now - The POSIX time in milliseconds this value occurred
        +
        +
        +
      • +
      • +
        +

        measure

        +
        public double measure(MetricConfig config, + long now)
        +
        Description copied from interface: Measurable
        +
        Measure this quantity and return the result as a double
        +
        +
        Specified by:
        +
        measure in interface Measurable
        +
        Parameters:
        +
        config - The configuration for this metric
        +
        now - The POSIX time in milliseconds the measurement is being taken
        +
        Returns:
        +
        The measured value
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Frequencies.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Frequencies.html new file mode 100644 index 000000000..d648bba03 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Frequencies.html @@ -0,0 +1,287 @@ + + + + +Frequencies (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Frequencies

    +
    +
    java.lang.Object +
    org.apache.kafka.common.metrics.stats.SampledStat +
    org.apache.kafka.common.metrics.stats.Frequencies
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    CompoundStat, Measurable, MeasurableStat, MetricValueProvider<Double>, Stat
    +
    +
    +
    public class Frequencies extends SampledStat implements CompoundStat

    A CompoundStat that represents a normalized distribution with a Frequency metric for each bucketed value. The values of the Frequency metrics specify the frequency of the center value appearing relative to the total number of values recorded.

    For example, consider a component that records failure or success of an operation using boolean values, with one metric to capture the percentage of operations that failed and another to capture the percentage of operations that succeeded.

    This can be accomplished by creating a Sensor to record the values, with 0.0 for false and 1.0 for true. Then, create a single Frequencies object that has two Frequency metrics: one centered around 0.0 and another centered around 1.0. The Frequencies object is a CompoundStat, and so it can be added directly to a Sensor so the metrics are created automatically.

    +
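The boolean success/failure case described above, as a minimal sketch with illustrative metric names:

```java
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Frequencies;

public class FrequenciesExample {
    public static void main(String[] args) {
        try (Metrics metrics = new Metrics()) {
            // One Frequency metric per outcome: the fraction of failures and of successes.
            Frequencies outcomes = Frequencies.forBooleanValues(
                    metrics.metricName("operation-failure-rate", "example-metrics"),
                    metrics.metricName("operation-success-rate", "example-metrics"));

            Sensor operationResults = metrics.sensor("operation-results");
            operationResults.add(outcomes); // a CompoundStat registers both metrics

            operationResults.record(1.0); // success
            operationResults.record(0.0); // failure
        }
    }
}
```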
    +
    +
      + +
    • +
      +

      Nested Class Summary

      +
      +

      Nested classes/interfaces inherited from interface org.apache.kafka.common.metrics.CompoundStat

      +CompoundStat.NamedMeasurable
      +
      +
    • + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      +
      Frequencies(int buckets, + double min, + double max, + Frequency... frequencies)
      +
      +
      Create a Frequencies that captures the values in the specified range into the given number of buckets, + where the buckets are centered around the minimum, maximum, and intermediate values.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      double
      +
      combine(List<org.apache.kafka.common.metrics.stats.SampledStat.Sample> samples, + MetricConfig config, + long now)
      +
       
      + +
      forBooleanValues(MetricName falseMetricName, + MetricName trueMetricName)
      +
      +
      Create a Frequencies instance with metrics for the frequency of a boolean sensor that records 0.0 for + false and 1.0 for true.
      +
      +
      double
      +
      frequency(MetricConfig config, + long now, + double centerValue)
      +
      +
      Return the computed frequency describing the number of occurrences of the values in the bucket for the given + center point, relative to the total number of occurrences in the samples.
      +
      + + +
       
      +
      +
      +
      +
      +

      Methods inherited from class org.apache.kafka.common.metrics.stats.SampledStat

      +current, measure, oldest, record, toString
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
      +
      +

      Methods inherited from interface org.apache.kafka.common.metrics.Stat

      +record
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Frequencies

        +
        public Frequencies(int buckets, + double min, + double max, + Frequency... frequencies)
        +
        Create a Frequencies that captures the values in the specified range into the given number of buckets, + where the buckets are centered around the minimum, maximum, and intermediate values.
        +
        +
        Parameters:
        +
        buckets - the number of buckets; must be at least 1
        +
        min - the minimum value to be captured
        +
        max - the maximum value to be captured
        +
        frequencies - the list of Frequency metrics, which at most should be one per bucket centered + on the bucket's value, though not every bucket need to correspond to a metric if the + value is not needed
        +
        Throws:
        +
        IllegalArgumentException - if any of the Frequency objects do not have a + center value within the specified range
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        forBooleanValues

        +
        public static Frequencies forBooleanValues(MetricName falseMetricName, + MetricName trueMetricName)
        +
        Create a Frequencies instance with metrics for the frequency of a boolean sensor that records 0.0 for + false and 1.0 for true.
        +
        +
        Parameters:
        +
        falseMetricName - the name of the metric capturing the frequency of failures; may be null if not needed
        +
        trueMetricName - the name of the metric capturing the frequency of successes; may be null if not needed
        +
        Returns:
        +
        the Frequencies instance; never null
        +
        Throws:
        +
        IllegalArgumentException - if both falseMetricName and trueMetricName are null
        +
        +
        +
      • +
      • +
        +

        stats

        + +
        +
        Specified by:
        +
        stats in interface CompoundStat
        +
        +
        +
      • +
      • +
        +

        frequency

        +
        public double frequency(MetricConfig config, + long now, + double centerValue)
        +
        Return the computed frequency describing the number of occurrences of the values in the bucket for the given + center point, relative to the total number of occurrences in the samples.
        +
        +
        Parameters:
        +
        config - the metric configuration
        +
        now - the current time in milliseconds
        +
        centerValue - the value corresponding to the center point of the bucket
        +
        Returns:
        +
        the frequency of the values in the bucket relative to the total number of samples
        +
        +
        +
      • +
      • +
        +

        combine

        +
        public double combine(List<org.apache.kafka.common.metrics.stats.SampledStat.Sample> samples, + MetricConfig config, + long now)
        +
        +
        Specified by:
        +
        combine in class SampledStat
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Frequency.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Frequency.html new file mode 100644 index 000000000..6eada4c88 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Frequency.html @@ -0,0 +1,205 @@ + + + + +Frequency (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Frequency

    +
    +
    java.lang.Object +
    org.apache.kafka.common.metrics.stats.Frequency
    +
    +
    +
    +
    public class Frequency +extends Object
    +
    Definition of a frequency metric used in a Frequencies compound statistic.
    +
    +
    +
      + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      +
      Frequency(MetricName name, + double centerValue)
      +
      +
      Create an instance with the given name and center point value.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      double
      + +
      +
      Get the value of this metrics center point.
      +
      + + +
      +
      Get the name of this metric.
      +
      + + +
       
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Frequency

        +
        public Frequency(MetricName name, + double centerValue)
        +
        Create an instance with the given name and center point value.
        +
        +
        Parameters:
        +
        name - the name of the frequency metric; may not be null
        +
        centerValue - the value identifying the Frequencies bucket to be reported
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        name

        +
        public MetricName name()
        +
        Get the name of this metric.
        +
        +
        Returns:
        +
        the metric name; never null
        +
        +
        +
      • +
      • +
        +

        centerValue

        +
        public double centerValue()
        +
        Get the value of this metric's center point.
        +
        +
        Returns:
        +
        the center point value
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Histogram.BinScheme.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Histogram.BinScheme.html new file mode 100644 index 000000000..b480ed056 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Histogram.BinScheme.html @@ -0,0 +1,179 @@ + + + + +Histogram.BinScheme (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Histogram.BinScheme

    +
    +
    +
    +
    All Known Implementing Classes:
    +
    Histogram.ConstantBinScheme, Histogram.LinearBinScheme
    +
    +
    +
    Enclosing class:
    +
    Histogram
    +
    +
    +
    public static interface Histogram.BinScheme

    An algorithm for determining the bin in which a value is to be placed as well as calculating the upper end of each bin.
    +
    +
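A short sketch exercising the two bin schemes defined below (ConstantBinScheme and LinearBinScheme); the bin counts and ranges are illustrative.

```java
import org.apache.kafka.common.metrics.stats.Histogram;

public class BinSchemeExample {
    public static void main(String[] args) {
        // 10 equally wide bins covering values from 0 to 100.
        Histogram.BinScheme constant = new Histogram.ConstantBinScheme(10, 0.0, 100.0);
        System.out.println(constant.toBin(42.0)); // 0-based bin index for 42.0
        System.out.println(constant.fromBin(4));  // value at the upper end of bin 4

        // 10 bins whose widths grow linearly, scaled so the last bin ends at 100.
        Histogram.BinScheme linear = new Histogram.LinearBinScheme(10, 100.0);
        System.out.println(linear.bins());        // 10
        System.out.println(linear.toBin(42.0));
    }
}
```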
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      int
      + +
      +
      Get the number of bins.
      +
      +
      double
      +
      fromBin(int bin)
      +
      +
      Determine the value at the upper range of the specified bin.
      +
      +
      int
      +
      toBin(double value)
      +
      +
      Determine the 0-based bin number in which the supplied value should be placed.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        bins

        +
        int bins()
        +
        Get the number of bins.
        +
        +
        Returns:
        +
        the number of bins
        +
        +
        +
      • +
      • +
        +

        toBin

        +
        int toBin(double value)
        +
        Determine the 0-based bin number in which the supplied value should be placed.
        +
        +
        Parameters:
        +
        value - the value
        +
        Returns:
        +
        the 0-based index of the bin
        +
        +
        +
      • +
      • +
        +

        fromBin

        +
        double fromBin(int bin)
        +
        Determine the value at the upper range of the specified bin.
        +
        +
        Parameters:
        +
        bin - the 0-based bin number
        +
        Returns:
        +
        the value at the upper end of the bin; or negative infinity + if the bin number is negative or positive infinity if the 0-based + bin number is greater than or equal to the number of bins.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Histogram.ConstantBinScheme.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Histogram.ConstantBinScheme.html new file mode 100644 index 000000000..f5177a6fa --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Histogram.ConstantBinScheme.html @@ -0,0 +1,236 @@ + + + + +Histogram.ConstantBinScheme (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Histogram.ConstantBinScheme

    +
    +
    java.lang.Object +
    org.apache.kafka.common.metrics.stats.Histogram.ConstantBinScheme
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Histogram.BinScheme
    +
    +
    +
    Enclosing class:
    +
    Histogram
    +
    +
    +
    public static class Histogram.ConstantBinScheme extends Object implements Histogram.BinScheme

    A scheme for calculating the bins where the width of each bin is a constant determined by the range of values and the number of bins.
    +
    +
    +
      + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      +
      ConstantBinScheme(int bins, + double min, + double max)
      +
      +
      Create a bin scheme with the specified number of bins that all have the same width.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      int
      + +
      +
      Get the number of bins.
      +
      +
      double
      +
      fromBin(int b)
      +
      +
      Determine the value at the upper range of the specified bin.
      +
      +
      int
      +
      toBin(double x)
      +
      +
      Determine the 0-based bin number in which the supplied value should be placed.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ConstantBinScheme

        +
        public ConstantBinScheme(int bins, + double min, + double max)
        +
        Create a bin scheme with the specified number of bins that all have the same width.
        +
        +
        Parameters:
        +
        bins - the number of bins; must be at least 2
        +
        min - the minimum value to be counted in the bins
        +
        max - the maximum value to be counted in the bins
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        bins

        +
        public int bins()
        +
        Description copied from interface: Histogram.BinScheme
        +
        Get the number of bins.
        +
        +
        Specified by:
        +
        bins in interface Histogram.BinScheme
        +
        Returns:
        +
        the number of bins
        +
        +
        +
      • +
      • +
        +

        fromBin

        +
        public double fromBin(int b)
        +
        Description copied from interface: Histogram.BinScheme
        +
        Determine the value at the upper range of the specified bin.
        +
        +
        Specified by:
        +
        fromBin in interface Histogram.BinScheme
        +
        Parameters:
        +
        b - the 0-based bin number
        +
        Returns:
        +
        the value at the upper end of the bin; or negative infinity + if the bin number is negative or positive infinity if the 0-based + bin number is greater than or equal to the number of bins.
        +
        +
        +
      • +
      • +
        +

        toBin

        +
        public int toBin(double x)
        +
        Description copied from interface: Histogram.BinScheme
        +
        Determine the 0-based bin number in which the supplied value should be placed.
        +
        +
        Specified by:
        +
        toBin in interface Histogram.BinScheme
        +
        Parameters:
        +
        x - the value
        +
        Returns:
        +
        the 0-based index of the bin
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Histogram.LinearBinScheme.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Histogram.LinearBinScheme.html new file mode 100644 index 000000000..43fe4a673 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Histogram.LinearBinScheme.html @@ -0,0 +1,234 @@
    Histogram.LinearBinScheme (kafka 4.1.0 API)

    Class Histogram.LinearBinScheme

    java.lang.Object
      org.apache.kafka.common.metrics.stats.Histogram.LinearBinScheme

    All Implemented Interfaces: Histogram.BinScheme
    Enclosing class: Histogram

    public static class Histogram.LinearBinScheme extends Object implements Histogram.BinScheme

    A scheme for calculating the bins where the width of each bin is one more than the previous bin, and therefore the bin widths are increasing at a linear rate. However, the bin widths are scaled such that the specified range of values will all fit within the bins (e.g., the upper range of the last bin is equal to the maximum value).

      Constructor Summary
      LinearBinScheme(int numBins, double max) - Create a linear bin scheme with the specified number of bins and the maximum value to be counted in the bins.

      Method Summary
      int     bins() - Get the number of bins.
      double  fromBin(int b) - Determine the value at the upper range of the specified bin.
      int     toBin(double x) - Determine the 0-based bin number in which the supplied value should be placed.

      Methods inherited from class java.lang.Object: equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

      Constructor Details

        public LinearBinScheme(int numBins, double max) - Create a linear bin scheme with the specified number of bins and the maximum value to be counted in the bins. Parameters: numBins - the number of bins; must be at least 2; max - the maximum value to be counted in the bins.

      Method Details

        public int bins() - Get the number of bins. Specified by: bins in interface Histogram.BinScheme. Returns: the number of bins.
        public double fromBin(int b) - Determine the value at the upper range of the specified bin. Specified by: fromBin in interface Histogram.BinScheme. Parameters: b - the 0-based bin number. Returns: the value at the upper end of the bin; negative infinity if the bin number is negative, or positive infinity if the 0-based bin number is greater than or equal to the number of bins.
        public int toBin(double x) - Determine the 0-based bin number in which the supplied value should be placed. Specified by: toBin in interface Histogram.BinScheme. Parameters: x - the value. Returns: the 0-based index of the bin.
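
    For comparison, a minimal sketch (not part of the generated javadoc) of the linearly growing scheme described above; the bin count and maximum are illustrative:

      // 30 bins whose widths grow linearly, scaled so the last bin ends at 1000.0.
      Histogram.BinScheme scheme = new Histogram.LinearBinScheme(30, 1000.0);
      double lastUpper = scheme.fromBin(scheme.bins() - 1);  // equals the maximum, per the class description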
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Histogram.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Histogram.html new file mode 100644 index 000000000..19ac052c2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Histogram.html @@ -0,0 +1,228 @@
    Histogram (kafka 4.1.0 API)

    Class Histogram

    java.lang.Object
      org.apache.kafka.common.metrics.stats.Histogram

    public class Histogram extends Object

      Nested Class Summary
      static interface  Histogram.BinScheme - An algorithm for determining the bin in which a value is to be placed as well as calculating the upper end of each bin.
      static class      Histogram.ConstantBinScheme - A scheme for calculating the bins where the width of each bin is a constant determined by the range of values and the number of bins.
      static class      Histogram.LinearBinScheme - A scheme for calculating the bins where the width of each bin is one more than the previous bin, and therefore the bin widths are increasing at a linear rate.

      Constructor Summary

      Method Summary
      void     clear()
      float[]  counts()
      void     record(double value)
      String   toString()
      double   value(double quantile)

      Methods inherited from class java.lang.Object: equals, getClass, hashCode, notify, notifyAll, wait, wait, wait

      Constructor Details

      Method Details
        public void record(double value)
        public double value(double quantile)
        public float[] counts()
        public void clear()
        public String toString() - Overrides: toString in class Object
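
    A short sketch (not from the javadoc) of recording into a Histogram. The constructor entry above is not reproduced here, so the construction from a Histogram.BinScheme is an assumption and should be treated as illustrative:

      // Assumed constructor form: Histogram(Histogram.BinScheme); the bin scheme is illustrative.
      Histogram hist = new Histogram(new Histogram.ConstantBinScheme(100, 0.0, 1000.0));
      hist.record(42.0);
      hist.record(250.0);
      double median = hist.value(0.5);  // value at the 0.5 quantile
      float[] counts = hist.counts();   // per-bin counts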
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Max.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Max.html new file mode 100644 index 000000000..43e602c90 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Max.html @@ -0,0 +1,176 @@
    Max (kafka 4.1.0 API)

    Class Max

    java.lang.Object
      org.apache.kafka.common.metrics.stats.SampledStat
        org.apache.kafka.common.metrics.stats.Max

    All Implemented Interfaces: Measurable, MeasurableStat, MetricValueProvider<Double>, Stat

    public final class Max extends SampledStat

    A SampledStat that gives the max over its samples.

      Constructor Details
        public Max()

      Method Details
        public double combine(List<org.apache.kafka.common.metrics.stats.SampledStat.Sample> samples, MetricConfig config, long now) - Specified by: combine in class SampledStat
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Meter.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Meter.html new file mode 100644 index 000000000..2fe12821f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Meter.html @@ -0,0 +1,267 @@
    Meter (kafka 4.1.0 API)

    Class Meter

    java.lang.Object
      org.apache.kafka.common.metrics.stats.Meter

    All Implemented Interfaces: CompoundStat, Stat

    public class Meter extends Object implements CompoundStat

    A compound stat that includes a rate metric and a cumulative total metric.

      Constructor Details
        public Meter(MetricName rateMetricName, MetricName totalMetricName) - Construct a Meter with seconds as time unit.
        public Meter(TimeUnit unit, MetricName rateMetricName, MetricName totalMetricName) - Construct a Meter with provided time unit.
        public Meter(SampledStat rateStat, MetricName rateMetricName, MetricName totalMetricName) - Construct a Meter with seconds as time unit.
        public Meter(TimeUnit unit, SampledStat rateStat, MetricName rateMetricName, MetricName totalMetricName) - Construct a Meter with provided time unit.

      Method Details
        stats() - Specified by: stats in interface CompoundStat
        public void record(MetricConfig config, double value, long timeMs) - Record the given value. Specified by: record in interface Stat. Parameters: config - the configuration to use for this metric; value - the value to record; timeMs - the POSIX time in milliseconds this value occurred.
        public String toString() - Overrides: toString in class Object
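
    A minimal sketch (not from the javadoc) of attaching a Meter to a Sensor; Metrics, Sensor and MetricName live in org.apache.kafka.common.metrics / org.apache.kafka.common, and the metric names are made up:

      Metrics metrics = new Metrics();
      Sensor sensor = metrics.sensor("bytes-sent");
      sensor.add(new Meter(metrics.metricName("byte-rate", "example-metrics"),
                           metrics.metricName("byte-total", "example-metrics")));
      sensor.record(512.0);  // updates both the rate metric and the cumulative total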
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Min.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Min.html new file mode 100644 index 000000000..56e5234a9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Min.html @@ -0,0 +1,176 @@
    Min (kafka 4.1.0 API)

    Class Min

    java.lang.Object
      org.apache.kafka.common.metrics.stats.SampledStat
        org.apache.kafka.common.metrics.stats.Min

    All Implemented Interfaces: Measurable, MeasurableStat, MetricValueProvider<Double>, Stat

    public class Min extends SampledStat

    A SampledStat that gives the min over its samples.

      Constructor Details
        public Min()

      Method Details
        public double combine(List<org.apache.kafka.common.metrics.stats.SampledStat.Sample> samples, MetricConfig config, long now) - Specified by: combine in class SampledStat
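
    Max and Min (and the related Avg) are normally registered on a Sensor rather than used directly; a minimal sketch (not from the javadoc) with illustrative names:

      Metrics metrics = new Metrics();
      Sensor sensor = metrics.sensor("request-size");
      sensor.add(metrics.metricName("request-size-avg", "example-metrics"), new Avg());
      sensor.add(metrics.metricName("request-size-max", "example-metrics"), new Max());
      sensor.add(metrics.metricName("request-size-min", "example-metrics"), new Min());
      sensor.record(1024.0);  // every stat registered on the sensor sees the recorded value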
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Percentile.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Percentile.html new file mode 100644 index 000000000..652f02037 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Percentile.html @@ -0,0 +1,169 @@
    Percentile (kafka 4.1.0 API)

    Class Percentile

    java.lang.Object
      org.apache.kafka.common.metrics.stats.Percentile

    public class Percentile extends Object

      Constructor Details
        public Percentile(MetricName name, double percentile)

      Method Details
        public MetricName name()
        public double percentile()
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Percentiles.BucketSizing.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Percentiles.BucketSizing.html new file mode 100644 index 000000000..a5937b829 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Percentiles.BucketSizing.html @@ -0,0 +1,221 @@
    Percentiles.BucketSizing (kafka 4.1.0 API)

    Enum Class Percentiles.BucketSizing

    java.lang.Object
      java.lang.Enum<Percentiles.BucketSizing>
        org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing

    All Implemented Interfaces: Serializable, Comparable<Percentiles.BucketSizing>, Constable
    Enclosing class: Percentiles

    public static enum Percentiles.BucketSizing extends Enum<Percentiles.BucketSizing>

      Enum Constant Details

      Method Details
        public static Percentiles.BucketSizing[] values() - Returns an array containing the constants of this enum class, in the order they are declared.
        public static Percentiles.BucketSizing valueOf(String name) - Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not permitted.) Parameters: name - the name of the enum constant to be returned. Returns: the enum constant with the specified name. Throws: IllegalArgumentException - if this enum class has no constant with the specified name; NullPointerException - if the argument is null.
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Percentiles.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Percentiles.html new file mode 100644 index 000000000..8b6552dcc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Percentiles.html @@ -0,0 +1,246 @@
    Percentiles (kafka 4.1.0 API)

    Class Percentiles

    java.lang.Object
      org.apache.kafka.common.metrics.stats.SampledStat
        org.apache.kafka.common.metrics.stats.Percentiles

    All Implemented Interfaces: CompoundStat, Measurable, MeasurableStat, MetricValueProvider<Double>, Stat

    public class Percentiles extends SampledStat implements CompoundStat

    A compound stat that reports one or more percentiles.
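
    A sketch (not from the javadoc) of wiring Percentiles into a Sensor. The constructors are not reproduced above; this assumes a form Percentiles(sizeInBytes, min, max, BucketSizing, Percentile...) as used in the Kafka code base, and the metric names are illustrative:

      Metrics metrics = new Metrics();
      Sensor sensor = metrics.sensor("request-latency");
      sensor.add(new Percentiles(4000, 0.0, 500.0, Percentiles.BucketSizing.LINEAR,
              new Percentile(metrics.metricName("latency-p50", "example-metrics"), 50),
              new Percentile(metrics.metricName("latency-p99", "example-metrics"), 99)));
      sensor.record(37.0);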
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Rate.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Rate.html new file mode 100644 index 000000000..7ce298fb0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Rate.html @@ -0,0 +1,270 @@
    Rate (kafka 4.1.0 API)

    Class Rate

    java.lang.Object
      org.apache.kafka.common.metrics.stats.Rate

    All Implemented Interfaces: Measurable, MeasurableStat, MetricValueProvider<Double>, Stat
    Direct Known Subclasses: SimpleRate

    public class Rate extends Object implements MeasurableStat

    The rate of the given quantity. By default this is the total observed over a set of samples from a sampled statistic divided by the elapsed time over the sample windows. Alternative SampledStat implementations can be provided, however, to record the rate of occurrences (e.g. the count of values measured over the time interval) or other such values.

      Constructor Details

      Method Details
        public String unitName()
        public void record(MetricConfig config, double value, long timeMs) - Record the given value. Specified by: record in interface Stat. Parameters: config - the configuration to use for this metric; value - the value to record; timeMs - the POSIX time in milliseconds this value occurred.
        public double measure(MetricConfig config, long now) - Measure this quantity and return the result as a double. Specified by: measure in interface Measurable. Parameters: config - the configuration for this metric; now - the POSIX time in milliseconds the measurement is being taken. Returns: the measured value.
        public long windowSize(MetricConfig config, long now)
        public String toString() - Overrides: toString in class Object
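
    A sketch (not from the javadoc) of a per-second occurrence rate. The Rate constructors are not reproduced above; this assumes the (TimeUnit, SampledStat) form from the Kafka code base:

      Metrics metrics = new Metrics();
      Sensor sensor = metrics.sensor("records-consumed");
      sensor.add(metrics.metricName("records-consumed-rate", "example-metrics"),
                 new Rate(TimeUnit.SECONDS, new WindowedCount()));
      sensor.record();  // each call contributes one occurrence to the windowed count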
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/SampledStat.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/SampledStat.html new file mode 100644 index 000000000..96fb50f85 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/SampledStat.html @@ -0,0 +1,259 @@
    SampledStat (kafka 4.1.0 API)

    Class SampledStat

    java.lang.Object
      org.apache.kafka.common.metrics.stats.SampledStat

    All Implemented Interfaces: Measurable, MeasurableStat, MetricValueProvider<Double>, Stat
    Direct Known Subclasses: Avg, Frequencies, Max, Min, Percentiles, WindowedSum

    public abstract class SampledStat extends Object implements MeasurableStat

    A SampledStat records a single scalar value measured over one or more samples. Each sample is recorded over a configurable window. The window can be defined by number of events or elapsed time (or both; if both are given, the window is complete when either the event count or elapsed time criterion is met).

    All the samples are combined to produce the measurement. When a window is complete the oldest sample is cleared and recycled to begin recording the next sample. Subclasses of this class define different statistics measured using this basic pattern.

      Constructor Summary
      SampledStat(double initialValue)

      Method Summary
      abstract double  combine(List<org.apache.kafka.common.metrics.stats.SampledStat.Sample> samples, MetricConfig config, long now)
      org.apache.kafka.common.metrics.stats.SampledStat.Sample  current(long timeMs)
      double  measure(MetricConfig config, long now) - Measure this quantity and return the result as a double.
      org.apache.kafka.common.metrics.stats.SampledStat.Sample  oldest(long now)
      void    record(MetricConfig config, double value, long timeMs) - Record the given value.
      String  toString()

      Methods inherited from class java.lang.Object: equals, getClass, hashCode, notify, notifyAll, wait, wait, wait

      Constructor Details
        public SampledStat(double initialValue)

      Method Details
        public void record(MetricConfig config, double value, long timeMs) - Record the given value. Specified by: record in interface Stat. Parameters: config - the configuration to use for this metric; value - the value to record; timeMs - the POSIX time in milliseconds this value occurred.
        public double measure(MetricConfig config, long now) - Measure this quantity and return the result as a double. Specified by: measure in interface Measurable. Parameters: config - the configuration for this metric; now - the POSIX time in milliseconds the measurement is being taken. Returns: the measured value.
        public org.apache.kafka.common.metrics.stats.SampledStat.Sample current(long timeMs)
        public org.apache.kafka.common.metrics.stats.SampledStat.Sample oldest(long now)
        public String toString() - Overrides: toString in class Object
        public abstract double combine(List<org.apache.kafka.common.metrics.stats.SampledStat.Sample> samples, MetricConfig config, long now)
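
    The window behaviour described above is driven by MetricConfig (org.apache.kafka.common.metrics); a sketch (not from the javadoc) with illustrative settings:

      MetricConfig config = new MetricConfig()
              .samples(2)                        // keep two rolling samples
              .timeWindow(30, TimeUnit.SECONDS)  // each sample covers at most 30 seconds
              .eventWindow(1000);                // ... or at most 1000 recorded events
      Metrics metrics = new Metrics();
      Sensor sensor = metrics.sensor("bytes-fetched");
      sensor.add(metrics.metricName("bytes-fetched-avg", "example-metrics"), new Avg(), config);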
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/SimpleRate.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/SimpleRate.html new file mode 100644 index 000000000..98890faa3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/SimpleRate.html @@ -0,0 +1,181 @@
    SimpleRate (kafka 4.1.0 API)

    Class SimpleRate

    java.lang.Object
      org.apache.kafka.common.metrics.stats.Rate
        org.apache.kafka.common.metrics.stats.SimpleRate

    All Implemented Interfaces: Measurable, MeasurableStat, MetricValueProvider<Double>, Stat

    public class SimpleRate extends Rate

    A simple rate: the rate is incrementally calculated based on the elapsed time between the earliest reading and now. An exception is made for the first window, which is considered of fixed size. This avoids the issue of an artificially high rate when the gap between readings is close to 0.

      Constructor Details
        public SimpleRate()

      Method Details
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/TokenBucket.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/TokenBucket.html new file mode 100644 index 000000000..dc1b2904a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/TokenBucket.html @@ -0,0 +1,254 @@
    TokenBucket (kafka 4.1.0 API)

    Class TokenBucket

    java.lang.Object
      org.apache.kafka.common.metrics.stats.TokenBucket

    All Implemented Interfaces: Measurable, MeasurableStat, MetricValueProvider<Double>, Stat

    public class TokenBucket extends Object implements MeasurableStat

    The TokenBucket is a MeasurableStat implementing a token bucket algorithm that is usable within a Sensor. Quota.bound() defines the refill rate of the bucket, while the maximum burst, or the maximum number of credits of the bucket, is defined by MetricConfig#samples() * MetricConfig#timeWindowMs() * Quota#bound(). The quota is considered exhausted when the amount of remaining credits in the bucket is below zero. The enforcement is done by the Sensor.

    Token bucket vs. rate-based quota: the current sampled, rate-based quota does not cope well with bursty workloads. The issue is that a single large sample can hold the average above the quota until it is discarded. Practically, when this happens, one must wait until the sample is expired to bring the rate below the quota, even though less time would be theoretically required. As an example, imagine that we have:
      - Quota (Q) = 5
      - Samples (S) = 100
      - Window (W) = 1s
    A burst of 560 brings the average rate (R) to 5.6 (560 / 100). The expected throttle time is computed as (R - Q) / Q * S * W = (5.6 - 5) / 5 * 100 * 1 = 12 secs. In practice, the average rate won't go below the quota before the burst is dropped from the samples, so one must wait 100s (S * W).

    The token bucket relies on a continuously updated amount of credits, so it does not suffer from the above issue. The same example works as follows:
      - Quota (Q) = 5
      - Burst (B) = Q * S * W = 5 * 100 * 1 = 500
    A burst of 560 brings the amount of credits to -60. One must wait 12s (60 / 5) to refill the bucket to zero.

      Constructor Details
        public TokenBucket()
        public TokenBucket(TimeUnit unit)

      Method Details
        public double measure(MetricConfig config, long timeMs) - Measure this quantity and return the result as a double. Specified by: measure in interface Measurable. Parameters: config - the configuration for this metric; timeMs - the POSIX time in milliseconds the measurement is being taken. Returns: the measured value.
        public void record(MetricConfig config, double value, long timeMs) - Record the given value. Specified by: record in interface Stat. Parameters: config - the configuration to use for this metric; value - the value to record; timeMs - the POSIX time in milliseconds this value occurred.
        public String toString() - Overrides: toString in class Object
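
    A sketch (not from the javadoc) of the worked example above, with Q = 5, S = 100 and W = 1s, so the burst capacity is Q * S * W = 500 credits; the sensor and metric names are illustrative:

      MetricConfig config = new MetricConfig()
              .quota(Quota.upperBound(5))
              .samples(100)
              .timeWindow(1, TimeUnit.SECONDS);
      Metrics metrics = new Metrics();
      Sensor sensor = metrics.sensor("example-quota");
      sensor.add(metrics.metricName("example-tokens", "example-metrics"), new TokenBucket(), config);
      sensor.record(100);  // consumes 100 of the 500 credits; the Sensor reports the quota as
                           // violated once the remaining credits drop below zero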
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/Value.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Value.html new file mode 100644 index 000000000..74ead6684 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/Value.html @@ -0,0 +1,204 @@
    Value (kafka 4.1.0 API)

    Class Value

    java.lang.Object
      org.apache.kafka.common.metrics.stats.Value

    All Implemented Interfaces: Measurable, MeasurableStat, MetricValueProvider<Double>, Stat

    public class Value extends Object implements MeasurableStat

    An instantaneous value.

      Constructor Summary
      Value()

      Method Summary
      double  measure(MetricConfig config, long now) - Measure this quantity and return the result as a double.
      void    record(MetricConfig config, double value, long timeMs) - Record the given value.

      Methods inherited from class java.lang.Object: equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

      Constructor Details
        public Value()

      Method Details
        public double measure(MetricConfig config, long now) - Measure this quantity and return the result as a double. Specified by: measure in interface Measurable. Parameters: config - the configuration for this metric; now - the POSIX time in milliseconds the measurement is being taken. Returns: the measured value.
        public void record(MetricConfig config, double value, long timeMs) - Record the given value. Specified by: record in interface Stat. Parameters: config - the configuration to use for this metric; value - the value to record; timeMs - the POSIX time in milliseconds this value occurred.
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/WindowedCount.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/WindowedCount.html new file mode 100644 index 000000000..f29eba718 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/WindowedCount.html @@ -0,0 +1,148 @@
    WindowedCount (kafka 4.1.0 API)

    Class WindowedCount

    All Implemented Interfaces: Measurable, MeasurableStat, MetricValueProvider<Double>, Stat

    public class WindowedCount extends WindowedSum

    A SampledStat that maintains a simple count of what it has seen. This is a special kind of WindowedSum that always records a value of 1 instead of the provided value. In other words, it counts the number of SampledStat.record(MetricConfig, double, long) invocations, instead of summing the recorded values. See also CumulativeCount for a non-sampled version of this metric.

      Constructor Details
        public WindowedCount()
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/WindowedSum.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/WindowedSum.html new file mode 100644 index 000000000..0aeee2ad7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/WindowedSum.html @@ -0,0 +1,183 @@
    WindowedSum (kafka 4.1.0 API)

    Class WindowedSum

    java.lang.Object
      org.apache.kafka.common.metrics.stats.SampledStat
        org.apache.kafka.common.metrics.stats.WindowedSum

    All Implemented Interfaces: Measurable, MeasurableStat, MetricValueProvider<Double>, Stat
    Direct Known Subclasses: WindowedCount

    public class WindowedSum extends SampledStat

    A SampledStat that maintains the sum of what it has seen. This is a sampled version of CumulativeSum. See also WindowedCount if you want to increment the value by 1 on each recording.

      Constructor Details
        public WindowedSum()

      Method Details
        public double combine(List<org.apache.kafka.common.metrics.stats.SampledStat.Sample> samples, MetricConfig config, long now) - Specified by: combine in class SampledStat
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/package-summary.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/package-summary.html new file mode 100644 index 000000000..d1dd4c67d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/package-summary.html @@ -0,0 +1,191 @@
    org.apache.kafka.common.metrics.stats (kafka 4.1.0 API)

    Package org.apache.kafka.common.metrics.stats

    package org.apache.kafka.common.metrics.stats
    Provides methods of statistically aggregating metrics upon emission.

      Class - Description
      Avg - A SampledStat that maintains a simple average over its samples.
      CumulativeCount - A non-sampled version of WindowedCount maintained over all time.
      CumulativeSum - A non-sampled cumulative total maintained over all time.
      Frequencies - A CompoundStat that represents a normalized distribution with a Frequency metric for each bucketed value.
      Frequency - Definition of a frequency metric used in a Frequencies compound statistic.
      Histogram
      Histogram.BinScheme - An algorithm for determining the bin in which a value is to be placed as well as calculating the upper end of each bin.
      Histogram.ConstantBinScheme - A scheme for calculating the bins where the width of each bin is a constant determined by the range of values and the number of bins.
      Histogram.LinearBinScheme - A scheme for calculating the bins where the width of each bin is one more than the previous bin, and therefore the bin widths are increasing at a linear rate.
      Max - A SampledStat that gives the max over its samples.
      Meter - A compound stat that includes a rate metric and a cumulative total metric.
      Min - A SampledStat that gives the min over its samples.
      Percentile
      Percentiles - A compound stat that reports one or more percentiles.
      Percentiles.BucketSizing
      Rate - The rate of the given quantity.
      SampledStat - A SampledStat records a single scalar value measured over one or more samples.
      SimpleRate - A simple rate, incrementally calculated based on the elapsed time between the earliest reading and now.
      TokenBucket - The TokenBucket is a MeasurableStat implementing a token bucket algorithm that is usable within a Sensor.
      Value - An instantaneous value.
      WindowedCount - A SampledStat that maintains a simple count of what it has seen.
      WindowedSum - A SampledStat that maintains the sum of what it has seen.
diff --git a/static/41/javadoc/org/apache/kafka/common/metrics/stats/package-tree.html b/static/41/javadoc/org/apache/kafka/common/metrics/stats/package-tree.html new file mode 100644 index 000000000..6abaeb9ad --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/metrics/stats/package-tree.html @@ -0,0 +1,122 @@
    org.apache.kafka.common.metrics.stats Class Hierarchy (kafka 4.1.0 API)

    Hierarchy For Package org.apache.kafka.common.metrics.stats

    Package Hierarchies:

    Class Hierarchy

    Interface Hierarchy

    Enum Class Hierarchy
diff --git a/static/41/javadoc/org/apache/kafka/common/package-summary.html b/static/41/javadoc/org/apache/kafka/common/package-summary.html new file mode 100644 index 000000000..1197f1bf6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/package-summary.html @@ -0,0 +1,253 @@
    org.apache.kafka.common (kafka 4.1.0 API)

    Package org.apache.kafka.common

    package org.apache.kafka.common
    Provides shared functionality for Kafka clients and servers.
diff --git a/static/41/javadoc/org/apache/kafka/common/package-tree.html b/static/41/javadoc/org/apache/kafka/common/package-tree.html new file mode 100644 index 000000000..2bd133275 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/package-tree.html @@ -0,0 +1,157 @@
    org.apache.kafka.common Class Hierarchy (kafka 4.1.0 API)

    Hierarchy For Package org.apache.kafka.common

    Package Hierarchies:

    Class Hierarchy

    Interface Hierarchy

    Enum Class Hierarchy
diff --git a/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaAlteration.Op.html b/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaAlteration.Op.html new file mode 100644 index 000000000..4428d3503 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaAlteration.Op.html @@ -0,0 +1,227 @@
    ClientQuotaAlteration.Op (kafka 4.1.0 API)

    Class ClientQuotaAlteration.Op

    java.lang.Object
      org.apache.kafka.common.quota.ClientQuotaAlteration.Op

    Enclosing class: ClientQuotaAlteration

    public static class ClientQuotaAlteration.Op extends Object

      Constructor Details
        public Op(String key, Double value) - Parameters: key - the quota type to alter; value - if set then the existing value is updated, otherwise if null, the existing value is cleared.

      Method Details
        public String key() - Returns: the quota type to alter
        public Double value() - Returns: if set then the existing value is updated, otherwise if null, the existing value is cleared
        public boolean equals(Object o) - Overrides: equals in class Object
        public int hashCode() - Overrides: hashCode in class Object
        public String toString() - Overrides: toString in class Object
diff --git a/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaAlteration.html b/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaAlteration.html new file mode 100644 index 000000000..02afb1d0d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaAlteration.html @@ -0,0 +1,211 @@
    ClientQuotaAlteration (kafka 4.1.0 API)

    Class ClientQuotaAlteration

    java.lang.Object
      org.apache.kafka.common.quota.ClientQuotaAlteration

    public class ClientQuotaAlteration extends Object

    Describes a configuration alteration to be made to a client quota entity.
diff --git a/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaEntity.html b/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaEntity.html new file mode 100644 index 000000000..5adffdc47 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaEntity.html @@ -0,0 +1,293 @@
    ClientQuotaEntity (kafka 4.1.0 API)

    Class ClientQuotaEntity

    java.lang.Object
      org.apache.kafka.common.quota.ClientQuotaEntity

    public class ClientQuotaEntity extends Object

    Describes a client quota entity, which is a mapping of entity types to their names.

      Field Details

      Constructor Details
        public ClientQuotaEntity(Map<String,String> entries) - Constructs a quota entity for the given types and names. If a name is null, then it is mapped to the built-in default entity name. Parameters: entries - maps entity type to its name.

      Method Details
        public static boolean isValidEntityType(String entityType)
        public Map<String,String> entries() - Returns: map of entity type to its name
        public boolean equals(Object o) - Overrides: equals in class Object
        public int hashCode() - Overrides: hashCode in class Object
        public String toString() - Overrides: toString in class Object
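
    A sketch (not from the javadoc) of altering a quota for a user entity through the Admin client (org.apache.kafka.clients.admin.Admin). The quota key "producer_byte_rate" and the connection settings are illustrative, and exception handling for the returned future is omitted:

      ClientQuotaEntity entity = new ClientQuotaEntity(Map.of(ClientQuotaEntity.USER, "alice"));
      ClientQuotaAlteration alteration = new ClientQuotaAlteration(entity,
              List.of(new ClientQuotaAlteration.Op("producer_byte_rate", 1048576.0)));  // null would clear it
      try (Admin admin = Admin.create(Map.<String, Object>of("bootstrap.servers", "localhost:9092"))) {
          admin.alterClientQuotas(List.of(alteration)).all().get();  // blocks until applied
      }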
diff --git a/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaFilter.html b/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaFilter.html new file mode 100644 index 000000000..ea9d437cd --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaFilter.html @@ -0,0 +1,234 @@
    ClientQuotaFilter (kafka 4.1.0 API)

    Class ClientQuotaFilter

    java.lang.Object
      org.apache.kafka.common.quota.ClientQuotaFilter

    public class ClientQuotaFilter extends Object

    Describes a client quota entity filter.

      Method Details
        public static ClientQuotaFilter contains(Collection<ClientQuotaFilterComponent> components) - Constructs and returns a quota filter that matches all provided components. Matching entities with entity types that are not specified by a component will also be included in the result. Parameters: components - the components for the filter.
        public static ClientQuotaFilter containsOnly(Collection<ClientQuotaFilterComponent> components) - Constructs and returns a quota filter that matches all provided components. Matching entities with entity types that are not specified by a component will *not* be included in the result. Parameters: components - the components for the filter.
        public static ClientQuotaFilter all() - Constructs and returns a quota filter that matches all configured entities.
        public Collection<ClientQuotaFilterComponent> components() - Returns: the filter's components
        public boolean strict() - Returns: whether the filter is strict, i.e. only includes specified components
        public boolean equals(Object o) - Overrides: equals in class Object
        public int hashCode() - Overrides: hashCode in class Object
        public String toString() - Overrides: toString in class Object
diff --git a/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaFilterComponent.html b/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaFilterComponent.html new file mode 100644 index 000000000..4d7d2394e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/quota/ClientQuotaFilterComponent.html @@ -0,0 +1,248 @@
    ClientQuotaFilterComponent (kafka 4.1.0 API)

    Class ClientQuotaFilterComponent

    java.lang.Object
      org.apache.kafka.common.quota.ClientQuotaFilterComponent

    public class ClientQuotaFilterComponent extends Object

    Describes a component for applying a client quota filter.

      Method Details
        public static ClientQuotaFilterComponent ofEntity(String entityType, String entityName) - Constructs and returns a filter component that exactly matches the provided entity name for the entity type. Parameters: entityType - the entity type the filter component applies to; entityName - the entity name that's matched exactly.
        public static ClientQuotaFilterComponent ofDefaultEntity(String entityType) - Constructs and returns a filter component that matches the built-in default entity name for the entity type. Parameters: entityType - the entity type the filter component applies to.
        public static ClientQuotaFilterComponent ofEntityType(String entityType) - Constructs and returns a filter component that matches any specified name for the entity type. Parameters: entityType - the entity type the filter component applies to.
        public String entityType() - Returns: the component's entity type
        public Optional<String> match() - Returns: the optional match string: if present, the name that's matched exactly; if empty, matches the default name; if null, matches any specified name
        public boolean equals(Object o) - Overrides: equals in class Object
        public int hashCode() - Overrides: hashCode in class Object
        public String toString() - Overrides: toString in class Object
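
    A sketch (not from the javadoc) of querying quotas with a strict filter, assuming an existing Admin client named admin; exception handling for the returned future is omitted:

      ClientQuotaFilter filter = ClientQuotaFilter.containsOnly(
              List.of(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, "alice")));
      Map<ClientQuotaEntity, Map<String, Double>> quotas =
              admin.describeClientQuotas(filter).entities().get();  // quota key -> value per matched entity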
diff --git a/static/41/javadoc/org/apache/kafka/common/quota/package-summary.html b/static/41/javadoc/org/apache/kafka/common/quota/package-summary.html new file mode 100644 index 000000000..01e9f115c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/quota/package-summary.html @@ -0,0 +1,114 @@
    org.apache.kafka.common.quota (kafka 4.1.0 API)

    Package org.apache.kafka.common.quota

    package org.apache.kafka.common.quota
    Provides mechanisms for enforcing resource quotas.
diff --git a/static/41/javadoc/org/apache/kafka/common/quota/package-tree.html b/static/41/javadoc/org/apache/kafka/common/quota/package-tree.html new file mode 100644 index 000000000..c049cfac7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/quota/package-tree.html @@ -0,0 +1,75 @@
    org.apache.kafka.common.quota Class Hierarchy (kafka 4.1.0 API)

    Hierarchy For Package org.apache.kafka.common.quota

    Package Hierarchies:

    Class Hierarchy
diff --git a/static/41/javadoc/org/apache/kafka/common/resource/PatternType.html b/static/41/javadoc/org/apache/kafka/common/resource/PatternType.html new file mode 100644 index 000000000..a8bd900c7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/resource/PatternType.html @@ -0,0 +1,334 @@
    PatternType (kafka 4.1.0 API)

    Enum Class PatternType

    java.lang.Object
      java.lang.Enum<PatternType>
        org.apache.kafka.common.resource.PatternType

    All Implemented Interfaces: Serializable, Comparable<PatternType>, Constable

    public enum PatternType extends Enum<PatternType>

    Resource pattern type.

      Enum Constant Details

        UNKNOWN
        public static final PatternType UNKNOWN
        Represents any PatternType which this client cannot understand, perhaps because this client is too old.

        ANY
        public static final PatternType ANY
        In a filter, matches any resource pattern type.

        MATCH
        public static final PatternType MATCH
        In a filter, will perform pattern matching. For example, given a filter of ResourcePatternFilter(TOPIC, "payments.received", MATCH), the filter will match any ResourcePattern that matches topic 'payments.received'. This might include:
          • A Literal pattern with the same type and name, e.g. ResourcePattern(TOPIC, "payments.received", LITERAL)
          • A Wildcard pattern with the same type, e.g. ResourcePattern(TOPIC, "*", LITERAL)
          • A Prefixed pattern with the same type and where the name is a matching prefix, e.g. ResourcePattern(TOPIC, "payments.", PREFIXED)

        LITERAL
        public static final PatternType LITERAL
        A literal resource name. A literal name defines the full name of a resource, e.g. topic with name 'foo', or group with name 'bob'. The special wildcard character * can be used to represent a resource with any name.

        PREFIXED
        public static final PatternType PREFIXED
        A prefixed resource name. A prefixed name defines a prefix for a resource, e.g. topics with names that start with 'foo'.
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static PatternType[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static PatternType valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        code

        +
        public byte code()
        +
        +
        Returns:
        +
        the code of this resource.
        +
        +
        +
      • +
      • +
        +

        isUnknown

        +
        public boolean isUnknown()
        +
        +
        Returns:
        +
        whether this resource pattern type is UNKNOWN.
        +
        +
        +
      • +
      • +
        +

        isSpecific

        +
        public boolean isSpecific()
        +
        +
        Returns:
        +
        whether this resource pattern type is a concrete type, rather than UNKNOWN or one of the filter types.
        +
        +
        +
      • +
      • +
        +

        fromCode

        +
        public static PatternType fromCode(byte code)
        +
        Return the PatternType with the provided code or UNKNOWN if one cannot be found.
        +
        +
      • +
      • +
        +

        fromString

        +
        public static PatternType fromString(String name)
        +
        Return the PatternType with the provided name or UNKNOWN if one cannot be found.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
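As an illustration of the MATCH semantics described above, this sketch builds a MATCH filter and checks it against literal, wildcard and prefixed patterns; the topic names are arbitrary examples, not part of the API:

```java
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourcePatternFilter;
import org.apache.kafka.common.resource.ResourceType;

public class PatternTypeExample {
    public static void main(String[] args) {
        // A MATCH filter for the topic 'payments.received'.
        ResourcePatternFilter filter = new ResourcePatternFilter(
                ResourceType.TOPIC, "payments.received", PatternType.MATCH);

        // It matches literal, wildcard and prefixed patterns that cover the topic name.
        System.out.println(filter.matches(
                new ResourcePattern(ResourceType.TOPIC, "payments.received", PatternType.LITERAL))); // true
        System.out.println(filter.matches(
                new ResourcePattern(ResourceType.TOPIC, "*", PatternType.LITERAL)));                 // true
        System.out.println(filter.matches(
                new ResourcePattern(ResourceType.TOPIC, "payments.", PatternType.PREFIXED)));        // true

        // A prefix that does not cover the topic name is not matched.
        System.out.println(filter.matches(
                new ResourcePattern(ResourceType.TOPIC, "orders.", PatternType.PREFIXED)));          // false
    }
}
```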
diff --git a/static/41/javadoc/org/apache/kafka/common/resource/Resource.html b/static/41/javadoc/org/apache/kafka/common/resource/Resource.html new file mode 100644 index 000000000..9cfb630b2

Resource (kafka 4.1.0 API)

Class Resource

java.lang.Object
  org.apache.kafka.common.resource.Resource

public class Resource extends Object

Represents a cluster resource with a tuple of (type, name).

Field Details

  public static final String CLUSTER_NAME
      The name of the CLUSTER resource.

  public static final Resource CLUSTER
      A resource representing the whole cluster.

Constructor Details

  public Resource(ResourceType resourceType, String name)
      Create an instance of this class with the provided parameters.
      Parameters:
        resourceType - non-null resource type
        name - non-null resource name

Method Details

  public ResourceType resourceType()
      Return the resource type.

  public String name()
      Return the resource name.

  public String toString()
      Overrides: toString in class Object

  public boolean isUnknown()
      Return true if this Resource has any UNKNOWN components.

  public boolean equals(Object o)
      Overrides: equals in class Object

  public int hashCode()
      Overrides: hashCode in class Object

Methods inherited from class java.lang.Object: getClass, notify, notifyAll, wait, wait, wait
diff --git a/static/41/javadoc/org/apache/kafka/common/resource/ResourcePattern.html b/static/41/javadoc/org/apache/kafka/common/resource/ResourcePattern.html new file mode 100644 index 000000000..4ab27d5aa

ResourcePattern (kafka 4.1.0 API)

Class ResourcePattern

java.lang.Object
  org.apache.kafka.common.resource.ResourcePattern

public class ResourcePattern extends Object

Represents a pattern that is used by ACLs to match zero or more Resources.

Field Details

  public static final String WILDCARD_RESOURCE
      A special literal resource name that corresponds to 'all resources of a certain type'.

Constructor Details

  public ResourcePattern(ResourceType resourceType, String name, PatternType patternType)
      Create a pattern using the supplied parameters.
      Parameters:
        resourceType - non-null, specific resource type
        name - non-null resource name, which can be the WILDCARD_RESOURCE
        patternType - non-null, specific resource pattern type, which controls how the pattern will match resource names

Method Details

  public ResourceType resourceType()
      Returns: the specific resource type this pattern matches

  public String name()
      Returns: the resource name

  public PatternType patternType()
      Returns: the resource pattern type

  public ResourcePatternFilter toFilter()
      Returns: a filter which matches only this pattern

  public String toString()
      Overrides: toString in class Object

  public boolean isUnknown()
      Returns: true if this Resource has any UNKNOWN components

  public boolean equals(Object o)
      Overrides: equals in class Object

  public int hashCode()
      Overrides: hashCode in class Object
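A short sketch of how a pattern and its `toFilter()` view might be used; the prefix name is illustrative:

```java
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourcePatternFilter;
import org.apache.kafka.common.resource.ResourceType;

public class ResourcePatternExample {
    public static void main(String[] args) {
        // A prefixed pattern covering every topic whose name starts with "payments.".
        ResourcePattern pattern =
                new ResourcePattern(ResourceType.TOPIC, "payments.", PatternType.PREFIXED);

        System.out.println(pattern.resourceType()); // TOPIC
        System.out.println(pattern.patternType());  // PREFIXED
        System.out.println(pattern.isUnknown());    // false

        // toFilter() yields a filter that matches only this exact pattern.
        ResourcePatternFilter filter = pattern.toFilter();
        System.out.println(filter.matches(pattern)); // true
    }
}
```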
diff --git a/static/41/javadoc/org/apache/kafka/common/resource/ResourcePatternFilter.html b/static/41/javadoc/org/apache/kafka/common/resource/ResourcePatternFilter.html new file mode 100644 index 000000000..f6884eef1

ResourcePatternFilter (kafka 4.1.0 API)

Class ResourcePatternFilter

java.lang.Object
  org.apache.kafka.common.resource.ResourcePatternFilter

public class ResourcePatternFilter extends Object

Represents a filter that can match ResourcePattern.

Constructor Details

  public ResourcePatternFilter(ResourceType resourceType, String name, PatternType patternType)
      Create a filter using the supplied parameters.
      Parameters:
        resourceType - non-null resource type. If ResourceType.ANY, the filter will ignore the resource type of the pattern; if any other resource type, the filter will match only patterns with the same type.
        name - resource name or null. If null, the filter will ignore the name of resources; if ResourcePattern.WILDCARD_RESOURCE, it will match only wildcard patterns.
        patternType - non-null resource pattern type. If PatternType.ANY, the filter will match patterns regardless of pattern type; if PatternType.MATCH, the filter will match patterns that would match the supplied name, including matching prefixed and wildcard patterns; if any other resource pattern type, the filter will match only patterns with the same type.

Method Details

  public boolean isUnknown()
      Returns: true if this filter has any UNKNOWN components

  public ResourceType resourceType()
      Returns: the specific resource type this pattern matches

  public String name()
      Returns: the resource name

  public PatternType patternType()
      Returns: the resource pattern type

  public boolean matches(ResourcePattern pattern)
      Returns: true if this filter matches the given pattern

  public boolean matchesAtMostOne()
      Returns: true if this filter could only match one pattern, i.e. if there are no ANY or UNKNOWN fields

  public String findIndefiniteField()
      Returns: a string describing any ANY or UNKNOWN field, or null if there is no such field

  public String toString()
      Overrides: toString in class Object

  public boolean equals(Object o)
      Overrides: equals in class Object

  public int hashCode()
      Overrides: hashCode in class Object
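To make `matchesAtMostOne()` and `findIndefiniteField()` concrete, here is a small sketch contrasting a fully specified filter with an indefinite one; the group name is arbitrary:

```java
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePatternFilter;
import org.apache.kafka.common.resource.ResourceType;

public class ResourcePatternFilterExample {
    public static void main(String[] args) {
        // Fully specified: type, name and pattern type are all concrete.
        ResourcePatternFilter exact = new ResourcePatternFilter(
                ResourceType.GROUP, "payments-app", PatternType.LITERAL);
        System.out.println(exact.matchesAtMostOne());    // true
        System.out.println(exact.findIndefiniteField()); // null

        // Indefinite: ANY resource type, null name and ANY pattern type can match many patterns.
        ResourcePatternFilter broad = new ResourcePatternFilter(
                ResourceType.ANY, null, PatternType.ANY);
        System.out.println(broad.matchesAtMostOne());    // false
        System.out.println(broad.findIndefiniteField()); // describes one of the ANY fields
    }
}
```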
diff --git a/static/41/javadoc/org/apache/kafka/common/resource/ResourceType.html b/static/41/javadoc/org/apache/kafka/common/resource/ResourceType.html new file mode 100644 index 000000000..dd25f86e1

ResourceType (kafka 4.1.0 API)

Enum Class ResourceType

java.lang.Object
  java.lang.Enum<ResourceType>
    org.apache.kafka.common.resource.ResourceType

All Implemented Interfaces: Serializable, Comparable<ResourceType>, Constable

public enum ResourceType extends Enum<ResourceType>

Represents a type of resource which an ACL can be applied to.

Enum Constant Details

  UNKNOWN
      Represents any ResourceType which this client cannot understand, perhaps because this client is too old.

  ANY
      In a filter, matches any ResourceType.

  TOPIC
      A Kafka topic.

  GROUP
      A consumer group.

  CLUSTER
      The cluster as a whole.

  TRANSACTIONAL_ID
      A transactional ID.

  DELEGATION_TOKEN
      A token ID.

  USER
      A user principal.

Method Details

  public static ResourceType[] values()
      Returns an array containing the constants of this enum class, in the order they are declared.

  public static ResourceType valueOf(String name)
      Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class; extraneous whitespace characters are not permitted. Throws IllegalArgumentException if this enum class has no constant with the specified name, and NullPointerException if the argument is null.

  public static ResourceType fromString(String str) throws IllegalArgumentException
      Parse the given string as an ACL resource type.
      Returns: the ResourceType, or UNKNOWN if the string could not be matched.

  public static ResourceType fromCode(byte code)
      Return the ResourceType with the provided code, or ResourceType.UNKNOWN if one cannot be found.

  public byte code()
      Return the code of this resource.

  public boolean isUnknown()
      Return whether this resource type is UNKNOWN.
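A brief sketch of the conversion helpers described above; the output comments reflect the documented behaviour:

```java
import org.apache.kafka.common.resource.ResourceType;

public class ResourceTypeExample {
    public static void main(String[] args) {
        // Round-trip through the compact byte code used on the wire.
        byte code = ResourceType.TOPIC.code();
        System.out.println(ResourceType.fromCode(code));       // TOPIC

        // Parse from a string; per the docs above, unmatched strings yield UNKNOWN.
        System.out.println(ResourceType.fromString("TOPIC"));  // TOPIC

        System.out.println(ResourceType.TOPIC.isUnknown());    // false
        System.out.println(ResourceType.UNKNOWN.isUnknown());  // true
    }
}
```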
diff --git a/static/41/javadoc/org/apache/kafka/common/resource/package-summary.html b/static/41/javadoc/org/apache/kafka/common/resource/package-summary.html new file mode 100644 index 000000000..5ab705afd

org.apache.kafka.common.resource (kafka 4.1.0 API)

Package org.apache.kafka.common.resource

package org.apache.kafka.common.resource

Provides client handles representing logical resources in a Kafka cluster.

Classes and enums in this package:

  PatternType - Resource pattern type.
  Resource - Represents a cluster resource with a tuple of (type, name).
  ResourcePattern - Represents a pattern that is used by ACLs to match zero or more Resources.
  ResourcePatternFilter - Represents a filter that can match ResourcePattern.
  ResourceType - Represents a type of resource which an ACL can be applied to.
diff --git a/static/41/javadoc/org/apache/kafka/common/resource/package-tree.html b/static/41/javadoc/org/apache/kafka/common/resource/package-tree.html new file mode 100644 index 000000000..f23aaa354

org.apache.kafka.common.resource Class Hierarchy (kafka 4.1.0 API)

Hierarchy For Package org.apache.kafka.common.resource

Package Hierarchies:

Class Hierarchy

Enum Class Hierarchy
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/AuthenticateCallbackHandler.html b/static/41/javadoc/org/apache/kafka/common/security/auth/AuthenticateCallbackHandler.html new file mode 100644 index 000000000..571578ff8

AuthenticateCallbackHandler (kafka 4.1.0 API)

Interface AuthenticateCallbackHandler

All Superinterfaces: CallbackHandler
All Known Implementing Classes: OAuthBearerLoginCallbackHandler, OAuthBearerValidatorCallbackHandler

public interface AuthenticateCallbackHandler extends CallbackHandler

Method Details

  void configure(Map<String,?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries)
      Configures this callback handler for the specified SASL mechanism.
      Parameters:
        configs - Key-value pairs containing the parsed configuration options of the client or broker. Note that these are the Kafka configuration options and not the JAAS configuration options. JAAS config options may be obtained from jaasConfigEntries for callbacks which obtain some configs from the JAAS configuration. For configs that may be specified as both Kafka config and JAAS config (e.g. sasl.kerberos.service.name), the configuration is treated as invalid if conflicting values are provided.
        saslMechanism - Negotiated SASL mechanism. For clients, this is the SASL mechanism configured for the client. For brokers, this is the mechanism negotiated with the client and is one of the mechanisms enabled on the broker.
        jaasConfigEntries - JAAS configuration entries from the JAAS login context. This list contains a single entry for clients, and may contain more than one entry for brokers if multiple mechanisms are enabled on a listener using static JAAS configuration where there is no mapping between mechanisms and login module entries. In this case, callback handlers can use the login module in jaasConfigEntries to identify the entry corresponding to saslMechanism. Alternatively, the dynamic JAAS configuration option SaslConfigs.SASL_JAAS_CONFIG may be configured on brokers with listener and mechanism prefix, in which case only the configuration entry corresponding to saslMechanism will be provided in jaasConfigEntries.

  void close()
      Closes this instance.

Methods inherited from interface javax.security.auth.callback.CallbackHandler: handle
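A minimal, illustrative callback handler sketch. The config keys are hypothetical and the handler only answers name and password callbacks; a real handler would usually derive credentials from the JAAS entries passed to configure():

```java
import java.util.List;
import java.util.Map;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.login.AppConfigurationEntry;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;

// Illustrative handler that answers name/password callbacks from Kafka configs.
public class StaticCredentialsCallbackHandler implements AuthenticateCallbackHandler {
    private String username;
    private String password;

    @Override
    public void configure(Map<String, ?> configs, String saslMechanism,
                          List<AppConfigurationEntry> jaasConfigEntries) {
        // Hypothetical config keys, chosen only for this sketch.
        this.username = String.valueOf(configs.get("example.sasl.username"));
        this.password = String.valueOf(configs.get("example.sasl.password"));
    }

    @Override
    public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
        for (Callback callback : callbacks) {
            if (callback instanceof NameCallback) {
                ((NameCallback) callback).setName(username);
            } else if (callback instanceof PasswordCallback) {
                ((PasswordCallback) callback).setPassword(password.toCharArray());
            } else {
                throw new UnsupportedCallbackException(callback);
            }
        }
    }

    @Override
    public void close() {
        // Nothing to clean up in this sketch.
    }
}
```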
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/AuthenticationContext.html b/static/41/javadoc/org/apache/kafka/common/security/auth/AuthenticationContext.html new file mode 100644 index 000000000..dbcbb32ab

AuthenticationContext (kafka 4.1.0 API)

Interface AuthenticationContext

All Known Implementing Classes: PlaintextAuthenticationContext, SaslAuthenticationContext, SslAuthenticationContext

public interface AuthenticationContext

An object representing contextual information from the authentication session. See PlaintextAuthenticationContext, SaslAuthenticationContext and SslAuthenticationContext. This class is only used in the broker.

Method Details

  SecurityProtocol securityProtocol()
      Underlying security protocol of the authentication session.

  InetAddress clientAddress()
      Address of the authenticated client.

  String listenerName()
      Name of the listener used for the connection.
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/KafkaPrincipal.html b/static/41/javadoc/org/apache/kafka/common/security/auth/KafkaPrincipal.html new file mode 100644 index 000000000..35dd0c297

KafkaPrincipal (kafka 4.1.0 API)

Class KafkaPrincipal

java.lang.Object
  org.apache.kafka.common.security.auth.KafkaPrincipal

All Implemented Interfaces: Principal

public class KafkaPrincipal extends Object implements Principal

Principals in Kafka are defined by a type and a name. The principal type will always be "User" for the simple authorizer that is enabled by default, but custom authorizers can leverage different principal types (such as to enable group or role-based ACLs). The KafkaPrincipalBuilder interface is used when you need to derive a different principal type from the authentication context, or when you need to represent relations between different principals. For example, you could extend KafkaPrincipal in order to link a user principal to one or more role principals.

For custom extensions of KafkaPrincipal, there are two key points to keep in mind:

  1. To be compatible with the ACL APIs provided by Kafka (including the command line tool), each ACL can only represent a permission granted to a single principal (consisting of a principal type and name). It is possible to use richer ACL semantics, but you must implement your own mechanisms for adding and removing ACLs.
  2. In general, KafkaPrincipal extensions are only useful when the corresponding Authorizer is also aware of the extension. If you have a KafkaPrincipalBuilder which derives user groups from the authentication context (e.g. from an SSL client certificate), then you need a custom authorizer which is capable of using the additional group information.

Constructor Details

  public KafkaPrincipal(String principalType, String name)

  public KafkaPrincipal(String principalType, String name, boolean tokenAuthenticated)

Method Details

  public String toString()
      Specified by: toString in interface Principal
      Overrides: toString in class Object

  public boolean equals(Object o)
      Specified by: equals in interface Principal
      Overrides: equals in class Object

  public int hashCode()
      Specified by: hashCode in interface Principal
      Overrides: hashCode in class Object

  public String getName()
      Specified by: getName in interface Principal

  public String getPrincipalType()

  public void tokenAuthenticated(boolean tokenAuthenticated)

  public boolean tokenAuthenticated()
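A small usage sketch. KafkaPrincipal.USER_TYPE is the class's constant for the default "User" type (the page's field listing did not survive extraction above); the custom "Group" type is purely illustrative:

```java
import org.apache.kafka.common.security.auth.KafkaPrincipal;

public class KafkaPrincipalExample {
    public static void main(String[] args) {
        // The default principal type used by the built-in authorizer is "User".
        KafkaPrincipal alice = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "alice");
        System.out.println(alice);                    // User:alice
        System.out.println(alice.getPrincipalType()); // User
        System.out.println(alice.getName());          // alice

        // A custom principal type, e.g. for a group- or role-aware authorizer.
        KafkaPrincipal group = new KafkaPrincipal("Group", "payments-team");
        System.out.println(group);                    // Group:payments-team
    }
}
```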
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/KafkaPrincipalBuilder.html b/static/41/javadoc/org/apache/kafka/common/security/auth/KafkaPrincipalBuilder.html new file mode 100644 index 000000000..7fab97373

KafkaPrincipalBuilder (kafka 4.1.0 API)

Interface KafkaPrincipalBuilder

public interface KafkaPrincipalBuilder

Pluggable principal builder interface which supports both SSL authentication through SslAuthenticationContext and SASL through SaslAuthenticationContext. Note that the Configurable and Closeable interfaces are respected if implemented. Additionally, implementations must provide a default no-arg constructor. Note also that custom implementations of KafkaPrincipalBuilder must implement KafkaPrincipalSerde, otherwise brokers will not be able to forward requests to the controller.

Method Summary

  build(AuthenticationContext context)
      Build a kafka principal from the authentication context.
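An illustrative builder sketch, not the library's reference implementation. It assumes the KafkaPrincipal.USER_TYPE and KafkaPrincipal.ANONYMOUS constants and uses SaslAuthenticationContext.server() to obtain the authorization ID; deriving a name from an SSL client certificate is left out:

```java
import org.apache.kafka.common.security.auth.AuthenticationContext;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.auth.KafkaPrincipalBuilder;
import org.apache.kafka.common.security.auth.SaslAuthenticationContext;
import org.apache.kafka.common.security.auth.SslAuthenticationContext;

// Illustrative builder mapping SASL and SSL sessions to "User" principals.
// A production implementation should also implement KafkaPrincipalSerde so
// brokers can forward requests to the controller (see the note above).
public class SimplePrincipalBuilder implements KafkaPrincipalBuilder {
    @Override
    public KafkaPrincipal build(AuthenticationContext context) {
        if (context instanceof SaslAuthenticationContext) {
            SaslAuthenticationContext sasl = (SaslAuthenticationContext) context;
            return new KafkaPrincipal(KafkaPrincipal.USER_TYPE, sasl.server().getAuthorizationID());
        } else if (context instanceof SslAuthenticationContext) {
            // Deriving the name from the client certificate is omitted in this sketch.
            return new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "ssl-client");
        }
        return KafkaPrincipal.ANONYMOUS;
    }
}
```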
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/KafkaPrincipalSerde.html b/static/41/javadoc/org/apache/kafka/common/security/auth/KafkaPrincipalSerde.html new file mode 100644 index 000000000..1498d9446

KafkaPrincipalSerde (kafka 4.1.0 API)

Interface KafkaPrincipalSerde

public interface KafkaPrincipalSerde

Serializer/Deserializer interface for KafkaPrincipal for the purpose of inter-broker forwarding. Any serialization/deserialization failure should raise a SerializationException to be consistent.
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/Login.html b/static/41/javadoc/org/apache/kafka/common/security/auth/Login.html new file mode 100644 index 000000000..c0d7328a0

Login (kafka 4.1.0 API)

Interface Login

public interface Login

Login interface for authentication.

Method Details

  void configure(Map<String,?> configs, String contextName, Configuration jaasConfiguration, AuthenticateCallbackHandler loginCallbackHandler)
      Configures this login instance.
      Parameters:
        configs - Key-value pairs containing the parsed configuration options of the client or broker. Note that these are the Kafka configuration options and not the JAAS configuration options. The JAAS options may be obtained from jaasConfiguration.
        contextName - JAAS context name for this login, which may be used to obtain the login context from jaasConfiguration.
        jaasConfiguration - JAAS configuration containing the login context named contextName. If static JAAS configuration is used, this Configuration may also contain other login contexts.
        loginCallbackHandler - Login callback handler instance to use for this Login. The login callback handler class may be configured using SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS.

  LoginContext login() throws LoginException
      Performs login for each login module specified for the login context of this instance.

  Subject subject()
      Returns the authenticated subject of this login context.

  String serviceName()
      Returns the service name to be used for SASL.

  void close()
      Closes this instance.
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/PlaintextAuthenticationContext.html b/static/41/javadoc/org/apache/kafka/common/security/auth/PlaintextAuthenticationContext.html new file mode 100644 index 000000000..ada6bbaab

PlaintextAuthenticationContext (kafka 4.1.0 API)

Class PlaintextAuthenticationContext

java.lang.Object
  org.apache.kafka.common.security.auth.PlaintextAuthenticationContext

All Implemented Interfaces: AuthenticationContext

public class PlaintextAuthenticationContext extends Object implements AuthenticationContext
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/SaslAuthenticationContext.html b/static/41/javadoc/org/apache/kafka/common/security/auth/SaslAuthenticationContext.html new file mode 100644 index 000000000..3f13fdc61

SaslAuthenticationContext (kafka 4.1.0 API)

Class SaslAuthenticationContext

java.lang.Object
  org.apache.kafka.common.security.auth.SaslAuthenticationContext

All Implemented Interfaces: AuthenticationContext

public class SaslAuthenticationContext extends Object implements AuthenticationContext
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/SaslExtensions.html b/static/41/javadoc/org/apache/kafka/common/security/auth/SaslExtensions.html new file mode 100644 index 000000000..995ff284a

SaslExtensions (kafka 4.1.0 API)

Class SaslExtensions

java.lang.Object
  org.apache.kafka.common.security.auth.SaslExtensions

public class SaslExtensions extends Object

A simple immutable value object class holding customizable SASL extensions.

Note on object identity and equality: SaslExtensions intentionally overrides the standard equals(Object) and hashCode() methods, calling the respective Object.equals(Object) and Object.hashCode() implementations. In so doing, it provides equality only via reference identity and will not base equality on the underlying values of its extensions map.

The reason for this approach to equality is based on the manner in which credentials are stored in a Subject. SaslExtensions are added to and removed from a Subject via its public credentials. The public credentials are stored in a Set in the Subject, so object equality therefore becomes a concern. With shallow, reference-based equality, distinct SaslExtensions instances with the same map values can be considered unique. This is critical to operations like token refresh. See KAFKA-14062 for more detail.

Constructor Details

  public SaslExtensions(Map<String,String> extensionsMap)

Method Details

  public Map<String,String> map()
      Returns an immutable map of the extension names and their values.

  public static SaslExtensions empty()
      Creates an "empty" instance indicating no SASL extensions. Do not cache the result of this method call for use by multiple Subjects, as the references need to be unique. See the class-level documentation for details.
      Returns: a unique, but empty, SaslExtensions instance.

  public final boolean equals(Object o)
      Implements equals using the reference comparison implementation from Object.equals(Object). See the class-level documentation for details.
      Overrides: equals in class Object
      Parameters: o - other object to compare
      Returns: true if o == this

  public final int hashCode()
      Implements hashCode using the native implementation from Object.hashCode(). See the class-level documentation for details.
      Overrides: hashCode in class Object
      Returns: hash code of the instance

  public String toString()
      Overrides: toString in class Object
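A quick sketch of the reference-equality behaviour described above; the extension key and value are arbitrary:

```java
import java.util.Map;
import org.apache.kafka.common.security.auth.SaslExtensions;

public class SaslExtensionsExample {
    public static void main(String[] args) {
        SaslExtensions a = new SaslExtensions(Map.of("traceId", "abc123"));
        SaslExtensions b = new SaslExtensions(Map.of("traceId", "abc123"));

        // Equality is by reference only, even though the underlying maps are equal.
        System.out.println(a.equals(b)); // false
        System.out.println(a.equals(a)); // true
        System.out.println(a.map());     // {traceId=abc123}

        // empty() returns a new, unique instance each call; do not cache it
        // across Subjects (see the class notes above).
        System.out.println(SaslExtensions.empty().equals(SaslExtensions.empty())); // false
    }
}
```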
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/SaslExtensionsCallback.html b/static/41/javadoc/org/apache/kafka/common/security/auth/SaslExtensionsCallback.html new file mode 100644 index 000000000..6571c74d0

SaslExtensionsCallback (kafka 4.1.0 API)

Class SaslExtensionsCallback

java.lang.Object
  org.apache.kafka.common.security.auth.SaslExtensionsCallback

All Implemented Interfaces: Callback

public class SaslExtensionsCallback extends Object implements Callback

Optional callback used for SASL mechanisms if any extensions need to be set in the SASL exchange.

Constructor Details

  public SaslExtensionsCallback()

Method Details

  public SaslExtensions extensions()
      Returns an always non-null SaslExtensions consisting of the extension names and values that are sent by the client to the server in the initial client SASL authentication message. The default value is SaslExtensions.empty(), so that if this callback is unhandled the client will still see a non-null value.

  public void extensions(SaslExtensions extensions)
      Sets the SASL extensions on this callback.
      Parameters: extensions - the mandatory extensions to set
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/SecurityProtocol.html b/static/41/javadoc/org/apache/kafka/common/security/auth/SecurityProtocol.html new file mode 100644 index 000000000..6ea459422

SecurityProtocol (kafka 4.1.0 API)

Enum Class SecurityProtocol

java.lang.Object
  java.lang.Enum<SecurityProtocol>
    org.apache.kafka.common.security.auth.SecurityProtocol

All Implemented Interfaces: Serializable, Comparable<SecurityProtocol>, Constable

public enum SecurityProtocol extends Enum<SecurityProtocol>

Enum Constant Details

  PLAINTEXT
      Un-authenticated, non-encrypted channel.

  SSL
      SSL channel.

  SASL_PLAINTEXT
      SASL authenticated, non-encrypted channel.

  SASL_SSL
      SASL authenticated, SSL channel.

Field Details

  public final short id
      The permanent and immutable id of a security protocol -- this can't change, and must match kafka.cluster.SecurityProtocol.

  public final String name
      Name of the security protocol. This may be used by client configuration.

Method Details

  public static SecurityProtocol[] values()
      Returns an array containing the constants of this enum class, in the order they are declared.

  public static SecurityProtocol valueOf(String name)
      Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class; extraneous whitespace characters are not permitted. Throws IllegalArgumentException if this enum class has no constant with the specified name, and NullPointerException if the argument is null.

  public static List<String> names()

  public static SecurityProtocol forId(short id)

  public static SecurityProtocol forName(String name)
      Case-insensitive lookup by protocol name.
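A short usage sketch of the lookup helpers; the printed list assumes declaration order, as returned by values():

```java
import org.apache.kafka.common.security.auth.SecurityProtocol;

public class SecurityProtocolExample {
    public static void main(String[] args) {
        // Case-insensitive lookup by name, and lookup by the wire-protocol id.
        SecurityProtocol byName = SecurityProtocol.forName("sasl_ssl");
        SecurityProtocol byId = SecurityProtocol.forId(byName.id);

        System.out.println(byName);                   // SASL_SSL
        System.out.println(byName == byId);           // true
        System.out.println(SecurityProtocol.names()); // [PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL]
    }
}
```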
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/SecurityProviderCreator.html b/static/41/javadoc/org/apache/kafka/common/security/auth/SecurityProviderCreator.html new file mode 100644 index 000000000..b128b9fd7

SecurityProviderCreator (kafka 4.1.0 API)

Interface SecurityProviderCreator

All Superinterfaces: Configurable

public interface SecurityProviderCreator extends Configurable

An interface for generating security providers.

Method Details

  default void configure(Map<String,?> config)
      Configures the generator that creates the security provider.
      Specified by: configure in interface Configurable
      Parameters: config - configuration parameters for initialising the security provider

  Provider getProvider()
      Generate the configured security provider.
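An illustrative creator sketch; the provider name, version string and config key are hypothetical, not part of the Kafka API:

```java
import java.security.Provider;
import java.util.Map;
import org.apache.kafka.common.security.auth.SecurityProviderCreator;

// Illustrative creator that returns a custom JCA provider.
public class ExampleProviderCreator implements SecurityProviderCreator {
    private String providerName = "ExampleProvider"; // hypothetical default

    @Override
    public void configure(Map<String, ?> config) {
        Object name = config.get("example.security.provider.name"); // hypothetical key
        if (name != null)
            providerName = name.toString();
    }

    @Override
    public Provider getProvider() {
        // Anonymous subclass because java.security.Provider's constructor is protected.
        return new Provider(providerName, "1.0", "Example security provider") { };
    }
}
```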
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/SslAuthenticationContext.html b/static/41/javadoc/org/apache/kafka/common/security/auth/SslAuthenticationContext.html new file mode 100644 index 000000000..f0fb02b9d

SslAuthenticationContext (kafka 4.1.0 API)

Class SslAuthenticationContext

java.lang.Object
  org.apache.kafka.common.security.auth.SslAuthenticationContext

All Implemented Interfaces: AuthenticationContext

public class SslAuthenticationContext extends Object implements AuthenticationContext
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/SslEngineFactory.html b/static/41/javadoc/org/apache/kafka/common/security/auth/SslEngineFactory.html new file mode 100644 index 000000000..c3af2f339 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/auth/SslEngineFactory.html @@ -0,0 +1,251 @@ + + + + +SslEngineFactory (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

Interface SslEngineFactory

All Superinterfaces:
    AutoCloseable, Closeable, Configurable

public interface SslEngineFactory extends Configurable, Closeable

Plugin interface for allowing creation of an SSLEngine object in a custom way. For example, you can use this to customize loading your key material and trust material needed for the SSLContext. This is complementary to the existing Java Security Provider mechanism, which allows the entire provider to be replaced with a custom provider. In scenarios where only the configuration mechanism for SSL engines needs to be updated, this interface provides a convenient method for overriding the default implementation.

Method Details

createClientSslEngine
    SSLEngine createClientSslEngine(String peerHost, int peerPort, String endpointIdentification)
    Creates a new SSLEngine object to be used by the client.
    Parameters:
        peerHost - The peer host to use. This is used in client mode if endpoint validation is enabled.
        peerPort - The peer port to use. This is a hint and not used for validation.
        endpointIdentification - Endpoint identification algorithm for client mode.
    Returns:
        The new SSLEngine.

createServerSslEngine
    SSLEngine createServerSslEngine(String peerHost, int peerPort)
    Creates a new SSLEngine object to be used by the server.
    Parameters:
        peerHost - The peer host to use. This is a hint and not used for validation.
        peerPort - The peer port to use. This is a hint and not used for validation.
    Returns:
        The new SSLEngine.

shouldBeRebuilt
    boolean shouldBeRebuilt(Map<String,Object> nextConfigs)
    Returns true if the SSLEngine needs to be rebuilt. This method will be called when reconfiguration is triggered on the SslFactory used to create SSL engines. Based on the new configs provided in nextConfigs, this method decides whether the underlying SSLEngine object needs to be rebuilt. If this method returns true, the SslFactory will create a new instance of this object with nextConfigs and run other checks before deciding to use the new object for new incoming connection requests. Existing connections are not impacted by this and will not see any changes done as part of reconfiguration.
    For example, if the implementation depends on file-based key material, it can check whether the file was updated compared to the previous/last-loaded timestamp and return true.
    Parameters:
        nextConfigs - The new configuration we want to use.
    Returns:
        True only if the underlying SSLEngine object should be rebuilt.

reconfigurableConfigs
    Set<String> reconfigurableConfigs()
    Returns the names of configs that may be reconfigured.
    Returns:
        Names of configuration options that are dynamically reconfigurable.

keystore
    KeyStore keystore()
    Returns the keystore configured for this factory.
    Returns:
        The keystore for this factory, or null if a keystore is not configured.

truststore
    KeyStore truststore()
    Returns the truststore configured for this factory.
    Returns:
        The truststore for this factory, or null if a truststore is not configured.
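As a quick illustration of the contract above, here is a minimal sketch of a custom SslEngineFactory. It is not part of the generated API documentation; the `buildContext` helper is hypothetical, and a real implementation would load its own key and trust material rather than falling back to the JVM default context.

```java
// Minimal sketch of a custom SslEngineFactory (illustrative only).
// buildContext() is a hypothetical helper for loading key/trust material.
import org.apache.kafka.common.security.auth.SslEngineFactory;

import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLParameters;
import java.io.IOException;
import java.security.KeyStore;
import java.util.Map;
import java.util.Set;

public class CustomSslEngineFactory implements SslEngineFactory {

    private volatile SSLContext sslContext;

    @Override
    public void configure(Map<String, ?> configs) {
        // Called with the connection's SSL configs before any engines are created.
        this.sslContext = buildContext(configs);
    }

    @Override
    public SSLEngine createClientSslEngine(String peerHost, int peerPort, String endpointIdentification) {
        SSLEngine engine = sslContext.createSSLEngine(peerHost, peerPort);
        engine.setUseClientMode(true);
        SSLParameters params = engine.getSSLParameters();
        params.setEndpointIdentificationAlgorithm(endpointIdentification);
        engine.setSSLParameters(params);
        return engine;
    }

    @Override
    public SSLEngine createServerSslEngine(String peerHost, int peerPort) {
        SSLEngine engine = sslContext.createSSLEngine(peerHost, peerPort);
        engine.setUseClientMode(false);
        return engine;
    }

    @Override
    public boolean shouldBeRebuilt(Map<String, Object> nextConfigs) {
        // e.g. compare the key material's file timestamp against the last load;
        // always returning false means reconfiguration never rebuilds the engine.
        return false;
    }

    @Override
    public Set<String> reconfigurableConfigs() {
        return Set.of();   // nothing dynamically reconfigurable in this sketch
    }

    @Override
    public KeyStore keystore() {
        return null;       // not keystore-backed in this sketch
    }

    @Override
    public KeyStore truststore() {
        return null;
    }

    @Override
    public void close() throws IOException {
        // release any resources held for the SSLContext
    }

    private SSLContext buildContext(Map<String, ?> configs) {
        try {
            return SSLContext.getDefault(); // placeholder for custom key/trust material loading
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
```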
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/package-summary.html b/static/41/javadoc/org/apache/kafka/common/security/auth/package-summary.html
new file mode 100644
index 000000000..63bc45a9c

org.apache.kafka.common.security.auth (kafka 4.1.0 API)

Package org.apache.kafka.common.security.auth

package org.apache.kafka.common.security.auth

Provides pluggable interfaces for implementing Kafka authentication mechanisms.
diff --git a/static/41/javadoc/org/apache/kafka/common/security/auth/package-tree.html b/static/41/javadoc/org/apache/kafka/common/security/auth/package-tree.html
new file mode 100644
index 000000000..7199b253e

org.apache.kafka.common.security.auth Class Hierarchy (kafka 4.1.0 API)

Hierarchy For Package org.apache.kafka.common.security.auth

Package Hierarchies:

Class Hierarchy

Interface Hierarchy

Enum Class Hierarchy
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/BrokerJwtValidator.ClaimSupplier.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/BrokerJwtValidator.ClaimSupplier.html
new file mode 100644
index 000000000..f60165ba3

BrokerJwtValidator.ClaimSupplier (kafka 4.1.0 API)

Interface BrokerJwtValidator.ClaimSupplier<T>

Enclosing class:
    BrokerJwtValidator

public static interface BrokerJwtValidator.ClaimSupplier<T>

Method Details

get
    T get() throws org.jose4j.jwt.MalformedClaimException
    Throws:
        org.jose4j.jwt.MalformedClaimException
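Since the interface has a single `get()` method that may throw jose4j's MalformedClaimException, claim accessors can be passed as method references. The snippet below is an illustrative sketch only; `JwtClaims` is the jose4j claims type and the subject claim is just an example.

```java
// Illustrative sketch: passing a jose4j claim accessor as a ClaimSupplier.
import org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator;
import org.jose4j.jwt.JwtClaims;
import org.jose4j.jwt.MalformedClaimException;

public class ClaimSupplierExample {
    static String subjectOf(JwtClaims claims) throws MalformedClaimException {
        BrokerJwtValidator.ClaimSupplier<String> subject = claims::getSubject;
        return subject.get();
    }
}
```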
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/BrokerJwtValidator.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/BrokerJwtValidator.html
new file mode 100644
index 000000000..a6a8e7f79

BrokerJwtValidator (kafka 4.1.0 API)

Class BrokerJwtValidator

java.lang.Object
    org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator

All Implemented Interfaces:
    Closeable, AutoCloseable, org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable, JwtValidator

public class BrokerJwtValidator extends Object implements JwtValidator

BrokerJwtValidator is an implementation of JwtValidator that is used by the broker to perform more extensive validation of the JWT access token that is received from the client, but ultimately from posting the client credentials to the OAuth/OIDC provider's token endpoint.

The validation steps performed (primarily by the jose4j library) are:

1. Basic structural validation of the b64token value as defined in RFC 6750 Section 2.1
2. Basic conversion of the token into an in-memory data structure
3. Presence of the scope, exp, subject, iss, and iat claims
4. Signature matching validation against the kid and those provided by the OAuth/OIDC provider's JWKS

Nested Class Summary

    static interface BrokerJwtValidator.ClaimSupplier<T>

Constructor Details

BrokerJwtValidator
    public BrokerJwtValidator()
    A public, no-args constructor is necessary for instantiation via configuration.

Method Details

configure
    public void configure(Map<String,?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries)
    Specified by:
        configure in interface org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable

validate
    public OAuthBearerToken validate(String accessToken) throws JwtValidatorException
    Accepts an OAuth JWT access token in base-64 encoded format, validates it, and returns an OAuthBearerToken.
    Specified by:
        validate in interface JwtValidator
    Parameters:
        accessToken - Non-null JWT access token
    Returns:
        OAuthBearerToken
    Throws:
        JwtValidatorException - Thrown on errors performing validation of the given token

Methods inherited from class java.lang.Object:
    equals, getClass, hashCode, notify, notifyAll, toString, wait

Methods inherited from interface org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable:
    close
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/ClientCredentialsJwtRetriever.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/ClientCredentialsJwtRetriever.html
new file mode 100644
index 000000000..b64142286

ClientCredentialsJwtRetriever (kafka 4.1.0 API)

Class ClientCredentialsJwtRetriever

java.lang.Object
    org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever

All Implemented Interfaces:
    Closeable, AutoCloseable, org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable, JwtRetriever

public class ClientCredentialsJwtRetriever extends Object implements JwtRetriever

ClientCredentialsJwtRetriever is a JwtRetriever that performs the steps to request a JWT from an OAuth/OIDC identity provider using the client_credentials grant type. This grant type is commonly used for non-interactive "service accounts" where there is no user available to interactively supply credentials.

This JwtRetriever is enabled by specifying its class name in the Kafka configuration. For client use, specify the class name in the sasl.oauthbearer.jwt.retriever.class configuration like so:

    sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever

If using this JwtRetriever on the broker side (for inter-broker communication), the configuration should be specified with a listener-based property:

    listener.name.<listener name>.oauthbearer.sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever

The ClientCredentialsJwtRetriever also uses the following configuration:

    sasl.oauthbearer.client.credentials.client.id
    sasl.oauthbearer.client.credentials.client.secret
    sasl.oauthbearer.scope
    sasl.oauthbearer.token.endpoint.url

Please refer to the official Apache Kafka documentation for more information on these, and related, configuration.

Previous versions of this implementation used sasl.jaas.config to specify attributes such as clientId, clientSecret, and scope. These will still work, but if the configuration for each of these is specified, it will be used instead of the JAAS option.

Here's an example of the JAAS configuration for a Kafka client:

    sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required ;

    sasl.oauthbearer.client.credentials.client.id=jdoe
    sasl.oauthbearer.client.credentials.client.secret=$3cr3+
    sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
    sasl.oauthbearer.scope=my-application-scope
    sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/v1/token

Constructor Details

ClientCredentialsJwtRetriever
    public ClientCredentialsJwtRetriever()

Method Details

configure
    public void configure(Map<String,?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries)
    Specified by:
        configure in interface org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable

retrieve
    public String retrieve() throws JwtRetrieverException
    Description copied from interface: JwtRetriever. Retrieves a JWT access token in its serialized three-part form; see JwtRetriever.retrieve() for the full contract.
    Specified by:
        retrieve in interface JwtRetriever
    Returns:
        Non-null JWT access token string
    Throws:
        JwtRetrieverException - Thrown on errors related to IO during retrieval

close
    public void close() throws IOException
    Specified by:
        close in interfaces AutoCloseable, Closeable, and org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable
    Throws:
        IOException
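For orientation, the properties documented above can be assembled programmatically as shown in the sketch below. The values are placeholders copied from the class description; security.protocol, sasl.mechanism, and the login callback handler class are typical OAUTHBEARER client settings assumed here rather than anything this page prescribes.

```java
// Sketch: client_credentials retriever configuration as Java properties.
// Endpoint URL, client id/secret and scope are placeholders.
import java.util.Properties;

public class ClientCredentialsConfigExample {
    public static Properties oauthClientProps() {
        Properties props = new Properties();
        props.put("security.protocol", "SASL_SSL");    // assumed typical setting
        props.put("sasl.mechanism", "OAUTHBEARER");    // assumed typical setting
        props.put("sasl.login.callback.handler.class",
                "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler");
        props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required ;");
        props.put("sasl.oauthbearer.jwt.retriever.class",
                "org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever");
        props.put("sasl.oauthbearer.client.credentials.client.id", "jdoe");
        props.put("sasl.oauthbearer.client.credentials.client.secret", "$3cr3+");
        props.put("sasl.oauthbearer.scope", "my-application-scope");
        props.put("sasl.oauthbearer.token.endpoint.url", "https://example.com/oauth2/v1/token");
        return props;
    }
}
```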
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/ClientJwtValidator.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/ClientJwtValidator.html
new file mode 100644
index 000000000..1645709b3

ClientJwtValidator (kafka 4.1.0 API)

Class ClientJwtValidator

java.lang.Object
    org.apache.kafka.common.security.oauthbearer.ClientJwtValidator

All Implemented Interfaces:
    Closeable, AutoCloseable, org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable, JwtValidator

public class ClientJwtValidator extends Object implements JwtValidator

ClientJwtValidator is an implementation of JwtValidator that is used by the client to perform some rudimentary validation of the JWT access token that is received as part of the response from posting the client credentials to the OAuth/OIDC provider's token endpoint.

The validation steps performed are:

1. Basic structural validation of the b64token value as defined in RFC 6750 Section 2.1
2. Basic conversion of the token into an in-memory map
3. Presence of the scope, exp, subject, and iat claims

Constructor Details

ClientJwtValidator
    public ClientJwtValidator()

Method Details

configure
    public void configure(Map<String,?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries)
    Specified by:
        configure in interface org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable

validate
    public OAuthBearerToken validate(String accessToken) throws JwtValidatorException
    Accepts an OAuth JWT access token in base-64 encoded format, validates it, and returns an OAuthBearerToken.
    Specified by:
        validate in interface JwtValidator
    Parameters:
        accessToken - Non-null JWT access token
    Returns:
        OAuthBearerToken
    Throws:
        JwtValidatorException - Thrown on errors performing validation of the given token
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/DefaultJwtRetriever.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/DefaultJwtRetriever.html
new file mode 100644
index 000000000..89d4e3ff4

DefaultJwtRetriever (kafka 4.1.0 API)

Class DefaultJwtRetriever

java.lang.Object
    org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever

All Implemented Interfaces:
    Closeable, AutoCloseable, org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable, JwtRetriever

public class DefaultJwtRetriever extends Object implements JwtRetriever

DefaultJwtRetriever instantiates and delegates JwtRetriever API calls to an embedded implementation based on configuration:

    If the value of sasl.oauthbearer.token.endpoint.url is set to a value that starts with the file protocol (e.g. file:/tmp/path/to/a/static-jwt.json), an instance of FileJwtRetriever will be used as the underlying JwtRetriever. Otherwise, the URL is assumed to be an HTTP/HTTPS-based URL, and an instance of ClientCredentialsRequestFormatter will be created and used.

The configuration required by the individual JwtRetriever classes will likely differ. Please refer to the official Apache Kafka documentation for more information on these, and related, configuration.

Constructor Details

DefaultJwtRetriever
    public DefaultJwtRetriever()

Method Details

configure
    public void configure(Map<String,?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries)
    Specified by:
        configure in interface org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable

retrieve
    public String retrieve() throws JwtRetrieverException
    Description copied from interface: JwtRetriever. Retrieves a JWT access token in its serialized three-part form; see JwtRetriever.retrieve() for the full contract.
    Specified by:
        retrieve in interface JwtRetriever
    Returns:
        Non-null JWT access token string
    Throws:
        JwtRetrieverException - Thrown on errors related to IO during retrieval

close
    public void close() throws IOException
    Specified by:
        close in interfaces AutoCloseable, Closeable, and org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable
    Throws:
        IOException
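The file-based dispatch described above can be sketched as a small configuration fragment. This is illustrative only: the path is a placeholder, the retriever class property is shown explicitly even if this class is the default, and the remaining client properties are omitted.

```java
// Sketch: selecting the file-based retrieval path via a file: token endpoint URL.
import java.util.Properties;

public class DefaultJwtRetrieverConfigExample {
    public static Properties staticJwtProps() {
        Properties props = new Properties();
        props.put("sasl.oauthbearer.jwt.retriever.class",
                "org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever");
        // A file: URL selects the file-based retriever; an http(s): URL selects
        // the client_credentials flow, per the class description above.
        props.put("sasl.oauthbearer.token.endpoint.url", "file:/tmp/path/to/a/static-jwt.json");
        return props;
    }
}
```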
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/DefaultJwtValidator.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/DefaultJwtValidator.html
new file mode 100644
index 000000000..2f45dc15a

DefaultJwtValidator (kafka 4.1.0 API)

Class DefaultJwtValidator

java.lang.Object
    org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator

All Implemented Interfaces:
    Closeable, AutoCloseable, org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable, JwtValidator

public class DefaultJwtValidator extends Object implements JwtValidator

This JwtValidator uses the delegation approach, instantiating and delegating calls to a more concrete implementation. The underlying implementation is determined by the presence/absence of the VerificationKeyResolver: if it's present, a BrokerJwtValidator is created, otherwise a ClientJwtValidator is created.

Constructor Details

DefaultJwtValidator
    public DefaultJwtValidator()

DefaultJwtValidator
    public DefaultJwtValidator(org.apache.kafka.common.security.oauthbearer.internals.secured.CloseableVerificationKeyResolver verificationKeyResolver)

Method Details

configure
    public void configure(Map<String,?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries)
    Specified by:
        configure in interface org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable

validate
    public OAuthBearerToken validate(String accessToken) throws JwtValidatorException
    Description copied from interface: JwtValidator. Accepts an OAuth JWT access token in base-64 encoded format, validates it, and returns an OAuthBearerToken.
    Specified by:
        validate in interface JwtValidator
    Parameters:
        accessToken - Non-null JWT access token
    Returns:
        OAuthBearerToken
    Throws:
        JwtValidatorException - Thrown on errors performing validation of the given token

close
    public void close() throws IOException
    Specified by:
        close in interfaces AutoCloseable, Closeable, and org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable
    Throws:
        IOException
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/FileJwtRetriever.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/FileJwtRetriever.html
new file mode 100644
index 000000000..2a9b4249a

FileJwtRetriever (kafka 4.1.0 API)

Class FileJwtRetriever

java.lang.Object
    org.apache.kafka.common.security.oauthbearer.FileJwtRetriever

All Implemented Interfaces:
    Closeable, AutoCloseable, org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable, JwtRetriever

public class FileJwtRetriever extends Object implements JwtRetriever

FileJwtRetriever is a JwtRetriever that will load the contents of a file, interpreting them as a JWT access key in the serialized form.

Constructor Details

FileJwtRetriever
    public FileJwtRetriever()

Method Details

configure
    public void configure(Map<String,?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries)
    Specified by:
        configure in interface org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable

retrieve
    public String retrieve() throws JwtRetrieverException
    Description copied from interface: JwtRetriever. Retrieves a JWT access token in its serialized three-part form; see JwtRetriever.retrieve() for the full contract.
    Specified by:
        retrieve in interface JwtRetriever
    Returns:
        Non-null JWT access token string
    Throws:
        JwtRetrieverException - Thrown on errors related to IO during retrieval
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/JwtBearerJwtRetriever.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/JwtBearerJwtRetriever.html
new file mode 100644
index 000000000..2f35a0c89

JwtBearerJwtRetriever (kafka 4.1.0 API)

Class JwtBearerJwtRetriever

java.lang.Object
    org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever

All Implemented Interfaces:
    Closeable, AutoCloseable, org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable, JwtRetriever

public class JwtBearerJwtRetriever extends Object implements JwtRetriever

JwtBearerJwtRetriever is a JwtRetriever that performs the steps to request a JWT from an OAuth/OIDC identity provider using the urn:ietf:params:oauth:grant-type:jwt-bearer grant type. This grant type is used for machine-to-machine "service accounts".

This JwtRetriever is enabled by specifying its class name in the Kafka configuration. For client use, specify the class name in the sasl.oauthbearer.jwt.retriever.class configuration like so:

    sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever

If using this JwtRetriever on the broker side (for inter-broker communication), the configuration should be specified with a listener-based property:

    listener.name.<listener name>.oauthbearer.sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever

The JwtBearerJwtRetriever also uses the following configuration:

    sasl.oauthbearer.assertion.algorithm
    sasl.oauthbearer.assertion.claim.aud
    sasl.oauthbearer.assertion.claim.exp.seconds
    sasl.oauthbearer.assertion.claim.iss
    sasl.oauthbearer.assertion.claim.jti.include
    sasl.oauthbearer.assertion.claim.nbf.seconds
    sasl.oauthbearer.assertion.claim.sub
    sasl.oauthbearer.assertion.file
    sasl.oauthbearer.assertion.private.key.file
    sasl.oauthbearer.assertion.private.key.passphrase
    sasl.oauthbearer.assertion.template.file
    sasl.oauthbearer.jwt.retriever.class
    sasl.oauthbearer.scope
    sasl.oauthbearer.token.endpoint.url

Please refer to the official Apache Kafka documentation for more information on these, and related, configuration.

Here's an example of the JAAS configuration for a Kafka client:

    sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required ;

    sasl.oauthbearer.assertion.algorithm=RS256
    sasl.oauthbearer.assertion.claim.aud=my-application-audience
    sasl.oauthbearer.assertion.claim.exp.seconds=600
    sasl.oauthbearer.assertion.claim.iss=my-oauth-issuer
    sasl.oauthbearer.assertion.claim.jti.include=true
    sasl.oauthbearer.assertion.claim.nbf.seconds=120
    sasl.oauthbearer.assertion.claim.sub=kafka-app-1234
    sasl.oauthbearer.assertion.private.key.file=/path/to/private.key
    sasl.oauthbearer.assertion.private.key.passphrase=$3cr3+
    sasl.oauthbearer.assertion.template.file=/path/to/assertion-template.json
    sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever
    sasl.oauthbearer.scope=my-application-scope
    sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/v1/token

Constructor Details

JwtBearerJwtRetriever
    public JwtBearerJwtRetriever()

JwtBearerJwtRetriever
    public JwtBearerJwtRetriever(org.apache.kafka.common.utils.Time time)

Method Details

configure
    public void configure(Map<String,?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries)
    Specified by:
        configure in interface org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable

retrieve
    public String retrieve() throws JwtRetrieverException
    Description copied from interface: JwtRetriever. Retrieves a JWT access token in its serialized three-part form; see JwtRetriever.retrieve() for the full contract.
    Specified by:
        retrieve in interface JwtRetriever
    Returns:
        Non-null JWT access token string
    Throws:
        JwtRetrieverException - Thrown on errors related to IO during retrieval

close
    public void close() throws IOException
    Specified by:
        close in interfaces AutoCloseable, Closeable, and org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable
    Throws:
        IOException
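As with the client_credentials retriever, the jwt-bearer configuration above can be assembled programmatically. The sketch below uses only a subset of the options listed in the class description, with placeholder values copied from its example; security.protocol and sasl.mechanism are assumed typical OAUTHBEARER client settings.

```java
// Sketch: jwt-bearer retriever configuration as Java properties (placeholders only).
import java.util.Properties;

public class JwtBearerConfigExample {
    public static Properties jwtBearerProps() {
        Properties props = new Properties();
        props.put("security.protocol", "SASL_SSL");    // assumed typical setting
        props.put("sasl.mechanism", "OAUTHBEARER");    // assumed typical setting
        props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required ;");
        props.put("sasl.oauthbearer.jwt.retriever.class",
                "org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever");
        props.put("sasl.oauthbearer.assertion.algorithm", "RS256");
        props.put("sasl.oauthbearer.assertion.claim.iss", "my-oauth-issuer");
        props.put("sasl.oauthbearer.assertion.claim.sub", "kafka-app-1234");
        props.put("sasl.oauthbearer.assertion.private.key.file", "/path/to/private.key");
        props.put("sasl.oauthbearer.scope", "my-application-scope");
        props.put("sasl.oauthbearer.token.endpoint.url", "https://example.com/oauth2/v1/token");
        return props;
    }
}
```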
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/JwtRetriever.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/JwtRetriever.html
new file mode 100644
index 000000000..4fa1c23ae

JwtRetriever (kafka 4.1.0 API)

Interface JwtRetriever

All Superinterfaces:
    AutoCloseable, Closeable, org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable

All Known Implementing Classes:
    ClientCredentialsJwtRetriever, DefaultJwtRetriever, FileJwtRetriever, JwtBearerJwtRetriever

public interface JwtRetriever extends org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable

A JwtRetriever is the internal API by which the login module will retrieve an access token for use in authorization by the broker. The implementation may involve authentication to a remote system, or it can be as simple as loading the contents of a file or configuration setting.

Retrieval is a separate concern from validation, so it isn't necessary for the JwtRetriever implementation to validate the integrity of the JWT access token.

Methods inherited from interface org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable:
    close, configure

Method Details

retrieve
    String retrieve() throws JwtRetrieverException
    Retrieves a JWT access token in its serialized three-part form. The implementation is free to determine how it should be retrieved but should not perform validation on the result.
    Note: This is a blocking function, and callers should be aware that the implementation may be communicating over a network, with the file system, coordinating threads, etc. The facility in the LoginModule from which this is ultimately called does not provide an asynchronous approach.
    Returns:
        Non-null JWT access token string
    Throws:
        JwtRetrieverException - Thrown on errors related to IO during retrieval
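To make the retrieval contract concrete, here is a hedged sketch of a trivial custom retriever. It returns a token taken from a made-up configuration key; the configure/close signatures mirror the OAuthBearerConfigurable methods shown on the implementing classes above.

```java
// Sketch: a trivial JwtRetriever returning a statically configured token.
// "example.static.jwt" is a made-up key used only for this illustration.
import org.apache.kafka.common.security.oauthbearer.JwtRetriever;
import org.apache.kafka.common.security.oauthbearer.JwtRetrieverException;

import javax.security.auth.login.AppConfigurationEntry;
import java.io.IOException;
import java.util.List;
import java.util.Map;

public class StaticJwtRetriever implements JwtRetriever {

    private volatile String token;

    @Override
    public void configure(Map<String, ?> configs,
                          String saslMechanism,
                          List<AppConfigurationEntry> jaasConfigEntries) {
        Object value = configs.get("example.static.jwt");
        this.token = value != null ? value.toString() : null;
    }

    @Override
    public String retrieve() throws JwtRetrieverException {
        // No validation here: retrieval is a separate concern from validation.
        if (token == null)
            throw new JwtRetrieverException("no token was configured");
        return token;
    }

    @Override
    public void close() throws IOException {
        // nothing to release in this sketch
    }
}
```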
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/JwtRetrieverException.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/JwtRetrieverException.html
new file mode 100644
index 000000000..732ca3764

JwtRetrieverException (kafka 4.1.0 API)

Class JwtRetrieverException

java.lang.Object
    java.lang.Throwable
        java.lang.Exception
            java.lang.RuntimeException
                org.apache.kafka.common.KafkaException
                    org.apache.kafka.common.security.oauthbearer.JwtRetrieverException

All Implemented Interfaces:
    Serializable

public class JwtRetrieverException extends KafkaException

A JwtRetrieverException is thrown in cases where the JWT cannot be retrieved.

Constructor Details

JwtRetrieverException
    public JwtRetrieverException(String message)

JwtRetrieverException
    public JwtRetrieverException(Throwable cause)

JwtRetrieverException
    public JwtRetrieverException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/JwtValidator.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/JwtValidator.html
new file mode 100644
index 000000000..9661cb75e

JwtValidator (kafka 4.1.0 API)

Interface JwtValidator

All Superinterfaces:
    AutoCloseable, Closeable, org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable

All Known Implementing Classes:
    BrokerJwtValidator, ClientJwtValidator, DefaultJwtValidator

public interface JwtValidator extends org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable

An instance of JwtValidator acts as a function object that, given an access token in base-64 encoded JWT format, can parse the data, perform validation, and construct an OAuthBearerToken for use by the caller.

The primary reason for this abstraction is that client and broker may have different libraries available to them to perform these operations. Additionally, the exact steps for validation may differ between implementations. To put this more concretely: the implementation in the Kafka client does not come bundled with a robust library to perform this logic, and it is not the responsibility of the client to perform vigorous validation. However, the Kafka broker ships with a richer set of library dependencies that can perform more substantial validation and is also expected to perform a trust-but-verify test of the access token's signature.

Methods inherited from interface org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable:
    close, configure

Method Summary

validate
    OAuthBearerToken validate(String accessToken) throws JwtValidatorException
    Accepts an OAuth JWT access token in base-64 encoded format, validates it, and returns an OAuthBearerToken.
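The division of labor between the two abstractions can be summarized in a few lines. This is a hedged sketch that assumes both instances have already been configured (configuration is omitted); the exceptions thrown by retrieve() and validate() are unchecked, so no explicit handling is shown.

```java
// Sketch: retrieve the serialized JWT, then turn it into an OAuthBearerToken.
import org.apache.kafka.common.security.oauthbearer.JwtRetriever;
import org.apache.kafka.common.security.oauthbearer.JwtValidator;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken;

public class RetrieveAndValidateExample {
    public static OAuthBearerToken login(JwtRetriever retriever, JwtValidator validator) {
        String serializedJwt = retriever.retrieve();   // may block (network, file I/O)
        return validator.validate(serializedJwt);      // parse + validate, build the token
    }
}
```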
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/JwtValidatorException.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/JwtValidatorException.html
new file mode 100644
index 000000000..f48825e58

JwtValidatorException (kafka 4.1.0 API)

Class JwtValidatorException

java.lang.Object
    java.lang.Throwable
        java.lang.Exception
            java.lang.RuntimeException
                org.apache.kafka.common.KafkaException
                    org.apache.kafka.common.security.oauthbearer.JwtValidatorException

All Implemented Interfaces:
    Serializable

public class JwtValidatorException extends KafkaException

A JwtValidatorException is thrown in cases where the validity of a JWT cannot be determined. It is intended to be used when errors arise within the processing of a CallbackHandler.handle(Callback[]). This error, however, is not thrown from that method directly.

Constructor Details

JwtValidatorException
    public JwtValidatorException(String message)

JwtValidatorException
    public JwtValidatorException(Throwable cause)

JwtValidatorException
    public JwtValidatorException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerExtensionsValidatorCallback.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerExtensionsValidatorCallback.html
new file mode 100644
index 000000000..482cb7124

OAuthBearerExtensionsValidatorCallback (kafka 4.1.0 API)

Class OAuthBearerExtensionsValidatorCallback

java.lang.Object
    org.apache.kafka.common.security.oauthbearer.OAuthBearerExtensionsValidatorCallback

All Implemented Interfaces:
    Callback

public class OAuthBearerExtensionsValidatorCallback extends Object implements Callback

A Callback for use by the SaslServer implementation when it needs to validate the SASL extensions for the OAUTHBEARER mechanism. Callback handlers should use the valid(String) method to communicate valid extensions back to the SASL server, and the error(String, String) method to communicate validation errors back to the SASL server. As per RFC-7628 (https://tools.ietf.org/html/rfc7628#section-3.1), unknown extensions must be ignored by the server. The callback handler implementation should simply ignore unknown extensions, calling neither error(String, String) nor valid(String). Callback handlers should communicate other problems by raising an IOException.

The OAuth bearer token is provided in the callback for better context in extension validation. It is very important that token validation is done in its own OAuthBearerValidatorCallback regardless of provided extensions, as they are inherently insecure.

Constructor Details

OAuthBearerExtensionsValidatorCallback
    public OAuthBearerExtensionsValidatorCallback(OAuthBearerToken token, SaslExtensions extensions)

Method Details

token
    public OAuthBearerToken token()
    Returns:
        OAuthBearerToken - the OAuth bearer token of the client

inputExtensions
    public SaslExtensions inputExtensions()
    Returns:
        SaslExtensions consisting of the unvalidated extension names and values that were sent by the client

validatedExtensions
    public Map<String,String> validatedExtensions()
    Returns:
        An unmodifiable Map consisting of the extension names and values that were validated and recognized by the server

invalidExtensions
    public Map<String,String> invalidExtensions()
    Returns:
        An immutable Map consisting of the name -> error message of extensions which failed validation

ignoredExtensions
    public Map<String,String> ignoredExtensions()
    Returns:
        An immutable Map consisting of the extensions that have neither been validated nor invalidated

valid
    public void valid(String extensionName)
    Validates a specific extension in the original inputExtensions map.
    Parameters:
        extensionName - the name of the extension which was validated

error
    public void error(String invalidExtensionName, String errorMessage)
    Sets the error value for a specific extension key-value pair if validation has failed.
    Parameters:
        invalidExtensionName - the mandatory extension name which caused the validation failure
        errorMessage - error message describing why the validation failed
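The sketch below shows how a broker-side callback handler might process this callback using only the methods documented above. It is illustrative, not the library's implementation: SaslExtensions.map() is assumed to expose the raw name/value pairs, and the "traceId" rule is invented for the example.

```java
// Sketch: validating client-supplied SASL extensions in a callback handler branch.
import org.apache.kafka.common.security.auth.SaslExtensions;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerExtensionsValidatorCallback;

import java.util.Map;

public class ExtensionsValidationExample {
    static void handleExtensions(OAuthBearerExtensionsValidatorCallback callback) {
        SaslExtensions extensions = callback.inputExtensions();
        for (Map.Entry<String, String> extension : extensions.map().entrySet()) {
            String name = extension.getKey();
            String value = extension.getValue();
            if ("traceId".equals(name)) {                          // illustrative rule
                if (value != null && !value.isEmpty())
                    callback.valid(name);                          // mark as validated
                else
                    callback.error(name, "traceId must be non-empty");
            }
            // Unknown extensions are left untouched so the server ignores them,
            // as required by the class description above.
        }
    }
}
```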
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandler.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandler.html
new file mode 100644
index 000000000..4772b4a86

OAuthBearerLoginCallbackHandler (kafka 4.1.0 API)

Class OAuthBearerLoginCallbackHandler

java.lang.Object
    org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler

All Implemented Interfaces:
    CallbackHandler, AuthenticateCallbackHandler

public class OAuthBearerLoginCallbackHandler extends Object implements AuthenticateCallbackHandler

OAuthBearerLoginCallbackHandler is an AuthenticateCallbackHandler that accepts OAuthBearerTokenCallback and SaslExtensionsCallback callbacks to perform the steps to request a JWT from an OAuth/OIDC provider using the client_credentials grant type. This grant type is commonly used for non-interactive "service accounts" where there is no user available to interactively supply credentials.

The OAuthBearerLoginCallbackHandler is used on the client side to retrieve a JWT, and the OAuthBearerValidatorCallbackHandler is used on the broker to validate the JWT that was sent to it by the client to allow access. Both the brokers and clients will need to be configured with their appropriate callback handlers and respective configuration for OAuth functionality to work.

Note that while this callback handler class must be specified for a Kafka client that wants to use OAuth functionality, in the case of OAuth-based inter-broker communication, the callback handler must be used on the Kafka broker side as well.

This AuthenticateCallbackHandler is enabled by specifying its class name in the Kafka configuration. For client use, specify the class name in the SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS configuration like so:

    sasl.login.callback.handler.class=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler

If using OAuth login on the broker side (for inter-broker communication), the callback handler class will be specified with a listener-based property, listener.name.<listener name>.oauthbearer.sasl.login.callback.handler.class, like so:

    listener.name.<listener name>.oauthbearer.sasl.login.callback.handler.class=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler

The Kafka configuration must also include JAAS configuration which includes the following OAuth-specific options:

    clientId      OAuth client ID (required)
    clientSecret  OAuth client secret (required)
    scope         OAuth scope (optional)

The JAAS configuration can also include any SSL options that are needed. The configuration options are the same as those specified by the configuration in SslConfigs.addClientSslSupport(ConfigDef).

Here's an example of the JAAS configuration for a Kafka client:

    sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required \
        clientId="foo" \
        clientSecret="bar" \
        scope="baz" \
        ssl.protocol="SSL" ;

The configuration option SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL is also required in order for the client to contact the OAuth/OIDC provider. For example:

    sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/v1/token

Please see the OAuth/OIDC provider's documentation for the token endpoint URL.

Constructor Details

OAuthBearerLoginCallbackHandler
    public OAuthBearerLoginCallbackHandler()

Method Details

configure
    public void configure(Map<String,?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries)
    Description copied from interface: AuthenticateCallbackHandler. Configures this callback handler for the specified SASL mechanism.
    Specified by:
        configure in interface AuthenticateCallbackHandler
    Parameters:
        configs - Key-value pairs containing the parsed configuration options of the client or broker. Note that these are the Kafka configuration options and not the JAAS configuration options. JAAS config options may be obtained from `jaasConfigEntries` for callbacks which obtain some configs from the JAAS configuration. For configs that may be specified as both Kafka config as well as JAAS config (e.g. sasl.kerberos.service.name), the configuration is treated as invalid if conflicting values are provided.
        saslMechanism - Negotiated SASL mechanism. For clients, this is the SASL mechanism configured for the client. For brokers, this is the mechanism negotiated with the client and is one of the mechanisms enabled on the broker.
        jaasConfigEntries - JAAS configuration entries from the JAAS login context. This list contains a single entry for clients and may contain more than one entry for brokers if multiple mechanisms are enabled on a listener using static JAAS configuration where there is no mapping between mechanisms and login module entries. In this case, callback handlers can use the login module in `jaasConfigEntries` to identify the entry corresponding to `saslMechanism`. Alternatively, dynamic JAAS configuration option SaslConfigs.SASL_JAAS_CONFIG may be configured on brokers with listener and mechanism prefix, in which case only the configuration entry corresponding to `saslMechanism` will be provided in `jaasConfigEntries`.

close
    public void close()
    Description copied from interface: AuthenticateCallbackHandler. Closes this instance.
    Specified by:
        close in interface AuthenticateCallbackHandler

handle
    public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException
    Specified by:
        handle in interface CallbackHandler
    Throws:
        IOException
        UnsupportedCallbackException
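For reference, the client-side settings described above can be expressed as a single block of Java properties. This is a hedged sketch: the clientId/clientSecret/scope values and the endpoint URL mirror the placeholders in the class description, while security.protocol and sasl.mechanism are assumed typical OAUTHBEARER client settings.

```java
// Sketch: client configuration for OAuthBearerLoginCallbackHandler (placeholders only).
import java.util.Properties;

public class LoginCallbackHandlerConfigExample {
    public static Properties loginHandlerProps() {
        Properties props = new Properties();
        props.put("security.protocol", "SASL_SSL");    // assumed typical setting
        props.put("sasl.mechanism", "OAUTHBEARER");    // assumed typical setting
        props.put("sasl.login.callback.handler.class",
                "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler");
        props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required "
                        + "clientId=\"foo\" clientSecret=\"bar\" scope=\"baz\" ssl.protocol=\"SSL\" ;");
        props.put("sasl.oauthbearer.token.endpoint.url", "https://example.com/oauth2/v1/token");
        return props;
    }
}
```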
diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginModule.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginModule.html
new file mode 100644
index 000000000..b35dba799

OAuthBearerLoginModule (kafka 4.1.0 API)

Class OAuthBearerLoginModule

java.lang.Object
    org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule

All Implemented Interfaces:
    LoginModule

public class OAuthBearerLoginModule extends Object implements LoginModule

The LoginModule for the SASL/OAUTHBEARER mechanism. When a client (whether a non-broker client or a broker when SASL/OAUTHBEARER is the inter-broker protocol) connects to Kafka, the OAuthBearerLoginModule instance asks its configured AuthenticateCallbackHandler implementation to handle an instance of OAuthBearerTokenCallback and return an instance of OAuthBearerToken. A default, builtin AuthenticateCallbackHandler implementation creates an unsecured token as defined by these JAAS module options:

JAAS Module Option for Unsecured Token Retrieval - Documentation

    unsecuredLoginStringClaim_<claimname>="value" - Creates a String claim with the given name and value. Any valid claim name can be specified except 'iat' and 'exp' (these are automatically generated).
    unsecuredLoginNumberClaim_<claimname>="value" - Creates a Number claim with the given name and value. Any valid claim name can be specified except 'iat' and 'exp' (these are automatically generated).
    unsecuredLoginListClaim_<claimname>="value" - Creates a String List claim with the given name and values parsed from the given value, where the first character is taken as the delimiter. For example: unsecuredLoginListClaim_fubar="|value1|value2". Any valid claim name can be specified except 'iat' and 'exp' (these are automatically generated).
    unsecuredLoginPrincipalClaimName - Set to a custom claim name if you wish the name of the String claim holding the principal name to be something other than 'sub'.
    unsecuredLoginLifetimeSeconds - Set to an integer value if the token expiration is to be set to something other than the default value of 3600 seconds (which is 1 hour). The 'exp' claim will be set to reflect the expiration time.
    unsecuredLoginScopeClaimName - Set to a custom claim name if you wish the name of the String or String List claim holding any token scope to be something other than 'scope'.

You can also add custom unsecured SASL extensions when using the default, builtin AuthenticateCallbackHandler implementation through the configurable option unsecuredLoginExtension_<extensionname>. Note that there are validations for the keys/values in order to conform to the SASL/OAUTHBEARER standard (https://tools.ietf.org/html/rfc7628#section-3.1), including the reserved key at OAuthBearerClientInitialResponse.AUTH_KEY. The OAuthBearerLoginModule instance also asks its configured AuthenticateCallbackHandler implementation to handle an instance of SaslExtensionsCallback and return an instance of SaslExtensions. The configured callback handler does not need to handle this callback, though -- any UnsupportedCallbackException that is thrown is ignored, and no SASL extensions will be associated with the login.

Production use cases will require writing an implementation of AuthenticateCallbackHandler that can handle an instance of OAuthBearerTokenCallback and declaring it via either the sasl.login.callback.handler.class configuration option for a non-broker client or via the listener.name.sasl_ssl.oauthbearer.sasl.login.callback.handler.class configuration option for brokers (when SASL/OAUTHBEARER is the inter-broker protocol).

This class stores the retrieved OAuthBearerToken in the Subject's private credentials where the SaslClient can retrieve it. An appropriate, builtin SaslClient implementation is automatically used and configured such that it can perform that retrieval.

Here is a typical, basic JAAS configuration for a client leveraging unsecured SASL/OAUTHBEARER authentication:

    KafkaClient {
        org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule Required
        unsecuredLoginStringClaim_sub="thePrincipalName";
    };

An implementation of the Login interface specific to the OAUTHBEARER mechanism is automatically applied; it periodically refreshes any token before it expires so that the client can continue to make connections to brokers. The parameters that impact how the refresh algorithm operates are specified as part of the producer/consumer/broker configuration and are as follows. See the documentation for these properties elsewhere for details.

Producer/Consumer/Broker Configuration Property

    sasl.login.refresh.window.factor
    sasl.login.refresh.window.jitter
    sasl.login.refresh.min.period.seconds
    sasl.login.refresh.min.buffer.seconds

When a broker accepts a SASL/OAUTHBEARER connection, the instance of the builtin SaslServer implementation asks its configured AuthenticateCallbackHandler implementation to handle an instance of OAuthBearerValidatorCallback constructed with the OAuth 2 Bearer Token's compact serialization and return an instance of OAuthBearerToken if the value validates. A default, builtin AuthenticateCallbackHandler implementation validates an unsecured token as defined by these JAAS module options:

JAAS Module Option for Unsecured Token Validation - Documentation

    unsecuredValidatorPrincipalClaimName="value" - Set to a non-empty value if you wish a particular String claim holding a principal name to be checked for existence; the default is to check for the existence of the 'sub' claim.
    unsecuredValidatorScopeClaimName="value" - Set to a custom claim name if you wish the name of the String or String List claim holding any token scope to be something other than 'scope'.
    unsecuredValidatorRequiredScope="value" - Set to a space-delimited list of scope values if you wish the String/String List claim holding the token scope to be checked to make sure it contains certain values.
    unsecuredValidatorAllowableClockSkewMs="value" - Set to a positive integer value if you wish to allow up to some number of positive milliseconds of clock skew (the default is 0).

Here is a typical, basic JAAS configuration for a broker leveraging unsecured SASL/OAUTHBEARER validation:

    KafkaServer {
        org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule Required
        unsecuredLoginStringClaim_sub="thePrincipalName";
    };

Production use cases will require writing an implementation of AuthenticateCallbackHandler that can handle an instance of OAuthBearerValidatorCallback and declaring it via the listener.name.sasl_ssl.oauthbearer.sasl.server.callback.handler.class broker configuration option.

    + The builtin SaslServer implementation for SASL/OAUTHBEARER in Kafka + makes the instance of OAuthBearerToken available upon successful + authentication via the negotiated property "OAUTHBEARER.token"; the + token could be used in a custom authorizer (to authorize based on JWT claims + rather than ACLs, for example). +

    + This implementation's logout() method logs out only the specific token that this instance logged in, even if its Subject instance is shared across multiple LoginContexts and there happen to be multiple tokens on the Subject. This is useful because a new token with a longer lifetime can be created before a soon-to-expire token is actually logged out. If multiple simultaneous tokens were not supported in this way, the soon-to-expire token would have to be logged out first, and if the new token then could not be retrieved (for example, because the authorization server is temporarily unavailable), the client would be left without a token and unable to create new connections. Leaving the existing token, which still has some lifetime left, in place until a replacement is actually retrieved mitigates that risk.
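    As a brief orientation (not part of the original Javadoc), the unsecured configuration above can be wired into ordinary client setup code along these lines; the broker address, principal name, and the commented-out callback handler class are placeholders:

      java.util.Properties props = new java.util.Properties();
      props.put("bootstrap.servers", "broker1:9093");          // placeholder
      props.put("security.protocol", "SASL_SSL");
      props.put("sasl.mechanism", "OAUTHBEARER");
      // Unsecured tokens are intended for development/testing only.
      props.put("sasl.jaas.config",
          "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required "
              + "unsecuredLoginStringClaim_sub=\"thePrincipalName\";");
      // For production, supply your own AuthenticateCallbackHandler implementation, e.g.:
      // props.put("sasl.login.callback.handler.class", "com.example.MyLoginCallbackHandler");  // hypothetical class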

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        OAUTHBEARER_MECHANISM

        +
        public static final String OAUTHBEARER_MECHANISM
        +
        The SASL Mechanism name for OAuth 2: OAUTHBEARER
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        OAuthBearerLoginModule

        +
        public OAuthBearerLoginModule()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      + +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerToken.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerToken.html new file mode 100644 index 000000000..6ed3c02d0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerToken.html @@ -0,0 +1,246 @@ + + + + +OAuthBearerToken (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface OAuthBearerToken

    +
    +
    +
    +
    public interface OAuthBearerToken
    +
    The b64token value as defined in + RFC 6750 Section + 2.1 along with the token's specific scope and lifetime and principal + name. +

    + A network request would be required to re-hydrate an opaque token, and that + could result in (for example) an IOException, but retrievers for + various attributes (scope(), lifetimeMs(), etc.) declare no + exceptions. Therefore, if a network request is required for any of these + retriever methods, that request could be performed at construction time so + that the various attributes can be reliably provided thereafter. For example, + a constructor might declare throws IOException in such a case. + Alternatively, the retrievers could throw unchecked exceptions. +
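    For illustration only (not from the original Javadoc): a callback handler that has already parsed or introspected a token might expose it through this interface roughly as follows; every value is a placeholder, and java.util.Set is assumed to be imported.

      OAuthBearerToken token = new OAuthBearerToken() {
          public String value() { return "b64TokenValue"; }                        // compact serialization (placeholder)
          public Set<String> scope() { return Set.of("read", "write"); }           // non-null, possibly empty
          public long lifetimeMs() { return System.currentTimeMillis() + 3_600_000L; }
          public String principalName() { return "thePrincipalName"; }
          public Long startTimeMs() { return null; }                               // null is allowed when unknown
      };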

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        value

        +
        String value()
        +
        The b64token value as defined in + RFC 6750 Section + 2.1
        +
        +
        Returns:
        +
        b64token value as defined in + RFC 6750 + Section 2.1
        +
        +
        +
      • +
      • +
        +

        scope

        +
        Set<String> scope()
        +
        The token's scope of access, as per + RFC 6749 Section + 1.4
        +
        +
        Returns:
        +
        the token's (always non-null but potentially empty) scope of access, + as per RFC + 6749 Section 1.4. Note that all values in the returned set will + be trimmed of preceding and trailing whitespace, and the result will + never contain the empty string.
        +
        +
        +
      • +
      • +
        +

        lifetimeMs

        +
        long lifetimeMs()
        +
        The token's lifetime, expressed as the number of milliseconds since the + epoch, as per RFC + 6749 Section 1.4
        +
        +
        Returns:
        +
        the token's lifetime, expressed as the number of milliseconds since + the epoch, as per + RFC 6749 + Section 1.4.
        +
        +
        +
      • +
      • +
        +

        principalName

        +
        String principalName()
        +
        The name of the principal to which this credential applies
        +
        +
        Returns:
        +
        the always non-null/non-empty principal name
        +
        +
        +
      • +
      • +
        +

        startTimeMs

        +
        Long startTimeMs()
        +
        When the credential became valid, in terms of the number of milliseconds + since the epoch, if known, otherwise null. An expiring credential may not + necessarily indicate when it was created -- just when it expires -- so we + need to support a null return value here.
        +
        +
        Returns:
        +
        the time when the credential became valid, in terms of the number of + milliseconds since the epoch, if known, otherwise null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerTokenCallback.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerTokenCallback.html new file mode 100644 index 000000000..39020feb9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerTokenCallback.html @@ -0,0 +1,279 @@ + + + + +OAuthBearerTokenCallback (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class OAuthBearerTokenCallback

    +
    +
    java.lang.Object +
    org.apache.kafka.common.security.oauthbearer.OAuthBearerTokenCallback
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Callback
    +
    +
    +
    public class OAuthBearerTokenCallback +extends Object +implements Callback
    +
    A Callback for use by the SaslClient and Login + implementations when they require an OAuth 2 bearer token. Callback handlers + should use the error(String, String, String) method to communicate + errors returned by the authorization server as per + RFC 6749: The OAuth + 2.0 Authorization Framework. Callback handlers should communicate other + problems by raising an IOException.
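    A hedged sketch of how a login callback handler's handle() method might service this callback; retrieveToken() is a hypothetical helper, the error code is one defined by RFC 6749, and the usual javax.security.auth.callback and java.io imports are assumed:

      public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
          for (Callback callback : callbacks) {
              if (callback instanceof OAuthBearerTokenCallback) {
                  OAuthBearerTokenCallback tokenCallback = (OAuthBearerTokenCallback) callback;
                  OAuthBearerToken token = retrieveToken();     // hypothetical call to the authorization server
                  if (token != null)
                      tokenCallback.token(token);               // setting a token clears any error values
                  else
                      tokenCallback.error("temporarily_unavailable", "token endpoint unreachable", null);
              } else {
                  throw new UnsupportedCallbackException(callback);
              }
          }
      }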
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        OAuthBearerTokenCallback

        +
        public OAuthBearerTokenCallback()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        token

        +
        public OAuthBearerToken token()
        +
        Return the (potentially null) token
        +
        +
        Returns:
        +
        the (potentially null) token
        +
        +
        +
      • +
      • +
        +

        errorCode

        +
        public String errorCode()
        +
        Return the optional (but always non-empty if not null) error code as per + RFC 6749: The OAuth + 2.0 Authorization Framework.
        +
        +
        Returns:
        +
        the optional (but always non-empty if not null) error code
        +
        +
        +
      • +
      • +
        +

        errorDescription

        +
        public String errorDescription()
        +
        Return the (potentially null) error description as per + RFC 6749: The OAuth + 2.0 Authorization Framework.
        +
        +
        Returns:
        +
        the (potentially null) error description
        +
        +
        +
      • +
      • +
        +

        errorUri

        +
        public String errorUri()
        +
        Return the (potentially null) error URI as per + RFC 6749: The OAuth + 2.0 Authorization Framework.
        +
        +
        Returns:
        +
        the (potentially null) error URI
        +
        +
        +
      • +
      • +
        +

        token

        +
        public void token(OAuthBearerToken token)
        +
        Set the token. All error-related values are cleared.
        +
        +
        Parameters:
        +
        token - the optional token to set
        +
        +
        +
      • +
      • +
        +

        error

        +
        public void error(String errorCode, + String errorDescription, + String errorUri)
        +
        Set the error values as per + RFC 6749: The OAuth + 2.0 Authorization Framework. Any token is cleared.
        +
        +
        Parameters:
        +
        errorCode - the mandatory error code to set
        +
        errorDescription - the optional error description to set
        +
        errorUri - the optional error URI to set
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallback.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallback.html new file mode 100644 index 000000000..de5233e9c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallback.html @@ -0,0 +1,311 @@ + + + + +OAuthBearerValidatorCallback (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class OAuthBearerValidatorCallback

    +
    +
    java.lang.Object +
    org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Callback
    +
    +
    +
    public class OAuthBearerValidatorCallback +extends Object +implements Callback
    +
    A Callback for use by the SaslServer implementation when it + needs to provide an OAuth 2 bearer token compact serialization for + validation. Callback handlers should use the + error(String, String, String) method to communicate errors back to + the SASL Client as per + RFC 6749: The OAuth + 2.0 Authorization Framework and the IANA + OAuth Extensions Error Registry. Callback handlers should communicate + other problems by raising an IOException.
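    A hedged broker-side sketch (not from the original Javadoc) of how this callback might be serviced inside a handle() loop; validate() stands in for real JWT validation, and the accessor/mutator names (tokenValue(), token(OAuthBearerToken), error(...)) should be checked against this class's method documentation:

      if (callback instanceof OAuthBearerValidatorCallback) {
          OAuthBearerValidatorCallback validatorCallback = (OAuthBearerValidatorCallback) callback;
          OAuthBearerToken validated = validate(validatorCallback.tokenValue());   // hypothetical validation helper
          if (validated != null)
              validatorCallback.token(validated);       // accept: the token becomes available to the SaslServer
          else
              validatorCallback.error("invalid_token", null, null);   // reject with an OAuth error status
      }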
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.html new file mode 100644 index 000000000..1127b49a5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.html @@ -0,0 +1,288 @@ + + + + +OAuthBearerValidatorCallbackHandler (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class OAuthBearerValidatorCallbackHandler

    +
    +
    java.lang.Object +
    org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    CallbackHandler, AuthenticateCallbackHandler
    +
    +
    +
    public class OAuthBearerValidatorCallbackHandler +extends Object +implements AuthenticateCallbackHandler
    +

    + OAuthBearerValidatorCallbackHandler is an AuthenticateCallbackHandler that accepts OAuthBearerValidatorCallback and OAuthBearerExtensionsValidatorCallback callbacks to implement OAuth/OIDC validation. This callback handler is intended only to be used on the Kafka broker side, as it will receive an OAuthBearerValidatorCallback that includes the JWT provided by the Kafka client. That JWT is validated in terms of format, expiration, signature, and audience and issuer (if desired). This callback handler is the broker side of the OAuth functionality, whereas OAuthBearerLoginCallbackHandler is used by clients.

    + +

    + This AuthenticateCallbackHandler is enabled in the broker configuration by setting the BrokerSecurityConfigs.SASL_SERVER_CALLBACK_HANDLER_CLASS_CONFIG like so:

      listener.name.<listenerName>.oauthbearer.sasl.server.callback.handler.class=org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler

    + +

    + The JAAS configuration for OAuth is also needed. If using OAuth for inter-broker communication, + the options are those specified in OAuthBearerLoginCallbackHandler. +

    + +

    + The configuration option SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_URL is also required in order to contact the OAuth/OIDC provider to retrieve the JWKS for use in JWT signature validation. For example:

      listener.name.<listenerName>.oauthbearer.sasl.oauthbearer.jwks.endpoint.url=https://example.com/oauth2/v1/keys

      Please see the OAuth/OIDC provider's documentation for the JWKS endpoint URL.

    + +

    + The following is a list of all the configuration options that are available for the broker + validation callback handler: + +

    +

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        OAuthBearerValidatorCallbackHandler

        +
        public OAuthBearerValidatorCallbackHandler()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        public void configure(Map<String,?> configs, + String saslMechanism, + List<AppConfigurationEntry> jaasConfigEntries)
        +
        Description copied from interface: AuthenticateCallbackHandler
        +
        Configures this callback handler for the specified SASL mechanism.
        +
        +
        Specified by:
        +
        configure in interface AuthenticateCallbackHandler
        +
        Parameters:
        +
        configs - Key-value pairs containing the parsed configuration options of + the client or broker. Note that these are the Kafka configuration options + and not the JAAS configuration options. JAAS config options may be obtained + from `jaasConfigEntries` for callbacks which obtain some configs from the + JAAS configuration. For configs that may be specified as both Kafka config + as well as JAAS config (e.g. sasl.kerberos.service.name), the configuration + is treated as invalid if conflicting values are provided.
        +
        saslMechanism - Negotiated SASL mechanism. For clients, this is the SASL + mechanism configured for the client. For brokers, this is the mechanism + negotiated with the client and is one of the mechanisms enabled on the broker.
        +
        jaasConfigEntries - JAAS configuration entries from the JAAS login context. + This list contains a single entry for clients and may contain more than + one entry for brokers if multiple mechanisms are enabled on a listener using + static JAAS configuration where there is no mapping between mechanisms and + login module entries. In this case, callback handlers can use the login module in + `jaasConfigEntries` to identify the entry corresponding to `saslMechanism`. + Alternatively, dynamic JAAS configuration option + SaslConfigs.SASL_JAAS_CONFIG may be + configured on brokers with listener and mechanism prefix, in which case + only the configuration entry corresponding to `saslMechanism` will be provided + in `jaasConfigEntries`.
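        Purely to illustrate the configure/handle/close lifecycle, the handler could be driven by hand as sketched below; in a real broker it is instantiated from the listener.name.<listenerName>.oauthbearer.sasl.server.callback.handler.class setting, the JWKS URL, JAAS entry, and compactJwt value are placeholders, additional options may be required in practice, and imports for AppConfigurationEntry, SaslConfigs, Callback, List, and Map are assumed:

          static void validateOnce(String compactJwt) throws Exception {
              OAuthBearerValidatorCallbackHandler handler = new OAuthBearerValidatorCallbackHandler();
              List<AppConfigurationEntry> jaasEntries = List.of(new AppConfigurationEntry(
                  OAuthBearerLoginModule.class.getName(),
                  AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
                  Map.of()));
              handler.configure(
                  Map.of(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, "https://example.com/oauth2/v1/keys"),
                  "OAUTHBEARER",
                  jaasEntries);
              try {
                  handler.handle(new Callback[] { new OAuthBearerValidatorCallback(compactJwt) });
              } finally {
                  handler.close();
              }
          }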
        +
        +
        +
      • +
      • +
        +

        close

        +
        public void close()
        +
        Description copied from interface: AuthenticateCallbackHandler
        +
        Closes this instance.
        +
        +
        Specified by:
        +
        close in interface AuthenticateCallbackHandler
        +
        +
        +
      • +
      • +
        +

        handle

        +
        public void handle(Callback[] callbacks) + throws IOException, +UnsupportedCallbackException
        +
        +
        Specified by:
        +
        handle in interface CallbackHandler
        +
        Throws:
        +
        IOException
        +
        UnsupportedCallbackException
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/package-summary.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/package-summary.html new file mode 100644 index 000000000..d14a0e3cf --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/package-summary.html @@ -0,0 +1,200 @@ + + + + +org.apache.kafka.common.security.oauthbearer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.common.security.oauthbearer

    +
    +
    +
    package org.apache.kafka.common.security.oauthbearer
    +
    +
    Provides a LoginModule for using OAuth Bearer Token authentication with Kafka clusters.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/package-tree.html b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/package-tree.html new file mode 100644 index 000000000..de8a488f7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/oauthbearer/package-tree.html @@ -0,0 +1,122 @@ + + + + +org.apache.kafka.common.security.oauthbearer Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.common.security.oauthbearer

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/plain/PlainAuthenticateCallback.html b/static/41/javadoc/org/apache/kafka/common/security/plain/PlainAuthenticateCallback.html new file mode 100644 index 000000000..5b1fa94ad --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/plain/PlainAuthenticateCallback.html @@ -0,0 +1,203 @@ + + + + +PlainAuthenticateCallback (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class PlainAuthenticateCallback

    +
    +
    java.lang.Object +
    org.apache.kafka.common.security.plain.PlainAuthenticateCallback
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Callback
    +
    +
    +
    public class PlainAuthenticateCallback +extends Object +implements Callback
    +
    +
    +
      + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      +
      PlainAuthenticateCallback(char[] password)
      +
      +
      Creates a callback with the password provided by the client
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      + +
      +
      Returns true if client password matches expected password, false otherwise.
      +
      +
      void
      +
      authenticated(boolean authenticated)
      +
      +
      Sets the authenticated state.
      +
      +
      char[]
      + +
      +
      Returns the password provided by the client during SASL/PLAIN authentication
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        PlainAuthenticateCallback

        +
        public PlainAuthenticateCallback(char[] password)
        +
        Creates a callback with the password provided by the client
        +
        +
        Parameters:
        +
        password - The password provided by the client during SASL/PLAIN authentication
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        password

        +
        public char[] password()
        +
        Returns the password provided by the client during SASL/PLAIN authentication
        +
        +
      • +
      • +
        +

        authenticated

        +
        public boolean authenticated()
        +
        Returns true if the client password matches the expected password, false otherwise. This state is set by the server-side callback handler.
        +
        +
      • +
      • +
        +

        authenticated

        +
        public void authenticated(boolean authenticated)
        +
        Sets the authenticated state. This is set by the server-side callback handler + by matching the client provided password with the expected password.
        +
        +
        Parameters:
        +
        authenticated - true indicates successful authentication
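        Taken together, these methods are typically driven from a broker-side SASL/PLAIN callback handler roughly as follows; expectedPassword() is a hypothetical lookup against whatever credential store is in use, and javax.security.auth.callback plus java.util.Arrays imports are assumed:

          public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
              String username = null;
              for (Callback callback : callbacks) {
                  if (callback instanceof NameCallback) {
                      username = ((NameCallback) callback).getDefaultName();
                  } else if (callback instanceof PlainAuthenticateCallback) {
                      PlainAuthenticateCallback plainCallback = (PlainAuthenticateCallback) callback;
                      char[] expected = expectedPassword(username);          // hypothetical credential lookup
                      plainCallback.authenticated(expected != null
                          && Arrays.equals(expected, plainCallback.password()));
                  } else {
                      throw new UnsupportedCallbackException(callback);
                  }
              }
          }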
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/plain/PlainLoginModule.html b/static/41/javadoc/org/apache/kafka/common/security/plain/PlainLoginModule.html new file mode 100644 index 000000000..0f30b41f6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/plain/PlainLoginModule.html @@ -0,0 +1,225 @@ + + + + +PlainLoginModule (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class PlainLoginModule

    +
    +
    java.lang.Object +
    org.apache.kafka.common.security.plain.PlainLoginModule
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    LoginModule
    +
    +
    +
    public class PlainLoginModule +extends Object +implements LoginModule
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        PlainLoginModule

        +
        public PlainLoginModule()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      + +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/plain/package-summary.html b/static/41/javadoc/org/apache/kafka/common/security/plain/package-summary.html new file mode 100644 index 000000000..c50ed30dd --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/plain/package-summary.html @@ -0,0 +1,87 @@ + + + + +org.apache.kafka.common.security.plain (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.common.security.plain

    +
    +
    +
    package org.apache.kafka.common.security.plain
    +
    +
    Provides an implementation of plaintext credential authentication (SASL/PLAIN) for securing Kafka clusters.
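    For reference (not part of the original Javadoc), a client typically opts into SASL/PLAIN with settings along these lines, where props is an ordinary java.util.Properties used to build the client and the credentials are placeholders:

      props.put("security.protocol", "SASL_SSL");
      props.put("sasl.mechanism", "PLAIN");
      props.put("sasl.jaas.config",
          "org.apache.kafka.common.security.plain.PlainLoginModule required "
              + "username=\"alice\" password=\"alice-secret\";");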
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/plain/package-tree.html b/static/41/javadoc/org/apache/kafka/common/security/plain/package-tree.html new file mode 100644 index 000000000..4575f48dc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/plain/package-tree.html @@ -0,0 +1,72 @@ + + + + +org.apache.kafka.common.security.plain Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.common.security.plain

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/scram/ScramCredential.html b/static/41/javadoc/org/apache/kafka/common/security/scram/ScramCredential.html new file mode 100644 index 000000000..4867d438a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/scram/ScramCredential.html @@ -0,0 +1,209 @@ + + + + +ScramCredential (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ScramCredential

    +
    +
    java.lang.Object +
    org.apache.kafka.common.security.scram.ScramCredential
    +
    +
    +
    +
    public class ScramCredential +extends Object
    +
    SCRAM credential class that encapsulates the credential data persisted for each user that is accessible to the server. See RFC 5802 for details.
    +
    +
    +
      + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      +
      ScramCredential(byte[] salt, + byte[] storedKey, + byte[] serverKey, + int iterations)
      +
      +
      Constructs a new credential.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      int
      + +
      +
      Number of iterations used to process this credential using the SCRAM algorithm.
      +
      +
      byte[]
      + +
      +
      Returns the salt used to process this credential using the SCRAM algorithm.
      +
      +
      byte[]
      + +
      +
      Server key computed from the client password using the SCRAM algorithm.
      +
      +
      byte[]
      + +
      +
      Stored key computed from the client password using the SCRAM algorithm.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ScramCredential

        +
        public ScramCredential(byte[] salt, + byte[] storedKey, + byte[] serverKey, + int iterations)
        +
        Constructs a new credential.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        salt

        +
        public byte[] salt()
        +
        Returns the salt used to process this credential using the SCRAM algorithm.
        +
        +
      • +
      • +
        +

        serverKey

        +
        public byte[] serverKey()
        +
        Server key computed from the client password using the SCRAM algorithm.
        +
        +
      • +
      • +
        +

        storedKey

        +
        public byte[] storedKey()
        +
        Stored key computed from the client password using the SCRAM algorithm.
        +
        +
      • +
      • +
        +

        iterations

        +
        public int iterations()
        +
        Number of iterations used to process this credential using the SCRAM algorithm.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/scram/ScramCredentialCallback.html b/static/41/javadoc/org/apache/kafka/common/security/scram/ScramCredentialCallback.html new file mode 100644 index 000000000..4d1dbc5c2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/scram/ScramCredentialCallback.html @@ -0,0 +1,179 @@ + + + + +ScramCredentialCallback (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ScramCredentialCallback

    +
    +
    java.lang.Object +
    org.apache.kafka.common.security.scram.ScramCredentialCallback
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Callback
    +
    +
    +
    public class ScramCredentialCallback +extends Object +implements Callback
    +
    Callback used for SCRAM mechanisms.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ScramCredentialCallback

        +
        public ScramCredentialCallback()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        scramCredential

        +
        public void scramCredential(ScramCredential scramCredential)
        +
        Sets the SCRAM credential for this instance.
        +
        +
      • +
      • +
        +

        scramCredential

        +
        public ScramCredential scramCredential()
        +
        Returns the SCRAM credential if set on this instance.
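        A broker-side SCRAM callback handler typically pairs this callback with a NameCallback, as sketched below; credentialStore is a hypothetical Map<String, ScramCredential>, and javax.security.auth.callback imports are assumed:

          public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
              String username = null;
              for (Callback callback : callbacks) {
                  if (callback instanceof NameCallback) {
                      username = ((NameCallback) callback).getDefaultName();
                  } else if (callback instanceof ScramCredentialCallback) {
                      // A null credential means the user is unknown, and authentication will fail.
                      ((ScramCredentialCallback) callback).scramCredential(credentialStore.get(username));
                  } else {
                      throw new UnsupportedCallbackException(callback);
                  }
              }
          }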
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/scram/ScramExtensionsCallback.html b/static/41/javadoc/org/apache/kafka/common/security/scram/ScramExtensionsCallback.html new file mode 100644 index 000000000..5bf7dd93d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/scram/ScramExtensionsCallback.html @@ -0,0 +1,183 @@ + + + + +ScramExtensionsCallback (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ScramExtensionsCallback

    +
    +
    java.lang.Object +
    org.apache.kafka.common.security.scram.ScramExtensionsCallback
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Callback
    +
    +
    +
    public class ScramExtensionsCallback +extends Object +implements Callback
    +
    Optional callback used for SCRAM mechanisms if any extensions need to be set + in the SASL/SCRAM exchange.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ScramExtensionsCallback

        +
        public ScramExtensionsCallback()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        extensions

        +
        public Map<String,String> extensions()
        +
        Returns a map of the extension names and values that are sent by the client to the server in the initial client SCRAM authentication message. The default is an empty unmodifiable map.
        +
        +
      • +
      • +
        +

        extensions

        +
        public void extensions(Map<String,String> extensions)
        +
        Sets the SCRAM extensions on this callback. The map passed in should be unmodifiable.
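        A client-side callback handler could attach extensions as sketched here; the extension name and value are purely illustrative, Map.of already yields an unmodifiable map, and javax.security.auth.callback imports are assumed:

          public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
              for (Callback callback : callbacks) {
                  if (callback instanceof ScramExtensionsCallback) {
                      ((ScramExtensionsCallback) callback).extensions(Map.of("exampleExtension", "exampleValue"));
                  } else {
                      throw new UnsupportedCallbackException(callback);
                  }
              }
          }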
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/scram/ScramLoginModule.html b/static/41/javadoc/org/apache/kafka/common/security/scram/ScramLoginModule.html new file mode 100644 index 000000000..9d6f19e2c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/scram/ScramLoginModule.html @@ -0,0 +1,262 @@ + + + + +ScramLoginModule (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ScramLoginModule

    +
    +
    java.lang.Object +
    org.apache.kafka.common.security.scram.ScramLoginModule
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    LoginModule
    +
    +
    +
    public class ScramLoginModule +extends Object +implements LoginModule
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      + +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ScramLoginModule

        +
        public ScramLoginModule()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      + +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/scram/package-summary.html b/static/41/javadoc/org/apache/kafka/common/security/scram/package-summary.html new file mode 100644 index 000000000..8937e05bf --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/scram/package-summary.html @@ -0,0 +1,99 @@ + + + + +org.apache.kafka.common.security.scram (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.common.security.scram

    +
    +
    +
    package org.apache.kafka.common.security.scram
    +
    +
    Provides an adaptor for using the Salted Challenge Response Authentication Mechanism (SCRAM) to secure Kafka clusters.
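    For reference (not part of the original Javadoc), a client usually enables SCRAM with settings along these lines; props is an ordinary java.util.Properties, the credentials are placeholders, and SCRAM-SHA-512 is configured the same way:

      props.put("security.protocol", "SASL_SSL");
      props.put("sasl.mechanism", "SCRAM-SHA-256");
      props.put("sasl.jaas.config",
          "org.apache.kafka.common.security.scram.ScramLoginModule required "
              + "username=\"alice\" password=\"alice-secret\";");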
    +
    +
    +
      +
    • +
      +
      Classes
      +
      +
      Class
      +
      Description
      + +
      +
      SCRAM credential class that encapsulates the credential data persisted for each user that is + accessible to the server.
      +
      + +
      +
      Callback used for SCRAM mechanisms.
      +
      + +
      +
      Optional callback used for SCRAM mechanisms if any extensions need to be set + in the SASL/SCRAM exchange.
      +
      + +
       
      +
      +
      +
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/scram/package-tree.html b/static/41/javadoc/org/apache/kafka/common/security/scram/package-tree.html new file mode 100644 index 000000000..03550808a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/scram/package-tree.html @@ -0,0 +1,74 @@ + + + + +org.apache.kafka.common.security.scram Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.common.security.scram

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/token/delegation/DelegationToken.html b/static/41/javadoc/org/apache/kafka/common/security/token/delegation/DelegationToken.html new file mode 100644 index 000000000..ded2df4c6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/token/delegation/DelegationToken.html @@ -0,0 +1,218 @@ + + + + +DelegationToken (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class DelegationToken

    +
    +
    java.lang.Object +
    org.apache.kafka.common.security.token.delegation.DelegationToken
    +
    +
    +
    +
    public class DelegationToken +extends Object
    +
    A class representing a delegation token.
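    A hedged sketch of how this class relates to TokenInformation (documented later in this package); every literal value is a placeholder, hmacBytes stands in for the HMAC bytes computed by the broker, and KafkaPrincipal plus java.util.List imports are assumed:

      KafkaPrincipal owner = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "alice");
      long now = System.currentTimeMillis();
      TokenInformation info = TokenInformation.fromRecord(
          "token-id-1",                       // tokenId
          owner,                              // owner
          owner,                              // tokenRequester
          List.of(owner),                     // renewers
          now,                                // issueTimestamp
          now + 7 * 24 * 60 * 60 * 1000L,     // maxTimestamp
          now + 24 * 60 * 60 * 1000L);        // expiryTimestamp
      DelegationToken token = new DelegationToken(info, hmacBytes);   // hmacBytes: byte[] placeholder
      String encoded = token.hmacAsBase64String();                    // Base64 form of the HMAC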
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        DelegationToken

        +
        public DelegationToken(TokenInformation tokenInformation, + byte[] hmac)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        tokenInfo

        +
        public TokenInformation tokenInfo()
        +
        +
      • +
      • +
        +

        hmac

        +
        public byte[] hmac()
        +
        +
      • +
      • +
        +

        hmacAsBase64String

        +
        public String hmacAsBase64String()
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/token/delegation/TokenInformation.html b/static/41/javadoc/org/apache/kafka/common/security/token/delegation/TokenInformation.html new file mode 100644 index 000000000..ecd6f8bfa --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/token/delegation/TokenInformation.html @@ -0,0 +1,348 @@ + + + + +TokenInformation (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TokenInformation

    +
    +
    java.lang.Object +
    org.apache.kafka.common.security.token.delegation.TokenInformation
    +
    +
    +
    +
    public class TokenInformation +extends Object
    +
    A class representing the details of a delegation token.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        fromRecord

        +
        public static TokenInformation fromRecord(String tokenId, + KafkaPrincipal owner, + KafkaPrincipal tokenRequester, + Collection<KafkaPrincipal> renewers, + long issueTimestamp, + long maxTimestamp, + long expiryTimestamp)
        +
        +
      • +
      • +
        +

        owner

        +
        public KafkaPrincipal owner()
        +
        +
      • +
      • +
        +

        ownerAsString

        +
        public String ownerAsString()
        +
        +
      • +
      • +
        +

        tokenRequester

        +
        public KafkaPrincipal tokenRequester()
        +
        +
      • +
      • +
        +

        tokenRequesterAsString

        +
        public String tokenRequesterAsString()
        +
        +
      • +
      • +
        +

        renewers

        +
        public Collection<KafkaPrincipal> renewers()
        +
        +
      • +
      • +
        +

        renewersAsString

        +
        public Collection<String> renewersAsString()
        +
        +
      • +
      • +
        +

        issueTimestamp

        +
        public long issueTimestamp()
        +
        +
      • +
      • +
        +

        expiryTimestamp

        +
        public long expiryTimestamp()
        +
        +
      • +
      • +
        +

        setExpiryTimestamp

        +
        public void setExpiryTimestamp(long expiryTimestamp)
        +
        +
      • +
      • +
        +

        tokenId

        +
        public String tokenId()
        +
        +
      • +
      • +
        +

        maxTimestamp

        +
        public long maxTimestamp()
        +
        +
      • +
      • +
        +

        ownerOrRenewer

        +
        public boolean ownerOrRenewer(KafkaPrincipal principal)
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/token/delegation/package-summary.html b/static/41/javadoc/org/apache/kafka/common/security/token/delegation/package-summary.html new file mode 100644 index 000000000..85d09462e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/token/delegation/package-summary.html @@ -0,0 +1,91 @@ + + + + +org.apache.kafka.common.security.token.delegation (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.common.security.token.delegation

    +
    +
    +
    package org.apache.kafka.common.security.token.delegation
    +
    +
    Provides a mechanism for delegating authorization to a distinct Principal for securing Kafka clusters.
    +
    +
    +
      +
    • +
      +
      Classes
      +
      +
      Class
      +
      Description
      + +
      +
      A class representing a delegation token.
      +
      + +
      +
      A class representing the details of a delegation token.
      +
      +
      +
      +
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/security/token/delegation/package-tree.html b/static/41/javadoc/org/apache/kafka/common/security/token/delegation/package-tree.html new file mode 100644 index 000000000..cd3b0246a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/security/token/delegation/package-tree.html @@ -0,0 +1,72 @@ + + + + +org.apache.kafka.common.security.token.delegation Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.common.security.token.delegation

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/BooleanDeserializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/BooleanDeserializer.html new file mode 100644 index 000000000..485f120ba --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/BooleanDeserializer.html @@ -0,0 +1,223 @@ + + + + +BooleanDeserializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class BooleanDeserializer

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.BooleanDeserializer
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Deserializer<Boolean>
    +
    +
    +
    public class BooleanDeserializer +extends Object +implements Deserializer<Boolean>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        BooleanDeserializer

        +
        public BooleanDeserializer()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        deserialize

        +
        public Boolean deserialize(String topic, + byte[] data)
        +
        Description copied from interface: Deserializer
        +
        Deserialize a record value from a byte array into a value or object. + +

        It is recommended to deserialize a null byte array to a null object.

        +
        +
        Specified by:
        +
        deserialize in interface Deserializer<Boolean>
        +
        Parameters:
        +
        topic - topic associated with the data
        +
        data - serialized bytes; may be null
        +
        Returns:
        +
        deserialized typed data; may be null
        +
        +
        +
      • +
      • +
        +

        deserialize

        +
        public Boolean deserialize(String topic, + Headers headers, + ByteBuffer data)
        +
        Description copied from interface: Deserializer
        +
        Deserialize a record value from a ByteBuffer into a value or object. + +

        If ByteBufferDeserializer is used by an application, the application code cannot make any assumptions + about the returned ByteBuffer like the position, limit, capacity, etc., or if it is backed by + an array or not. + +

        Similarly, if this method is overridden, the implementation cannot make any assumptions about the + passed in ByteBuffer either. + +

        It is recommended to deserialize a null ByteBuffer to a null object. + +

        Note that the passed in Headers may be empty, but never null. + The implementation is allowed to modify the passed in headers, as a side effect of deserialization. + It is considered best practice to not delete or modify existing headers, but rather only add new ones.

        +
        +
        Specified by:
        +
        deserialize in interface Deserializer<Boolean>
        +
        Parameters:
        +
        topic - topic associated with the data
        +
        headers - headers associated with the record
        +
        data - serialized ByteBuffer; may be null
        +
        Returns:
        +
        deserialized typed data; may be null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/BooleanSerializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/BooleanSerializer.html new file mode 100644 index 000000000..6a328f2dc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/BooleanSerializer.html @@ -0,0 +1,183 @@ + + + + +BooleanSerializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class BooleanSerializer

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.BooleanSerializer
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Serializer<Boolean>
    +
    +
    +
    public class BooleanSerializer +extends Object +implements Serializer<Boolean>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        BooleanSerializer

        +
        public BooleanSerializer()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        serialize

        +
        public byte[] serialize(String topic, + Boolean data)
        +
        Description copied from interface: Serializer
        +
        Convert data into a byte array. + +

        It is recommended to serialize null data to the null byte array.

        +
        +
        Specified by:
        +
        serialize in interface Serializer<Boolean>
        +
        Parameters:
        +
        topic - topic associated with data
        +
        data - typed data; may be null
        +
        Returns:
        +
        serialized bytes; may be null
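        A quick round trip with the matching BooleanDeserializer (topic name is arbitrary):

          BooleanSerializer serializer = new BooleanSerializer();
          BooleanDeserializer deserializer = new BooleanDeserializer();
          byte[] bytes = serializer.serialize("example-topic", Boolean.TRUE);
          Boolean roundTripped = deserializer.deserialize("example-topic", bytes);    // Boolean.TRUE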
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/ByteArrayDeserializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/ByteArrayDeserializer.html new file mode 100644 index 000000000..7ac761ffc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/ByteArrayDeserializer.html @@ -0,0 +1,183 @@ + + + + +ByteArrayDeserializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ByteArrayDeserializer

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.ByteArrayDeserializer
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Deserializer<byte[]>
    +
    +
    +
    public class ByteArrayDeserializer +extends Object +implements Deserializer<byte[]>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ByteArrayDeserializer

        +
        public ByteArrayDeserializer()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        deserialize

        +
        public byte[] deserialize(String topic, + byte[] data)
        +
        Description copied from interface: Deserializer
        +
        Deserialize a record value from a byte array into a value or object. + +

        It is recommended to deserialize a null byte array to a null object.

        +
        +
        Specified by:
        +
        deserialize in interface Deserializer<byte[]>
        +
        Parameters:
        +
        topic - topic associated with the data
        +
        data - serialized bytes; may be null
        +
        Returns:
        +
        deserialized typed data; may be null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/ByteArraySerializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/ByteArraySerializer.html new file mode 100644 index 000000000..d1b68cf75 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/ByteArraySerializer.html @@ -0,0 +1,183 @@ + + + + +ByteArraySerializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ByteArraySerializer

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.ByteArraySerializer
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Serializer<byte[]>
    +
    +
    +
    public class ByteArraySerializer +extends Object +implements Serializer<byte[]>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ByteArraySerializer

        +
        public ByteArraySerializer()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        serialize

        +
        public byte[] serialize(String topic, + byte[] data)
        +
        Description copied from interface: Serializer
        +
        Convert data into a byte array. + +

        It is recommended to serialize null data to the null byte array.

        +
        +
        Specified by:
        +
        serialize in interface Serializer<byte[]>
        +
        Parameters:
        +
        topic - topic associated with data
        +
        data - typed data; may be null
        +
        Returns:
        +
        serialized bytes; may be null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/ByteBufferDeserializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/ByteBufferDeserializer.html new file mode 100644 index 000000000..772f1f15e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/ByteBufferDeserializer.html @@ -0,0 +1,223 @@
ByteBufferDeserializer (kafka 4.1.0 API)

Class ByteBufferDeserializer
java.lang.Object
  org.apache.kafka.common.serialization.ByteBufferDeserializer
All Implemented Interfaces: Closeable, AutoCloseable, Deserializer<ByteBuffer>

public class ByteBufferDeserializer extends Object implements Deserializer<ByteBuffer>

Constructor Details
  ByteBufferDeserializer
    public ByteBufferDeserializer()

Method Details
  deserialize
    public ByteBuffer deserialize(String topic, byte[] data)
    Description copied from interface: Deserializer
    Deserialize a record value from a byte array into a value or object. It is recommended to
    deserialize a null byte array to a null object.
    Specified by: deserialize in interface Deserializer<ByteBuffer>
    Parameters: topic - topic associated with the data; data - serialized bytes; may be null
    Returns: deserialized typed data; may be null

  deserialize
    public ByteBuffer deserialize(String topic, Headers headers, ByteBuffer data)
    Description copied from interface: Deserializer
    Deserialize a record value from a ByteBuffer into a value or object. If ByteBufferDeserializer
    is used by an application, the application code cannot make any assumptions about the returned
    ByteBuffer (position, limit, capacity, or whether it is backed by an array). Similarly, if this
    method is overridden, the implementation cannot make any assumptions about the passed-in
    ByteBuffer either. It is recommended to deserialize a null ByteBuffer to a null object. Note
    that the passed-in Headers may be empty, but never null; the implementation is allowed to
    modify the passed-in headers as a side effect of deserialization, but it is considered best
    practice to not delete or modify existing headers and only add new ones.
    Specified by: deserialize in interface Deserializer<ByteBuffer>
    Parameters: topic - topic associated with the data; headers - headers associated with the record;
      data - serialized ByteBuffer; may be null
    Returns: deserialized typed data; may be null
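The contract above says callers must not assume anything about the returned ByteBuffer's position,
limit, capacity, or backing array. A minimal sketch of defensive use (class and topic names are
illustrative, not part of this diff):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import org.apache.kafka.common.serialization.ByteBufferDeserializer;

    public class ByteBufferDeserializerSketch {
        public static void main(String[] args) {
            byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
            try (ByteBufferDeserializer deserializer = new ByteBufferDeserializer()) {
                ByteBuffer value = deserializer.deserialize("demo-topic", payload);
                // Copy out the remaining bytes rather than relying on position/limit or array().
                byte[] copy = new byte[value.remaining()];
                value.get(copy);
                System.out.println(new String(copy, StandardCharsets.UTF_8)); // hello
            }
        }
    }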
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/ByteBufferSerializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/ByteBufferSerializer.html new file mode 100644 index 000000000..1efdd9160 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/ByteBufferSerializer.html @@ -0,0 +1,190 @@
ByteBufferSerializer (kafka 4.1.0 API)

Class ByteBufferSerializer
java.lang.Object
  org.apache.kafka.common.serialization.ByteBufferSerializer
All Implemented Interfaces: Closeable, AutoCloseable, Serializer<ByteBuffer>

public class ByteBufferSerializer extends Object implements Serializer<ByteBuffer>

ByteBufferSerializer always rewinds the position of the input buffer to zero for serialization,
so a manual rewind is not necessary. Note: any existing buffer position is ignored. The position
is also rewound back to zero before serialize(String, ByteBuffer) returns.

Constructor Details
  ByteBufferSerializer
    public ByteBufferSerializer()

Method Details
  serialize
    public byte[] serialize(String topic, ByteBuffer data)
    Description copied from interface: Serializer
    Convert data into a byte array. It is recommended to serialize null data to the null byte array.
    Specified by: serialize in interface Serializer<ByteBuffer>
    Parameters: topic - topic associated with data; data - typed data; may be null
    Returns: serialized bytes; may be null
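A short sketch of the rewind behaviour described above: the buffer can be handed over with a
non-zero position and no manual flip()/rewind() is needed (class and topic names are illustrative):

    import java.nio.ByteBuffer;
    import org.apache.kafka.common.serialization.ByteBufferSerializer;

    public class ByteBufferSerializerSketch {
        public static void main(String[] args) {
            ByteBuffer buffer = ByteBuffer.allocate(8);
            buffer.putLong(42L); // position is now 8; the serializer rewinds it to zero itself
            try (ByteBufferSerializer serializer = new ByteBufferSerializer()) {
                byte[] bytes = serializer.serialize("demo-topic", buffer);
                System.out.println(bytes.length); // 8
            }
        }
    }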
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/BytesDeserializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/BytesDeserializer.html new file mode 100644 index 000000000..63570e245 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/BytesDeserializer.html @@ -0,0 +1,183 @@
BytesDeserializer (kafka 4.1.0 API)

Class BytesDeserializer
java.lang.Object
  org.apache.kafka.common.serialization.BytesDeserializer
All Implemented Interfaces: Closeable, AutoCloseable, Deserializer<org.apache.kafka.common.utils.Bytes>

public class BytesDeserializer extends Object implements Deserializer<org.apache.kafka.common.utils.Bytes>

Constructor Details
  BytesDeserializer
    public BytesDeserializer()

Method Details
  deserialize
    public org.apache.kafka.common.utils.Bytes deserialize(String topic, byte[] data)
    Description copied from interface: Deserializer
    Deserialize a record value from a byte array into a value or object. It is recommended to
    deserialize a null byte array to a null object.
    Specified by: deserialize in interface Deserializer<org.apache.kafka.common.utils.Bytes>
    Parameters: topic - topic associated with the data; data - serialized bytes; may be null
    Returns: deserialized typed data; may be null
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/BytesSerializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/BytesSerializer.html new file mode 100644 index 000000000..a78f12f18 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/BytesSerializer.html @@ -0,0 +1,183 @@
BytesSerializer (kafka 4.1.0 API)

Class BytesSerializer
java.lang.Object
  org.apache.kafka.common.serialization.BytesSerializer
All Implemented Interfaces: Closeable, AutoCloseable, Serializer<org.apache.kafka.common.utils.Bytes>

public class BytesSerializer extends Object implements Serializer<org.apache.kafka.common.utils.Bytes>

Constructor Details
  BytesSerializer
    public BytesSerializer()

Method Details
  serialize
    public byte[] serialize(String topic, org.apache.kafka.common.utils.Bytes data)
    Description copied from interface: Serializer
    Convert data into a byte array. It is recommended to serialize null data to the null byte array.
    Specified by: serialize in interface Serializer<org.apache.kafka.common.utils.Bytes>
    Parameters: topic - topic associated with data; data - typed data; may be null
    Returns: serialized bytes; may be null
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Deserializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/Deserializer.html new file mode 100644 index 000000000..69c90965d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Deserializer.html @@ -0,0 +1,265 @@
Deserializer (kafka 4.1.0 API)

Interface Deserializer<T>
Type Parameters: T - Type to be deserialized into.
All Superinterfaces: AutoCloseable, Closeable
All Known Implementing Classes: BooleanDeserializer, ByteArrayDeserializer, ByteBufferDeserializer,
  BytesDeserializer, DoubleDeserializer, FloatDeserializer, IntegerDeserializer, ListDeserializer,
  LongDeserializer, SessionWindowedDeserializer, ShortDeserializer, StringDeserializer,
  TimeWindowedDeserializer, UUIDDeserializer, VoidDeserializer

public interface Deserializer<T> extends Closeable

An interface for converting bytes to objects. A class that implements this interface is expected
to have a constructor with no parameters. This interface can be combined with
ClusterResourceListener to receive cluster metadata once it's available, as well as Monitorable
to enable the deserializer to register metrics. For the latter, the following tags are
automatically added to all metrics registered: config set to either key.deserializer or
value.deserializer, and class set to the deserializer class name.

Method Summary
  default void close()
    Close this deserializer.
  default void configure(Map<String,?> configs, boolean isKey)
    Configure this class.
  T deserialize(String topic, byte[] data)
    Deserialize a record value from a byte array into a value or object.
  default T deserialize(String topic, Headers headers, byte[] data)
    Deserialize a record value from a byte array into a value or object.
  default T deserialize(String topic, Headers headers, ByteBuffer data)
    Deserialize a record value from a ByteBuffer into a value or object.

Method Details
  configure
    default void configure(Map<String,?> configs, boolean isKey)
    Configure this class.
    Parameters: configs - configs in key/value pairs; isKey - whether the deserializer is used for
      the key or the value

  deserialize
    T deserialize(String topic, byte[] data)
    Deserialize a record value from a byte array into a value or object. It is recommended to
    deserialize a null byte array to a null object.
    Parameters: topic - topic associated with the data; data - serialized bytes; may be null
    Returns: deserialized typed data; may be null

  deserialize
    default T deserialize(String topic, Headers headers, byte[] data)
    Deserialize a record value from a byte array into a value or object. It is recommended to
    deserialize a null byte array to a null object. Note that the passed-in Headers may be empty,
    but never null. The implementation is allowed to modify the passed-in headers as a side effect
    of deserialization; it is considered best practice to not delete or modify existing headers,
    but rather only add new ones.
    Parameters: topic - topic associated with the data; headers - headers associated with the record;
      data - serialized bytes; may be null
    Returns: deserialized typed data; may be null

  deserialize
    default T deserialize(String topic, Headers headers, ByteBuffer data)
    Deserialize a record value from a ByteBuffer into a value or object. If ByteBufferDeserializer
    is used by an application, the application code cannot make any assumptions about the returned
    ByteBuffer (position, limit, capacity, or whether it is backed by an array). Similarly, if this
    method is overridden, the implementation cannot make any assumptions about the passed-in
    ByteBuffer either. It is recommended to deserialize a null ByteBuffer to a null object. Note
    that the passed-in Headers may be empty, but never null; the same header guidance as above
    applies.
    Parameters: topic - topic associated with the data; headers - headers associated with the record;
      data - serialized ByteBuffer; may be null
    Returns: deserialized typed data; may be null

  close
    default void close()
    Close this deserializer. This method must be idempotent as it may be called multiple times.
    Specified by: close in interface AutoCloseable; close in interface Closeable
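Since a Deserializer has only one abstract method (configure and close have defaults), a custom
implementation can be quite small. A minimal sketch, with an illustrative class name and charset
choice:

    import java.nio.charset.StandardCharsets;
    import org.apache.kafka.common.serialization.Deserializer;

    public class Utf8StringDeserializer implements Deserializer<String> {
        @Override
        public String deserialize(String topic, byte[] data) {
            // Follow the documented recommendation: a null byte array maps to a null object.
            return data == null ? null : new String(data, StandardCharsets.UTF_8);
        }
    }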
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/DoubleDeserializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/DoubleDeserializer.html new file mode 100644 index 000000000..fc9ec56c0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/DoubleDeserializer.html @@ -0,0 +1,223 @@
DoubleDeserializer (kafka 4.1.0 API)

Class DoubleDeserializer
java.lang.Object
  org.apache.kafka.common.serialization.DoubleDeserializer
All Implemented Interfaces: Closeable, AutoCloseable, Deserializer<Double>

public class DoubleDeserializer extends Object implements Deserializer<Double>

Constructor Details
  DoubleDeserializer
    public DoubleDeserializer()

Method Details
  deserialize
    public Double deserialize(String topic, byte[] data)
    Description copied from interface: Deserializer
    Deserialize a record value from a byte array into a value or object. It is recommended to
    deserialize a null byte array to a null object.
    Specified by: deserialize in interface Deserializer<Double>
    Parameters: topic - topic associated with the data; data - serialized bytes; may be null
    Returns: deserialized typed data; may be null

  deserialize
    public Double deserialize(String topic, Headers headers, ByteBuffer data)
    Description copied from interface: Deserializer (same ByteBuffer, null-handling, and header
    contract as Deserializer.deserialize(String, Headers, ByteBuffer) above).
    Specified by: deserialize in interface Deserializer<Double>
    Parameters: topic - topic associated with the data; headers - headers associated with the record;
      data - serialized ByteBuffer; may be null
    Returns: deserialized typed data; may be null
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/DoubleSerializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/DoubleSerializer.html new file mode 100644 index 000000000..b51de0cb1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/DoubleSerializer.html @@ -0,0 +1,183 @@
DoubleSerializer (kafka 4.1.0 API)

Class DoubleSerializer
java.lang.Object
  org.apache.kafka.common.serialization.DoubleSerializer
All Implemented Interfaces: Closeable, AutoCloseable, Serializer<Double>

public class DoubleSerializer extends Object implements Serializer<Double>

Constructor Details
  DoubleSerializer
    public DoubleSerializer()

Method Details
  serialize
    public byte[] serialize(String topic, Double data)
    Description copied from interface: Serializer
    Convert data into a byte array. It is recommended to serialize null data to the null byte array.
    Specified by: serialize in interface Serializer<Double>
    Parameters: topic - topic associated with data; data - typed data; may be null
    Returns: serialized bytes; may be null
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/FloatDeserializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/FloatDeserializer.html new file mode 100644 index 000000000..01e542948 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/FloatDeserializer.html @@ -0,0 +1,223 @@
FloatDeserializer (kafka 4.1.0 API)

Class FloatDeserializer
java.lang.Object
  org.apache.kafka.common.serialization.FloatDeserializer
All Implemented Interfaces: Closeable, AutoCloseable, Deserializer<Float>

public class FloatDeserializer extends Object implements Deserializer<Float>

Constructor Details
  FloatDeserializer
    public FloatDeserializer()

Method Details
  deserialize
    public Float deserialize(String topic, byte[] data)
    Description copied from interface: Deserializer
    Deserialize a record value from a byte array into a value or object. It is recommended to
    deserialize a null byte array to a null object.
    Specified by: deserialize in interface Deserializer<Float>
    Parameters: topic - topic associated with the data; data - serialized bytes; may be null
    Returns: deserialized typed data; may be null

  deserialize
    public Float deserialize(String topic, Headers headers, ByteBuffer data)
    Description copied from interface: Deserializer (same ByteBuffer, null-handling, and header
    contract as Deserializer.deserialize(String, Headers, ByteBuffer) above).
    Specified by: deserialize in interface Deserializer<Float>
    Parameters: topic - topic associated with the data; headers - headers associated with the record;
      data - serialized ByteBuffer; may be null
    Returns: deserialized typed data; may be null
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/FloatSerializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/FloatSerializer.html new file mode 100644 index 000000000..631511c61 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/FloatSerializer.html @@ -0,0 +1,183 @@
FloatSerializer (kafka 4.1.0 API)

Class FloatSerializer
java.lang.Object
  org.apache.kafka.common.serialization.FloatSerializer
All Implemented Interfaces: Closeable, AutoCloseable, Serializer<Float>

public class FloatSerializer extends Object implements Serializer<Float>

Constructor Details
  FloatSerializer
    public FloatSerializer()

Method Details
  serialize
    public byte[] serialize(String topic, Float data)
    Description copied from interface: Serializer
    Convert data into a byte array. It is recommended to serialize null data to the null byte array.
    Specified by: serialize in interface Serializer<Float>
    Parameters: topic - topic associated with data; data - typed data; may be null
    Returns: serialized bytes; may be null
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/IntegerDeserializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/IntegerDeserializer.html new file mode 100644 index 000000000..ff7bfcc2c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/IntegerDeserializer.html @@ -0,0 +1,223 @@
IntegerDeserializer (kafka 4.1.0 API)

Class IntegerDeserializer
java.lang.Object
  org.apache.kafka.common.serialization.IntegerDeserializer
All Implemented Interfaces: Closeable, AutoCloseable, Deserializer<Integer>

public class IntegerDeserializer extends Object implements Deserializer<Integer>

Constructor Details
  IntegerDeserializer
    public IntegerDeserializer()

Method Details
  deserialize
    public Integer deserialize(String topic, byte[] data)
    Description copied from interface: Deserializer
    Deserialize a record value from a byte array into a value or object. It is recommended to
    deserialize a null byte array to a null object.
    Specified by: deserialize in interface Deserializer<Integer>
    Parameters: topic - topic associated with the data; data - serialized bytes; may be null
    Returns: deserialized typed data; may be null

  deserialize
    public Integer deserialize(String topic, Headers headers, ByteBuffer data)
    Description copied from interface: Deserializer (same ByteBuffer, null-handling, and header
    contract as Deserializer.deserialize(String, Headers, ByteBuffer) above).
    Specified by: deserialize in interface Deserializer<Integer>
    Parameters: topic - topic associated with the data; headers - headers associated with the record;
      data - serialized ByteBuffer; may be null
    Returns: deserialized typed data; may be null
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/IntegerSerializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/IntegerSerializer.html new file mode 100644 index 000000000..801d74604 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/IntegerSerializer.html @@ -0,0 +1,183 @@
IntegerSerializer (kafka 4.1.0 API)

Class IntegerSerializer
java.lang.Object
  org.apache.kafka.common.serialization.IntegerSerializer
All Implemented Interfaces: Closeable, AutoCloseable, Serializer<Integer>

public class IntegerSerializer extends Object implements Serializer<Integer>

Constructor Details
  IntegerSerializer
    public IntegerSerializer()

Method Details
  serialize
    public byte[] serialize(String topic, Integer data)
    Description copied from interface: Serializer
    Convert data into a byte array. It is recommended to serialize null data to the null byte array.
    Specified by: serialize in interface Serializer<Integer>
    Parameters: topic - topic associated with data; data - typed data; may be null
    Returns: serialized bytes; may be null
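A round-trip sketch pairing IntegerSerializer with the IntegerDeserializer documented earlier; the
topic name is an illustrative placeholder:

    import org.apache.kafka.common.serialization.IntegerDeserializer;
    import org.apache.kafka.common.serialization.IntegerSerializer;

    public class IntegerRoundTripSketch {
        public static void main(String[] args) {
            try (IntegerSerializer serializer = new IntegerSerializer();
                 IntegerDeserializer deserializer = new IntegerDeserializer()) {
                byte[] bytes = serializer.serialize("demo-topic", 123);
                System.out.println(deserializer.deserialize("demo-topic", bytes)); // 123
                // null data maps to a null byte array, per the contract above
                System.out.println(serializer.serialize("demo-topic", null));      // null
            }
        }
    }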
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/ListDeserializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/ListDeserializer.html new file mode 100644 index 000000000..9fd2dae8e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/ListDeserializer.html @@ -0,0 +1,247 @@
ListDeserializer (kafka 4.1.0 API)

Class ListDeserializer<Inner>
java.lang.Object
  org.apache.kafka.common.serialization.ListDeserializer<Inner>
All Implemented Interfaces: Closeable, AutoCloseable, Deserializer<List<Inner>>

public class ListDeserializer<Inner> extends Object implements Deserializer<List<Inner>>

Constructor Details
  ListDeserializer
    public ListDeserializer()
  ListDeserializer
    public ListDeserializer(Class<L> listClass, Deserializer<Inner> inner)

Method Details
  innerDeserializer
    public Deserializer<Inner> innerDeserializer()

  configure
    public void configure(Map<String,?> configs, boolean isKey)
    Description copied from interface: Deserializer
    Configure this class.
    Specified by: configure in interface Deserializer<Inner>
    Parameters: configs - configs in key/value pairs; isKey - whether the deserializer is used for
      the key or the value

  deserialize
    public List<Inner> deserialize(String topic, byte[] data)
    Description copied from interface: Deserializer
    Deserialize a record value from a byte array into a value or object. It is recommended to
    deserialize a null byte array to a null object.
    Specified by: deserialize in interface Deserializer<Inner>
    Parameters: topic - topic associated with the data; data - serialized bytes; may be null
    Returns: deserialized typed data; may be null

  close
    public void close()
    Description copied from interface: Deserializer
    Close this deserializer. This method must be idempotent as it may be called multiple times.
    Specified by: close in interface AutoCloseable; close in interface Closeable; close in interface
      Deserializer<Inner>
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/ListSerializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/ListSerializer.html new file mode 100644 index 000000000..2f917b092 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/ListSerializer.html @@ -0,0 +1,245 @@
ListSerializer (kafka 4.1.0 API)

Class ListSerializer<Inner>
java.lang.Object
  org.apache.kafka.common.serialization.ListSerializer<Inner>
All Implemented Interfaces: Closeable, AutoCloseable, Serializer<List<Inner>>

public class ListSerializer<Inner> extends Object implements Serializer<List<Inner>>

Constructor Details
  ListSerializer
    public ListSerializer()
  ListSerializer
    public ListSerializer(Serializer<Inner> inner)

Method Details
  getInnerSerializer
    public Serializer<Inner> getInnerSerializer()

  configure
    public void configure(Map<String,?> configs, boolean isKey)
    Description copied from interface: Serializer
    Configure this class.
    Specified by: configure in interface Serializer<Inner>
    Parameters: configs - configs in key/value pairs; isKey - whether the serializer is used for
      the key or the value

  serialize
    public byte[] serialize(String topic, List<Inner> data)
    Description copied from interface: Serializer
    Convert data into a byte array. It is recommended to serialize null data to the null byte array.
    Specified by: serialize in interface Serializer<Inner>
    Parameters: topic - topic associated with data; data - typed data; may be null
    Returns: serialized bytes; may be null

  close
    public void close()
    Description copied from interface: Serializer
    Close this serializer. This method must be idempotent as it may be called multiple times.
    Specified by: close in interface AutoCloseable; close in interface Closeable; close in interface
      Serializer<Inner>
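A rough sketch pairing ListSerializer with the ListDeserializer documented above, using only the
constructors shown on these pages. The element type and topic name are illustrative assumptions,
and passing the raw ArrayList.class literal for the Class<L> parameter may trigger an unchecked
warning:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.kafka.common.serialization.IntegerDeserializer;
    import org.apache.kafka.common.serialization.IntegerSerializer;
    import org.apache.kafka.common.serialization.ListDeserializer;
    import org.apache.kafka.common.serialization.ListSerializer;

    public class ListRoundTripSketch {
        public static void main(String[] args) {
            ListSerializer<Integer> serializer = new ListSerializer<>(new IntegerSerializer());
            ListDeserializer<Integer> deserializer =
                    new ListDeserializer<>(ArrayList.class, new IntegerDeserializer());

            byte[] bytes = serializer.serialize("demo-topic", List.of(1, 2, 3));
            List<Integer> values = deserializer.deserialize("demo-topic", bytes);
            System.out.println(values); // [1, 2, 3]
        }
    }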
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/LongDeserializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/LongDeserializer.html new file mode 100644 index 000000000..034b3d952 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/LongDeserializer.html @@ -0,0 +1,223 @@
LongDeserializer (kafka 4.1.0 API)

Class LongDeserializer
java.lang.Object
  org.apache.kafka.common.serialization.LongDeserializer
All Implemented Interfaces: Closeable, AutoCloseable, Deserializer<Long>

public class LongDeserializer extends Object implements Deserializer<Long>

Constructor Details
  LongDeserializer
    public LongDeserializer()

Method Details
  deserialize
    public Long deserialize(String topic, byte[] data)
    Description copied from interface: Deserializer
    Deserialize a record value from a byte array into a value or object. It is recommended to
    deserialize a null byte array to a null object.
    Specified by: deserialize in interface Deserializer<Long>
    Parameters: topic - topic associated with the data; data - serialized bytes; may be null
    Returns: deserialized typed data; may be null

  deserialize
    public Long deserialize(String topic, Headers headers, ByteBuffer data)
    Description copied from interface: Deserializer (same ByteBuffer, null-handling, and header
    contract as Deserializer.deserialize(String, Headers, ByteBuffer) above).
    Specified by: deserialize in interface Deserializer<Long>
    Parameters: topic - topic associated with the data; headers - headers associated with the record;
      data - serialized ByteBuffer; may be null
    Returns: deserialized typed data; may be null
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/LongSerializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/LongSerializer.html new file mode 100644 index 000000000..3b2b66dee --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/LongSerializer.html @@ -0,0 +1,183 @@
LongSerializer (kafka 4.1.0 API)

Class LongSerializer
java.lang.Object
  org.apache.kafka.common.serialization.LongSerializer
All Implemented Interfaces: Closeable, AutoCloseable, Serializer<Long>

public class LongSerializer extends Object implements Serializer<Long>

Constructor Details
  LongSerializer
    public LongSerializer()

Method Details
  serialize
    public byte[] serialize(String topic, Long data)
    Description copied from interface: Serializer
    Convert data into a byte array. It is recommended to serialize null data to the null byte array.
    Specified by: serialize in interface Serializer<Long>
    Parameters: topic - topic associated with data; data - typed data; may be null
    Returns: serialized bytes; may be null
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serde.html new file mode 100644 index 000000000..5dd5de70a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serde.html @@ -0,0 +1,188 @@
Serde (kafka 4.1.0 API)

Interface Serde<T>
Type Parameters: T - Type to be serialized from and deserialized into. A class that implements
  this interface is expected to have a constructor with no parameter.
All Superinterfaces: AutoCloseable, Closeable
All Known Implementing Classes: Serdes.BooleanSerde, Serdes.ByteArraySerde, Serdes.ByteBufferSerde,
  Serdes.BytesSerde, Serdes.DoubleSerde, Serdes.FloatSerde, Serdes.IntegerSerde, Serdes.ListSerde,
  Serdes.LongSerde, Serdes.ShortSerde, Serdes.StringSerde, Serdes.UUIDSerde, Serdes.VoidSerde,
  Serdes.WrapperSerde, WindowedSerdes.SessionWindowedSerde, WindowedSerdes.TimeWindowedSerde

public interface Serde<T> extends Closeable

The interface for wrapping a serializer and deserializer for the given data type.

Method Summary
  default void close()
    Close this serde class, which will close the underlying serializer and deserializer.
  default void configure(Map<String,?> configs, boolean isKey)
    Configure this class, which will configure the underlying serializer and deserializer.
  Serializer<T> serializer()
  Deserializer<T> deserializer()

Method Details
  configure
    default void configure(Map<String,?> configs, boolean isKey)
    Configure this class, which will configure the underlying serializer and deserializer.
    Parameters: configs - configs in key/value pairs; isKey - whether is for key or value

  close
    default void close()
    Close this serde class, which will close the underlying serializer and deserializer. This
    method has to be idempotent because it might be called multiple times.
    Specified by: close in interface AutoCloseable; close in interface Closeable

  serializer
    Serializer<T> serializer()

  deserializer
    Deserializer<T> deserializer()
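Only serializer() and deserializer() are abstract on Serde, so wrapping an existing
serializer/deserializer pair takes a few lines. A minimal sketch with an illustrative class name:

    import org.apache.kafka.common.serialization.Deserializer;
    import org.apache.kafka.common.serialization.LongDeserializer;
    import org.apache.kafka.common.serialization.LongSerializer;
    import org.apache.kafka.common.serialization.Serde;
    import org.apache.kafka.common.serialization.Serializer;

    public class LongSerdeSketch implements Serde<Long> {
        @Override
        public Serializer<Long> serializer() {
            return new LongSerializer();
        }

        @Override
        public Deserializer<Long> deserializer() {
            return new LongDeserializer();
        }
    }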
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.BooleanSerde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.BooleanSerde.html new file mode 100644 index 000000000..4e1b04bcc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.BooleanSerde.html @@ -0,0 +1,140 @@
Serdes.BooleanSerde (kafka 4.1.0 API)

Class Serdes.BooleanSerde
java.lang.Object
  org.apache.kafka.common.serialization.Serdes.WrapperSerde<Boolean>
    org.apache.kafka.common.serialization.Serdes.BooleanSerde
All Implemented Interfaces: Closeable, AutoCloseable, Serde<Boolean>
Enclosing class: Serdes

public static final class Serdes.BooleanSerde extends Serdes.WrapperSerde<Boolean>

Constructor Details
  BooleanSerde
    public BooleanSerde()
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.ByteArraySerde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.ByteArraySerde.html new file mode 100644 index 000000000..9e6da376b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.ByteArraySerde.html @@ -0,0 +1,140 @@
Serdes.ByteArraySerde (kafka 4.1.0 API)

Class Serdes.ByteArraySerde
java.lang.Object
  org.apache.kafka.common.serialization.Serdes.WrapperSerde<byte[]>
    org.apache.kafka.common.serialization.Serdes.ByteArraySerde
All Implemented Interfaces: Closeable, AutoCloseable, Serde<byte[]>
Enclosing class: Serdes

public static final class Serdes.ByteArraySerde extends Serdes.WrapperSerde<byte[]>

Constructor Details
  ByteArraySerde
    public ByteArraySerde()
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.ByteBufferSerde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.ByteBufferSerde.html new file mode 100644 index 000000000..667c06293 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.ByteBufferSerde.html @@ -0,0 +1,140 @@
Serdes.ByteBufferSerde (kafka 4.1.0 API)

Class Serdes.ByteBufferSerde
java.lang.Object
  org.apache.kafka.common.serialization.Serdes.WrapperSerde<ByteBuffer>
    org.apache.kafka.common.serialization.Serdes.ByteBufferSerde
All Implemented Interfaces: Closeable, AutoCloseable, Serde<ByteBuffer>
Enclosing class: Serdes

public static final class Serdes.ByteBufferSerde extends Serdes.WrapperSerde<ByteBuffer>

Constructor Details
  ByteBufferSerde
    public ByteBufferSerde()
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.BytesSerde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.BytesSerde.html new file mode 100644 index 000000000..f22cb9c70 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.BytesSerde.html @@ -0,0 +1,140 @@
Serdes.BytesSerde (kafka 4.1.0 API)

Class Serdes.BytesSerde
java.lang.Object
  org.apache.kafka.common.serialization.Serdes.WrapperSerde<org.apache.kafka.common.utils.Bytes>
    org.apache.kafka.common.serialization.Serdes.BytesSerde
All Implemented Interfaces: Closeable, AutoCloseable, Serde<org.apache.kafka.common.utils.Bytes>
Enclosing class: Serdes

public static final class Serdes.BytesSerde extends Serdes.WrapperSerde<org.apache.kafka.common.utils.Bytes>

Constructor Details
  BytesSerde
    public BytesSerde()
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.DoubleSerde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.DoubleSerde.html new file mode 100644 index 000000000..5831b45d6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.DoubleSerde.html @@ -0,0 +1,140 @@
Serdes.DoubleSerde (kafka 4.1.0 API)

Class Serdes.DoubleSerde
java.lang.Object
  org.apache.kafka.common.serialization.Serdes.WrapperSerde<Double>
    org.apache.kafka.common.serialization.Serdes.DoubleSerde
All Implemented Interfaces: Closeable, AutoCloseable, Serde<Double>
Enclosing class: Serdes

public static final class Serdes.DoubleSerde extends Serdes.WrapperSerde<Double>

Constructor Details
  DoubleSerde
    public DoubleSerde()
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.FloatSerde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.FloatSerde.html new file mode 100644 index 000000000..72065b31b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.FloatSerde.html @@ -0,0 +1,140 @@
Serdes.FloatSerde (kafka 4.1.0 API)

Class Serdes.FloatSerde
java.lang.Object
  org.apache.kafka.common.serialization.Serdes.WrapperSerde<Float>
    org.apache.kafka.common.serialization.Serdes.FloatSerde
All Implemented Interfaces: Closeable, AutoCloseable, Serde<Float>
Enclosing class: Serdes

public static final class Serdes.FloatSerde extends Serdes.WrapperSerde<Float>

Constructor Details
  FloatSerde
    public FloatSerde()
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.IntegerSerde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.IntegerSerde.html new file mode 100644 index 000000000..8cc5c7fde --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.IntegerSerde.html @@ -0,0 +1,140 @@
Serdes.IntegerSerde (kafka 4.1.0 API)

Class Serdes.IntegerSerde
java.lang.Object
  org.apache.kafka.common.serialization.Serdes.WrapperSerde<Integer>
    org.apache.kafka.common.serialization.Serdes.IntegerSerde
All Implemented Interfaces: Closeable, AutoCloseable, Serde<Integer>
Enclosing class: Serdes

public static final class Serdes.IntegerSerde extends Serdes.WrapperSerde<Integer>

Constructor Details
  IntegerSerde
    public IntegerSerde()
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.ListSerde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.ListSerde.html new file mode 100644 index 000000000..f91d9f0e5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.ListSerde.html @@ -0,0 +1,150 @@
Serdes.ListSerde (kafka 4.1.0 API)

Class Serdes.ListSerde<Inner>
java.lang.Object
  org.apache.kafka.common.serialization.Serdes.WrapperSerde<List<Inner>>
    org.apache.kafka.common.serialization.Serdes.ListSerde<Inner>
All Implemented Interfaces: Closeable, AutoCloseable, Serde<List<Inner>>
Enclosing class: Serdes

public static final class Serdes.ListSerde<Inner> extends Serdes.WrapperSerde<List<Inner>>

Constructor Details
  ListSerde
    public ListSerde()
  ListSerde
    public ListSerde(Class<L> listClass, Serde<Inner> serde)
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.LongSerde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.LongSerde.html new file mode 100644 index 000000000..b978f0c06 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.LongSerde.html @@ -0,0 +1,140 @@
Serdes.LongSerde (kafka 4.1.0 API)

Class Serdes.LongSerde
java.lang.Object
  org.apache.kafka.common.serialization.Serdes.WrapperSerde<Long>
    org.apache.kafka.common.serialization.Serdes.LongSerde
All Implemented Interfaces: Closeable, AutoCloseable, Serde<Long>
Enclosing class: Serdes

public static final class Serdes.LongSerde extends Serdes.WrapperSerde<Long>

Constructor Details
  LongSerde
    public LongSerde()
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.ShortSerde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.ShortSerde.html new file mode 100644 index 000000000..be9d969b7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.ShortSerde.html @@ -0,0 +1,140 @@
Serdes.ShortSerde (kafka 4.1.0 API)

Class Serdes.ShortSerde
java.lang.Object
  org.apache.kafka.common.serialization.Serdes.WrapperSerde<Short>
    org.apache.kafka.common.serialization.Serdes.ShortSerde
All Implemented Interfaces: Closeable, AutoCloseable, Serde<Short>
Enclosing class: Serdes

public static final class Serdes.ShortSerde extends Serdes.WrapperSerde<Short>

Constructor Details
  ShortSerde
    public ShortSerde()
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.StringSerde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.StringSerde.html new file mode 100644 index 000000000..a4f53b9cf --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.StringSerde.html @@ -0,0 +1,140 @@
Serdes.StringSerde (kafka 4.1.0 API)

Class Serdes.StringSerde
java.lang.Object
  org.apache.kafka.common.serialization.Serdes.WrapperSerde<String>
    org.apache.kafka.common.serialization.Serdes.StringSerde
All Implemented Interfaces: Closeable, AutoCloseable, Serde<String>
Enclosing class: Serdes

public static final class Serdes.StringSerde extends Serdes.WrapperSerde<String>

Constructor Details
  StringSerde
    public StringSerde()
diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.UUIDSerde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.UUIDSerde.html new file mode 100644 index 000000000..386805e05 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.UUIDSerde.html @@ -0,0 +1,140 @@
Serdes.UUIDSerde (kafka 4.1.0 API)

Class Serdes.UUIDSerde
java.lang.Object
  org.apache.kafka.common.serialization.Serdes.WrapperSerde<UUID>
    org.apache.kafka.common.serialization.Serdes.UUIDSerde
All Implemented Interfaces: Closeable, AutoCloseable, Serde<UUID>
Enclosing class: Serdes

public static final class Serdes.UUIDSerde extends Serdes.WrapperSerde<UUID>

Constructor Details
  UUIDSerde
    public UUIDSerde()
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.VoidSerde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.VoidSerde.html new file mode 100644 index 000000000..d267b1634 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.VoidSerde.html @@ -0,0 +1,140 @@ + + + + +Serdes.VoidSerde (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Serdes.VoidSerde

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.Serdes.WrapperSerde<Void> +
    org.apache.kafka.common.serialization.Serdes.VoidSerde
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Serde<Void>
    +
    +
    +
    Enclosing class:
    +
    Serdes
    +
    +
    +
    public static final class Serdes.VoidSerde +extends Serdes.WrapperSerde<Void>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        VoidSerde

        +
        public VoidSerde()
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.WrapperSerde.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.WrapperSerde.html new file mode 100644 index 000000000..9e7d242b2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.WrapperSerde.html @@ -0,0 +1,235 @@ + + + + +Serdes.WrapperSerde (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Serdes.WrapperSerde<T>

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.Serdes.WrapperSerde<T>
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Serde<T>
    +
    +
    +
    Direct Known Subclasses:
    +
    Serdes.BooleanSerde, Serdes.ByteArraySerde, Serdes.ByteBufferSerde, Serdes.BytesSerde, Serdes.DoubleSerde, Serdes.FloatSerde, Serdes.IntegerSerde, Serdes.ListSerde, Serdes.LongSerde, Serdes.ShortSerde, Serdes.StringSerde, Serdes.UUIDSerde, Serdes.VoidSerde, WindowedSerdes.SessionWindowedSerde, WindowedSerdes.TimeWindowedSerde
    +
    +
    +
    Enclosing class:
    +
    Serdes
    +
    +
    +
    public static class Serdes.WrapperSerde<T> +extends Object +implements Serde<T>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        public void configure(Map<String,?> configs, + boolean isKey)
        +
        Description copied from interface: Serde
        +
        Configure this class, which will configure the underlying serializer and deserializer.
        +
        +
        Specified by:
        +
        configure in interface Serde<T>
        +
        Parameters:
        +
        configs - configs in key/value pairs
        +
        isKey - whether is for key or value
        +
        +
        +
      • +
      • +
        +

        close

        +
        public void close()
        +
        Description copied from interface: Serde
        +
        Close this serde class, which will close the underlying serializer and deserializer. +

        + This method has to be idempotent because it might be called multiple times.

        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        Specified by:
        +
        close in interface Closeable
        +
        Specified by:
        +
        close in interface Serde<T>
        +
        +
        +
      • +
      • +
        +

        serializer

        +
        public Serializer<T> serializer()
        +
        +
        Specified by:
        +
        serializer in interface Serde<T>
        +
        +
        +
      • +
      • +
        +

        deserializer

        +
        public Deserializer<T> deserializer()
        +
        +
        Specified by:
        +
        deserializer in interface Serde<T>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
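For readers skimming these generated pages, a small sketch may help show how `Serdes.WrapperSerde` is typically extended to define a reusable, named serde class. The example below wraps the existing `StringSerializer` and `StringDeserializer` documented later in this package; the class name `NamedStringSerde` and the topic name are illustrative only.

```java
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

// Illustrative sketch: a named serde built on Serdes.WrapperSerde.
// WrapperSerde forwards configure()/close() to the wrapped serializer and deserializer.
public class NamedStringSerde extends Serdes.WrapperSerde<String> {
    public NamedStringSerde() {
        super(new StringSerializer(), new StringDeserializer());
    }

    public static void main(String[] args) {
        try (Serde<String> serde = new NamedStringSerde()) {
            byte[] bytes = serde.serializer().serialize("demo-topic", "hello");
            String roundTripped = serde.deserializer().deserialize("demo-topic", bytes);
            System.out.println(roundTripped); // prints "hello"
        }
    }
}
```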
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.html new file mode 100644 index 000000000..b80ef9ce0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serdes.html @@ -0,0 +1,390 @@ + + + + +Serdes (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Serdes

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.Serdes
    +
    +
    +
    +
    public class Serdes +extends Object
    +
    Factory for creating serializers / deserializers.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Serdes

        +
        public Serdes()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        serdeFrom

        +
        public static <T> Serde<T> serdeFrom(Class<T> type)
        +
        +
      • +
      • +
        +

        serdeFrom

        +
        public static <T> Serde<T> serdeFrom(Serializer<T> serializer, + Deserializer<T> deserializer)
        +
Construct a serde object from separate serializer and deserializer.
        +
        +
        Parameters:
        +
        serializer - must not be null.
        +
        deserializer - must not be null.
        +
        +
        +
      • +
      • +
        +

        Long

        +
        public static Serde<Long> Long()
        +
        A serde for nullable Long type.
        +
        +
      • +
      • +
        +

        Integer

        +
        public static Serde<Integer> Integer()
        +
        A serde for nullable Integer type.
        +
        +
      • +
      • +
        +

        Short

        +
        public static Serde<Short> Short()
        +
        A serde for nullable Short type.
        +
        +
      • +
      • +
        +

        Float

        +
        public static Serde<Float> Float()
        +
        A serde for nullable Float type.
        +
        +
      • +
      • +
        +

        Double

        +
        public static Serde<Double> Double()
        +
        A serde for nullable Double type.
        +
        +
      • +
      • +
        +

        String

        +
        public static Serde<String> String()
        +
        A serde for nullable String type.
        +
        +
      • +
      • +
        +

        ByteBuffer

        +
        public static Serde<ByteBuffer> ByteBuffer()
        +
        A serde for nullable ByteBuffer type.
        +
        +
      • +
      • +
        +

        Bytes

        +
        public static Serde<org.apache.kafka.common.utils.Bytes> Bytes()
        +
        A serde for nullable Bytes type.
        +
        +
      • +
      • +
        +

        UUID

        +
        public static Serde<UUID> UUID()
        +
A serde for nullable UUID type.
        +
        +
      • +
      • +
        +

        Boolean

        +
        public static Serde<Boolean> Boolean()
        +
        A serde for nullable Boolean type.
        +
        +
      • +
      • +
        +

        ByteArray

        +
        public static Serde<byte[]> ByteArray()
        +
        A serde for nullable byte[] type.
        +
        +
      • +
      • +
        +

        Void

        +
        public static Serde<Void> Void()
        +
        A serde for Void type.
        +
        +
      • +
      • +
        +

        ListSerde

        +
        public static <L extends List<Inner>, +Inner> +Serde<List<Inner>> ListSerde(Class<L> listClass, + Serde<Inner> innerSerde)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
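As a quick orientation for the factory methods listed above, here is a small, self-contained sketch (topic names and values are arbitrary) showing the two common ways to obtain a `Serde`: the built-in factories such as `Serdes.Long()` and `Serdes.String()`, and `Serdes.serdeFrom(serializer, deserializer)` for pairing an existing serializer/deserializer.

```java
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

public class SerdesFactoryExample {
    public static void main(String[] args) {
        // Built-in serde for a common type.
        try (Serde<Long> longSerde = Serdes.Long()) {
            byte[] bytes = longSerde.serializer().serialize("numbers", 42L);
            System.out.println(longSerde.deserializer().deserialize("numbers", bytes)); // 42
        }

        // Pairing an explicit serializer and deserializer.
        try (Serde<String> stringSerde = Serdes.serdeFrom(new StringSerializer(), new StringDeserializer())) {
            byte[] bytes = stringSerde.serializer().serialize("greetings", "hi");
            System.out.println(stringSerde.deserializer().deserialize("greetings", bytes)); // hi
        }
    }
}
```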
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/Serializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/Serializer.html new file mode 100644 index 000000000..9b68b2676 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/Serializer.html @@ -0,0 +1,228 @@ + + + + +Serializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Serializer<T>

    +
    +
    +
    +
    Type Parameters:
    +
    T - Type to be serialized from.
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Closeable
    +
    +
    +
    All Known Implementing Classes:
    +
    BooleanSerializer, ByteArraySerializer, ByteBufferSerializer, BytesSerializer, DoubleSerializer, FloatSerializer, IntegerSerializer, ListSerializer, LongSerializer, SessionWindowedSerializer, ShortSerializer, StringSerializer, TimeWindowedSerializer, UUIDSerializer, VoidSerializer
    +
    +
    +
    public interface Serializer<T> +extends Closeable
    +
    An interface for converting objects to bytes. + A class that implements this interface is expected to have a constructor with no parameter. + +

    This interface can be combined with ClusterResourceListener + to receive cluster metadata once it's available, as well as Monitorable + to enable the serializer to register metrics. For the latter, the following tags are automatically added to all + metrics registered: config set to either key.serializer or value.serializer, + and class set to the serializer class name.

    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      default void
      + +
      +
      Close this serializer.
      +
      +
      default void
      +
      configure(Map<String,?> configs, + boolean isKey)
      +
      +
      Configure this class.
      +
      +
      default byte[]
      +
      serialize(String topic, + Headers headers, + T data)
      +
      +
      Convert data into a byte array.
      +
      +
      byte[]
      +
      serialize(String topic, + T data)
      +
      +
      Convert data into a byte array.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        default void configure(Map<String,?> configs, + boolean isKey)
        +
        Configure this class.
        +
        +
        Parameters:
        +
        configs - configs in key/value pairs
        +
        isKey - whether the serializer is used for the key or the value
        +
        +
        +
      • +
      • +
        +

        serialize

        +
        byte[] serialize(String topic, + T data)
        +
        Convert data into a byte array. + +

        It is recommended to serialize null data to the null byte array.

        +
        +
        Parameters:
        +
        topic - topic associated with data
        +
        data - typed data; may be null
        +
        Returns:
        +
        serialized bytes; may be null
        +
        +
        +
      • +
      • +
        +

        serialize

        +
        default byte[] serialize(String topic, + Headers headers, + T data)
        +
        Convert data into a byte array. + +

        It is recommended to serialize null data to the null byte array. + +

        Note that the passed in Headers may be empty, but never null. + The implementation is allowed to modify the passed in headers, as a side effect of serialization. + It is considered best practice to not delete or modify existing headers, but rather only add new ones.

        +
        +
        Parameters:
        +
        topic - topic associated with data
        +
        headers - headers associated with the record
        +
        data - typed data; may be null
        +
        Returns:
        +
        serialized bytes; may be null
        +
        +
        +
      • +
      • +
        +

        close

        +
        default void close()
        +
        Close this serializer. + +

        This method must be idempotent as it may be called multiple times.

        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        Specified by:
        +
        close in interface Closeable
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
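To illustrate the contract above (only `serialize(String, T)` is abstract; `configure` and `close` have default no-op implementations, and `null` data should serialize to `null`), here is a hypothetical `Serializer` for `java.time.Instant` that encodes the epoch-millisecond value as eight bytes. The class name and encoding choice are assumptions for the sketch, not part of the Kafka API.

```java
import java.nio.ByteBuffer;
import java.time.Instant;
import org.apache.kafka.common.serialization.Serializer;

// Hypothetical serializer: encodes an Instant as its epoch-millisecond value (8 bytes, big-endian).
public class InstantSerializer implements Serializer<Instant> {
    @Override
    public byte[] serialize(String topic, Instant data) {
        if (data == null) {
            return null; // recommended: serialize null data to the null byte array
        }
        return ByteBuffer.allocate(Long.BYTES).putLong(data.toEpochMilli()).array();
    }
}
```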
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/ShortDeserializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/ShortDeserializer.html new file mode 100644 index 000000000..a5125b4b2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/ShortDeserializer.html @@ -0,0 +1,223 @@ + + + + +ShortDeserializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ShortDeserializer

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.ShortDeserializer
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Deserializer<Short>
    +
    +
    +
    public class ShortDeserializer +extends Object +implements Deserializer<Short>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ShortDeserializer

        +
        public ShortDeserializer()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        deserialize

        +
        public Short deserialize(String topic, + byte[] data)
        +
        Description copied from interface: Deserializer
        +
        Deserialize a record value from a byte array into a value or object. + +

        It is recommended to deserialize a null byte array to a null object.

        +
        +
        Specified by:
        +
        deserialize in interface Deserializer<Short>
        +
        Parameters:
        +
        topic - topic associated with the data
        +
        data - serialized bytes; may be null
        +
        Returns:
        +
        deserialized typed data; may be null
        +
        +
        +
      • +
      • +
        +

        deserialize

        +
        public Short deserialize(String topic, + Headers headers, + ByteBuffer data)
        +
        Description copied from interface: Deserializer
        +
        Deserialize a record value from a ByteBuffer into a value or object. + +

        If ByteBufferDeserializer is used by an application, the application code cannot make any assumptions + about the returned ByteBuffer like the position, limit, capacity, etc., or if it is backed by + an array or not. + +

        Similarly, if this method is overridden, the implementation cannot make any assumptions about the + passed in ByteBuffer either. + +

        It is recommended to deserialize a null ByteBuffer to a null object. + +

        Note that the passed in Headers may be empty, but never null. + The implementation is allowed to modify the passed in headers, as a side effect of deserialization. + It is considered best practice to not delete or modify existing headers, but rather only add new ones.

        +
        +
        Specified by:
        +
        deserialize in interface Deserializer<Short>
        +
        Parameters:
        +
        topic - topic associated with the data
        +
        headers - headers associated with the record
        +
        data - serialized ByteBuffer; may be null
        +
        Returns:
        +
        deserialized typed data; may be null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/ShortSerializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/ShortSerializer.html new file mode 100644 index 000000000..c6af20a9e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/ShortSerializer.html @@ -0,0 +1,183 @@ + + + + +ShortSerializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ShortSerializer

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.ShortSerializer
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Serializer<Short>
    +
    +
    +
    public class ShortSerializer +extends Object +implements Serializer<Short>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ShortSerializer

        +
        public ShortSerializer()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        serialize

        +
        public byte[] serialize(String topic, + Short data)
        +
        Description copied from interface: Serializer
        +
        Convert data into a byte array. + +

        It is recommended to serialize null data to the null byte array.

        +
        +
        Specified by:
        +
        serialize in interface Serializer<Short>
        +
        Parameters:
        +
        topic - topic associated with data
        +
        data - typed data; may be null
        +
        Returns:
        +
        serialized bytes; may be null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/StringDeserializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/StringDeserializer.html new file mode 100644 index 000000000..43986f5a5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/StringDeserializer.html @@ -0,0 +1,247 @@ + + + + +StringDeserializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StringDeserializer

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.StringDeserializer
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Deserializer<String>
    +
    +
    +
    public class StringDeserializer +extends Object +implements Deserializer<String>
    +
    String encoding defaults to UTF8 and can be customized by setting the property key.deserializer.encoding, + value.deserializer.encoding or deserializer.encoding. The first two take precedence over the last.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        StringDeserializer

        +
        public StringDeserializer()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        public void configure(Map<String,?> configs, + boolean isKey)
        +
        Description copied from interface: Deserializer
        +
        Configure this class.
        +
        +
        Specified by:
        +
        configure in interface Deserializer<String>
        +
        Parameters:
        +
        configs - configs in key/value pairs
        +
        isKey - whether the deserializer is used for the key or the value
        +
        +
        +
      • +
      • +
        +

        deserialize

        +
        public String deserialize(String topic, + byte[] data)
        +
        Description copied from interface: Deserializer
        +
        Deserialize a record value from a byte array into a value or object. + +

        It is recommended to deserialize a null byte array to a null object.

        +
        +
        Specified by:
        +
        deserialize in interface Deserializer<String>
        +
        Parameters:
        +
        topic - topic associated with the data
        +
        data - serialized bytes; may be null
        +
        Returns:
        +
        deserialized typed data; may be null
        +
        +
        +
      • +
      • +
        +

        deserialize

        +
        public String deserialize(String topic, + Headers headers, + ByteBuffer data)
        +
        Description copied from interface: Deserializer
        +
        Deserialize a record value from a ByteBuffer into a value or object. + +

        If ByteBufferDeserializer is used by an application, the application code cannot make any assumptions + about the returned ByteBuffer like the position, limit, capacity, etc., or if it is backed by + an array or not. + +

        Similarly, if this method is overridden, the implementation cannot make any assumptions about the + passed in ByteBuffer either. + +

        It is recommended to deserialize a null ByteBuffer to a null object. + +

        Note that the passed in Headers may be empty, but never null. + The implementation is allowed to modify the passed in headers, as a side effect of deserialization. + It is considered best practice to not delete or modify existing headers, but rather only add new ones.

        +
        +
        Specified by:
        +
        deserialize in interface Deserializer<String>
        +
        Parameters:
        +
        topic - topic associated with the data
        +
        headers - headers associated with the record
        +
        data - serialized ByteBuffer; may be null
        +
        Returns:
        +
        deserialized typed data; may be null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/StringSerializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/StringSerializer.html new file mode 100644 index 000000000..1eae957eb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/StringSerializer.html @@ -0,0 +1,207 @@ + + + + +StringSerializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StringSerializer

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.StringSerializer
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Serializer<String>
    +
    +
    +
    public class StringSerializer +extends Object +implements Serializer<String>
    +
    String encoding defaults to UTF8 and can be customized by setting the property key.serializer.encoding, + value.serializer.encoding or serializer.encoding. The first two take precedence over the last.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        StringSerializer

        +
        public StringSerializer()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        public void configure(Map<String,?> configs, + boolean isKey)
        +
        Description copied from interface: Serializer
        +
        Configure this class.
        +
        +
        Specified by:
        +
        configure in interface Serializer<String>
        +
        Parameters:
        +
        configs - configs in key/value pairs
        +
        isKey - whether the serializer is used for the key or the value
        +
        +
        +
      • +
      • +
        +

        serialize

        +
        public byte[] serialize(String topic, + String data)
        +
        Description copied from interface: Serializer
        +
        Convert data into a byte array. + +

        It is recommended to serialize null data to the null byte array.

        +
        +
        Specified by:
        +
        serialize in interface Serializer<String>
        +
        Parameters:
        +
        topic - topic associated with data
        +
        data - typed data; may be null
        +
        Returns:
        +
        serialized bytes; may be null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
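The two string (de)serializers above read their character encoding from the configuration passed to `configure`. The sketch below (topic name and value are arbitrary) round-trips a UTF-16 string by configuring both sides with the matching `value.*.encoding` properties described in their class descriptions and passing `isKey = false`.

```java
import java.util.Map;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

public class StringEncodingExample {
    public static void main(String[] args) {
        // Configure the value serializer and deserializer to use UTF-16 instead of the UTF-8 default.
        StringSerializer serializer = new StringSerializer();
        serializer.configure(Map.of("value.serializer.encoding", "UTF-16"), false);

        StringDeserializer deserializer = new StringDeserializer();
        deserializer.configure(Map.of("value.deserializer.encoding", "UTF-16"), false);

        byte[] bytes = serializer.serialize("demo-topic", "héllo");
        System.out.println(deserializer.deserialize("demo-topic", bytes)); // héllo
    }
}
```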
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/UUIDDeserializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/UUIDDeserializer.html new file mode 100644 index 000000000..588d29d1d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/UUIDDeserializer.html @@ -0,0 +1,247 @@ + + + + +UUIDDeserializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class UUIDDeserializer

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.UUIDDeserializer
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Deserializer<UUID>
    +
    +
    +
    public class UUIDDeserializer +extends Object +implements Deserializer<UUID>
    +
The byte array is converted to a String before it is deserialized to a UUID. String encoding defaults to UTF8 and can be customized by setting the property key.deserializer.encoding, value.deserializer.encoding or deserializer.encoding. The first two take precedence over the last.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        UUIDDeserializer

        +
        public UUIDDeserializer()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        public void configure(Map<String,?> configs, + boolean isKey)
        +
        Description copied from interface: Deserializer
        +
        Configure this class.
        +
        +
        Specified by:
        +
        configure in interface Deserializer<UUID>
        +
        Parameters:
        +
        configs - configs in key/value pairs
        +
        isKey - whether the deserializer is used for the key or the value
        +
        +
        +
      • +
      • +
        +

        deserialize

        +
        public UUID deserialize(String topic, + byte[] data)
        +
        Description copied from interface: Deserializer
        +
        Deserialize a record value from a byte array into a value or object. + +

        It is recommended to deserialize a null byte array to a null object.

        +
        +
        Specified by:
        +
        deserialize in interface Deserializer<UUID>
        +
        Parameters:
        +
        topic - topic associated with the data
        +
        data - serialized bytes; may be null
        +
        Returns:
        +
        deserialized typed data; may be null
        +
        +
        +
      • +
      • +
        +

        deserialize

        +
        public UUID deserialize(String topic, + Headers headers, + ByteBuffer data)
        +
        Description copied from interface: Deserializer
        +
        Deserialize a record value from a ByteBuffer into a value or object. + +

        If ByteBufferDeserializer is used by an application, the application code cannot make any assumptions + about the returned ByteBuffer like the position, limit, capacity, etc., or if it is backed by + an array or not. + +

        Similarly, if this method is overridden, the implementation cannot make any assumptions about the + passed in ByteBuffer either. + +

        It is recommended to deserialize a null ByteBuffer to a null object. + +

        Note that the passed in Headers may be empty, but never null. + The implementation is allowed to modify the passed in headers, as a side effect of deserialization. + It is considered best practice to not delete or modify existing headers, but rather only add new ones.

        +
        +
        Specified by:
        +
        deserialize in interface Deserializer<UUID>
        +
        Parameters:
        +
        topic - topic associated with the data
        +
        headers - headers associated with the record
        +
        data - serialized ByteBuffer; may be null
        +
        Returns:
        +
        deserialized typed data; may be null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/UUIDSerializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/UUIDSerializer.html new file mode 100644 index 000000000..b0ddcd5f7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/UUIDSerializer.html @@ -0,0 +1,207 @@ + + + + +UUIDSerializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class UUIDSerializer

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.UUIDSerializer
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Serializer<UUID>
    +
    +
    +
    public class UUIDSerializer +extends Object +implements Serializer<UUID>
    +
The UUID is converted to a String before it is serialized. String encoding defaults to UTF8 and can be customized by setting the property key.serializer.encoding, value.serializer.encoding or serializer.encoding. The first two take precedence over the last.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        UUIDSerializer

        +
        public UUIDSerializer()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        public void configure(Map<String,?> configs, + boolean isKey)
        +
        Description copied from interface: Serializer
        +
        Configure this class.
        +
        +
        Specified by:
        +
        configure in interface Serializer<UUID>
        +
        Parameters:
        +
        configs - configs in key/value pairs
        +
        isKey - whether the serializer is used for the key or the value
        +
        +
        +
      • +
      • +
        +

        serialize

        +
        public byte[] serialize(String topic, + UUID data)
        +
        Description copied from interface: Serializer
        +
        Convert data into a byte array. + +

        It is recommended to serialize null data to the null byte array.

        +
        +
        Specified by:
        +
        serialize in interface Serializer<UUID>
        +
        Parameters:
        +
        topic - topic associated with data
        +
        data - typed data; may be null
        +
        Returns:
        +
        serialized bytes; may be null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/VoidDeserializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/VoidDeserializer.html new file mode 100644 index 000000000..06ddabd90 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/VoidDeserializer.html @@ -0,0 +1,223 @@ + + + + +VoidDeserializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class VoidDeserializer

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.VoidDeserializer
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Deserializer<Void>
    +
    +
    +
    public class VoidDeserializer +extends Object +implements Deserializer<Void>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        VoidDeserializer

        +
        public VoidDeserializer()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        deserialize

        +
        public Void deserialize(String topic, + byte[] data)
        +
        Description copied from interface: Deserializer
        +
        Deserialize a record value from a byte array into a value or object. + +

        It is recommended to deserialize a null byte array to a null object.

        +
        +
        Specified by:
        +
        deserialize in interface Deserializer<Void>
        +
        Parameters:
        +
        topic - topic associated with the data
        +
        data - serialized bytes; may be null
        +
        Returns:
        +
        deserialized typed data; may be null
        +
        +
        +
      • +
      • +
        +

        deserialize

        +
        public Void deserialize(String topic, + Headers headers, + ByteBuffer data)
        +
        Description copied from interface: Deserializer
        +
        Deserialize a record value from a ByteBuffer into a value or object. + +

        If ByteBufferDeserializer is used by an application, the application code cannot make any assumptions + about the returned ByteBuffer like the position, limit, capacity, etc., or if it is backed by + an array or not. + +

        Similarly, if this method is overridden, the implementation cannot make any assumptions about the + passed in ByteBuffer either. + +

        It is recommended to deserialize a null ByteBuffer to a null object. + +

        Note that the passed in Headers may be empty, but never null. + The implementation is allowed to modify the passed in headers, as a side effect of deserialization. + It is considered best practice to not delete or modify existing headers, but rather only add new ones.

        +
        +
        Specified by:
        +
        deserialize in interface Deserializer<Void>
        +
        Parameters:
        +
        topic - topic associated with the data
        +
        headers - headers associated with the record
        +
        data - serialized ByteBuffer; may be null
        +
        Returns:
        +
        deserialized typed data; may be null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/VoidSerializer.html b/static/41/javadoc/org/apache/kafka/common/serialization/VoidSerializer.html new file mode 100644 index 000000000..0381ef28f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/VoidSerializer.html @@ -0,0 +1,183 @@ + + + + +VoidSerializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class VoidSerializer

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.VoidSerializer
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Serializer<Void>
    +
    +
    +
    public class VoidSerializer +extends Object +implements Serializer<Void>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        VoidSerializer

        +
        public VoidSerializer()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        serialize

        +
        public byte[] serialize(String topic, + Void data)
        +
        Description copied from interface: Serializer
        +
        Convert data into a byte array. + +

        It is recommended to serialize null data to the null byte array.

        +
        +
        Specified by:
        +
        serialize in interface Serializer<Void>
        +
        Parameters:
        +
        topic - topic associated with data
        +
        data - typed data; may be null
        +
        Returns:
        +
        serialized bytes; may be null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/package-summary.html b/static/41/javadoc/org/apache/kafka/common/serialization/package-summary.html new file mode 100644 index 000000000..7e9e4e023 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/package-summary.html @@ -0,0 +1,211 @@ + + + + +org.apache.kafka.common.serialization (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.common.serialization

    +
    +
    +
    package org.apache.kafka.common.serialization
    +
    +
Provides the interface and some implementations of serialization/deserialization routines for various objects.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/common/serialization/package-tree.html b/static/41/javadoc/org/apache/kafka/common/serialization/package-tree.html new file mode 100644 index 000000000..2e0b2bff1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/common/serialization/package-tree.html @@ -0,0 +1,130 @@ + + + + +org.apache.kafka.common.serialization Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.common.serialization

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/components/Versioned.html b/static/41/javadoc/org/apache/kafka/connect/components/Versioned.html new file mode 100644 index 000000000..b26e8394a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/components/Versioned.html @@ -0,0 +1,140 @@ + + + + +Versioned (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Versioned

    +
    +
    +
    +
    All Known Subinterfaces:
    +
    ConnectRestExtension
    +
    +
    +
    All Known Implementing Classes:
    +
    Connector, MockConnector, MockSinkConnector, MockSourceConnector, SchemaSourceConnector, SimpleHeaderConverter, SinkConnector, SourceConnector, StringConverter, VerifiableSinkConnector, VerifiableSourceConnector
    +
    +
    +
    public interface Versioned
    +
    Connect requires some components implement this interface to define a version string.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
      Get the version of this component.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        version

        +
        String version()
        +
        Get the version of this component.
        +
        +
        Returns:
        +
        the version, formatted as a String. The version may not be null or empty.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/components/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/components/package-summary.html new file mode 100644 index 000000000..77fa1cbc8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/components/package-summary.html @@ -0,0 +1,87 @@ + + + + +org.apache.kafka.connect.components (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.connect.components

    +
    +
    +
    package org.apache.kafka.connect.components
    +
    +
    Provides common interfaces used to describe pluggable components.
    +
    +
    +
      +
    • +
      +
      Interfaces
      +
      +
      Class
      +
      Description
      + +
      +
      Connect requires some components implement this interface to define a version string.
      +
      +
      +
      +
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/components/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/components/package-tree.html new file mode 100644 index 000000000..d343e7a51 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/components/package-tree.html @@ -0,0 +1,67 @@ + + + + +org.apache.kafka.connect.components Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.connect.components

    +Package Hierarchies: + +
    +
    +

    Interface Hierarchy

    +
      +
    • org.apache.kafka.connect.components.Versioned
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/connector/ConnectRecord.html b/static/41/javadoc/org/apache/kafka/connect/connector/ConnectRecord.html new file mode 100644 index 000000000..8de2b6b5b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/connector/ConnectRecord.html @@ -0,0 +1,388 @@ + + + + +ConnectRecord (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConnectRecord<R extends ConnectRecord<R>>

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.connector.ConnectRecord<R>
    +
    +
    +
    +
    Direct Known Subclasses:
    +
    SinkRecord, SourceRecord
    +
    +
    +
    public abstract class ConnectRecord<R extends ConnectRecord<R>> +extends Object
    +

    + Base class for records containing data to be copied to/from Kafka. This corresponds closely to + Kafka's ProducerRecord and ConsumerRecord classes, and holds the data that may be used by both + sources and sinks (topic, kafkaPartition, key, value, timestamp, headers). Although both implementations include a + notion of offset, it is not included here because they differ in type. +

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        topic

        +
        public String topic()
        +
        +
      • +
      • +
        +

        kafkaPartition

        +
        public Integer kafkaPartition()
        +
        +
      • +
      • +
        +

        key

        +
        public Object key()
        +
        +
      • +
      • +
        +

        keySchema

        +
        public Schema keySchema()
        +
        +
      • +
      • +
        +

        value

        +
        public Object value()
        +
        +
      • +
      • +
        +

        valueSchema

        +
        public Schema valueSchema()
        +
        +
      • +
      • +
        +

        timestamp

        +
        public Long timestamp()
        +
        +
      • +
      • +
        +

        headers

        +
        public Headers headers()
        +
        Get the headers for this record.
        +
        +
        Returns:
        +
        the headers; never null
        +
        +
        +
      • +
      • +
        +

        newRecord

        +
        public abstract R newRecord(String topic, + Integer kafkaPartition, + Schema keySchema, + Object key, + Schema valueSchema, + Object value, + Long timestamp)
        +
        Create a new record of the same type as itself, with the specified parameter values. All other fields in this record will be copied + over to the new record. Since the headers are mutable, the resulting record will have a copy of this record's headers.
        +
        +
        Parameters:
        +
        topic - the name of the topic; may be null
        +
        kafkaPartition - the partition number for the Kafka topic; may be null
        +
        keySchema - the schema for the key; may be null
        +
        key - the key; may be null
        +
        valueSchema - the schema for the value; may be null
        +
        value - the value; may be null
        +
        timestamp - the timestamp; may be null
        +
        Returns:
        +
        the new record
        +
        +
        +
      • +
      • +
        +

        newRecord

        +
        public abstract R newRecord(String topic, + Integer kafkaPartition, + Schema keySchema, + Object key, + Schema valueSchema, + Object value, + Long timestamp, + Iterable<Header> headers)
        +
        Create a new record of the same type as itself, with the specified parameter values. All other fields in this record will be copied + over to the new record.
        +
        +
        Parameters:
        +
        topic - the name of the topic; may be null
        +
        kafkaPartition - the partition number for the Kafka topic; may be null
        +
        keySchema - the schema for the key; may be null
        +
        key - the key; may be null
        +
        valueSchema - the schema for the value; may be null
        +
        value - the value; may be null
        +
        timestamp - the timestamp; may be null
        +
        headers - the headers; may be null or empty
        +
        Returns:
        +
        the new record
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
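Because `newRecord` copies every field that is not explicitly replaced, a common pattern (sketched below with a hypothetical helper name) is to derive a modified copy of a record, for example re-routing it to another topic while preserving key, value, schemas, timestamp, and headers.

```java
import org.apache.kafka.connect.connector.ConnectRecord;

public final class RecordRouting {

    // Hypothetical helper: return a copy of the record addressed to a different topic.
    // All other fields, including a copy of the mutable headers, are carried over by newRecord.
    public static <R extends ConnectRecord<R>> R rerouteTopic(R record, String newTopic) {
        return record.newRecord(
                newTopic,
                record.kafkaPartition(),
                record.keySchema(),
                record.key(),
                record.valueSchema(),
                record.value(),
                record.timestamp(),
                record.headers());
    }

    private RecordRouting() { }
}
```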
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/connector/Connector.html b/static/41/javadoc/org/apache/kafka/connect/connector/Connector.html new file mode 100644 index 000000000..1915f1d1f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/connector/Connector.html @@ -0,0 +1,341 @@ + + + + +Connector (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Connector

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.connector.Connector
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Versioned
    +
    +
    +
    Direct Known Subclasses:
    +
    MockConnector, SinkConnector, SourceConnector
    +
    +
    +
    public abstract class Connector +extends Object +implements Versioned
    +

    + Connectors manage integration of Kafka Connect with another system, either as an input that ingests + data into Kafka or an output that passes data to an external system. Implementations should + not use this class directly; they should inherit from SourceConnector + or SinkConnector. +

    +

    + Connectors have two primary roles. First, given some configuration, they are responsible for + creating configurations for a set of Tasks that split up the data processing. For + example, a database Connector might create Tasks by dividing the set of tables evenly among + tasks. Second, they are responsible for monitoring inputs for changes that require + reconfiguration and notifying the Kafka Connect runtime via the ConnectorContext. Continuing the + previous example, the connector might periodically check for new tables and notify Kafka Connect of + additions and deletions. Kafka Connect will then request new configurations and update the running + Tasks. +

    +
    +
    +
      + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      + +
       
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      abstract ConfigDef
      + +
      +
      Define the configuration for the connector.
      +
      +
      void
      + +
      +
      Initialize this connector, using the provided ConnectorContext to notify the runtime of + input configuration changes.
      +
      +
      void
      + +
      +
      + Initialize this connector, using the provided ConnectorContext to notify the runtime of + input configuration changes and using the provided set of Task configurations.
      +
      +
      void
      + +
      +
      Reconfigure this Connector.
      +
      +
      abstract void
      + +
      +
      Start this Connector.
      +
      +
      abstract void
      + +
      +
      Stop this connector.
      +
      +
      abstract Class<? extends Task>
      + +
      +
      Returns the Task implementation for this Connector.
      +
      +
      abstract List<Map<String,String>>
      +
      taskConfigs(int maxTasks)
      +
      +
      Returns a set of configurations for Tasks based on the current configuration, + producing at most maxTasks configurations.
      +
      + +
      validate(Map<String,String> connectorConfigs)
      +
      +
      Validate the connector configuration values against configuration definitions.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +

      Methods inherited from interface org.apache.kafka.connect.components.Versioned

      +version
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Connector

        +
        public Connector()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        initialize

        +
        public void initialize(ConnectorContext ctx)
        +
        Initialize this connector, using the provided ConnectorContext to notify the runtime of + input configuration changes.
        +
        +
        Parameters:
        +
        ctx - context object used to interact with the Kafka Connect runtime
        +
        +
        +
      • +
      • +
        +

        initialize

        +
        public void initialize(ConnectorContext ctx, + List<Map<String,String>> taskConfigs)
        +

        + Initialize this connector, using the provided ConnectorContext to notify the runtime of + input configuration changes and using the provided set of Task configurations. + This version is only used to recover from failures. +

        +

        + The default implementation ignores the provided Task configurations. During recovery, Kafka Connect will request + an updated set of configurations and update the running Tasks appropriately. However, Connectors should + implement special handling of this case if it will avoid unnecessary changes to running Tasks. +

        +
        +
        Parameters:
        +
        ctx - context object used to interact with the Kafka Connect runtime
        +
        taskConfigs - existing task configurations, which may be used when generating new task configs to avoid + churn in partition to task assignments
        +
        +
        +
      • +
      • +
        +

        start

        +
        public abstract void start(Map<String,String> props)
        +
        Start this Connector. This method will only be called on a clean Connector, i.e. it has + either just been instantiated and initialized or stop() has been invoked.
        +
        +
        Parameters:
        +
        props - configuration settings
        +
        +
        +
      • +
      • +
        +

        reconfigure

        +
        public void reconfigure(Map<String,String> props)
        +
        Reconfigure this Connector. Most implementations will not override this, using the default + implementation that calls stop() followed by start(Map). + Implementations only need to override this if they want to handle this process more + efficiently, e.g. without shutting down network connections to the external system.
        +
        +
        Parameters:
        +
        props - new configuration settings
        +
        +
        +
      • +
      • +
        +

        taskClass

        +
        public abstract Class<? extends Task> taskClass()
        +
        Returns the Task implementation for this Connector.
        +
        +
      • +
      • +
        +

        taskConfigs

        +
        public abstract List<Map<String,String>> taskConfigs(int maxTasks)
        +
        Returns a set of configurations for Tasks based on the current configuration, + producing at most maxTasks configurations.
        +
        +
        Parameters:
        +
        maxTasks - maximum number of configurations to generate
        +
        Returns:
        +
        configurations for Tasks
        +
        +
        +
      • +
      • +
        +

        stop

        +
        public abstract void stop()
        +
        Stop this connector.
        +
        +
      • +
      • +
        +

        validate

        +
        public Config validate(Map<String,String> connectorConfigs)
        +
        Validate the connector configuration values against configuration definitions.
        +
        +
        Parameters:
        +
        connectorConfigs - the provided configuration values
        +
        Returns:
        +
        a parsed and validated Config containing any relevant validation errors with the raw + connectorConfigs which should prevent this configuration from being used.
        +
        +
        +
      • +
      • +
        +

        config

        +
        public abstract ConfigDef config()
        +
        Define the configuration for the connector.
        +
        +
        Returns:
        +
        The ConfigDef for this connector; may not be null.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
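To make the lifecycle described above concrete, here is a minimal sketch of a sink-side connector (the class names, version string, and logging-only task are illustrative assumptions). The connector hands every task an unmodified copy of its own configuration; a real connector would partition the work in `taskConfigs` and declare its settings in `config()`.

```java
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.Task;
import org.apache.kafka.connect.sink.SinkConnector;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

// Illustrative sketch of a Connector/Task pair; not a production implementation.
public class LoggingSinkConnector extends SinkConnector {
    private Map<String, String> settings;

    @Override public String version() { return "0.0.1"; }

    @Override public void start(Map<String, String> props) { this.settings = props; }

    @Override public Class<? extends Task> taskClass() { return LoggingSinkTask.class; }

    @Override public List<Map<String, String>> taskConfigs(int maxTasks) {
        // Every task receives the same configuration in this sketch.
        List<Map<String, String>> configs = new ArrayList<>();
        for (int i = 0; i < maxTasks; i++) {
            configs.add(new HashMap<>(settings));
        }
        return configs;
    }

    @Override public void stop() { }

    @Override public ConfigDef config() {
        // No custom settings are declared in this sketch.
        return new ConfigDef();
    }

    public static class LoggingSinkTask extends SinkTask {
        @Override public String version() { return "0.0.1"; }

        @Override public void start(Map<String, String> props) { }

        @Override public void put(Collection<SinkRecord> records) {
            for (SinkRecord record : records) {
                System.out.println(record.topic() + ": " + record.value());
            }
        }

        @Override public void stop() { }
    }
}
```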
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/connector/ConnectorContext.html b/static/41/javadoc/org/apache/kafka/connect/connector/ConnectorContext.html new file mode 100644 index 000000000..7c657dc7c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/connector/ConnectorContext.html @@ -0,0 +1,183 @@ + + + + +ConnectorContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ConnectorContext

    +
    +
    +
    +
    All Known Subinterfaces:
    +
    SinkConnectorContext, SourceConnectorContext
    +
    +
    +
    public interface ConnectorContext
    +
    ConnectorContext allows Connectors to proactively interact with the Kafka Connect runtime.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        requestTaskReconfiguration

        +
        void requestTaskReconfiguration()
        +
        Requests that the runtime reconfigure the Tasks for this source. This should be used to + indicate to the runtime that something about the input/output has changed (e.g. partitions + added/removed) and the running Tasks will need to be modified.
        +
        +
      • +
      • +
        +

        raiseError

        +
        void raiseError(Exception e)
        +
        Raise an unrecoverable exception to the Connect framework. This will cause the status of the + connector to transition to FAILED.
        +
        +
        Parameters:
        +
        e - Exception to be raised.
        +
        +
        +
      • +
      • +
        +

        pluginMetrics

        +
        PluginMetrics pluginMetrics()
        +
        Get a PluginMetrics that can be used to define metrics + +

        This method was added in Apache Kafka 4.1. Connectors that use this method but want to + maintain backward compatibility so they can also be deployed to older Connect runtimes + should guard the call to this method with a try-catch block, since calling this method will result in a + NoSuchMethodError or NoClassDefFoundError when the connector is deployed to + Connect runtimes older than Kafka 4.1. For example: +

        +     PluginMetrics pluginMetrics;
        +     try {
        +         pluginMetrics = context.pluginMetrics();
        +     } catch (NoSuchMethodError | NoClassDefFoundError e) {
        +         pluginMetrics = null;
        +     }
        + 
        +
        +
        Returns:
        +
        the pluginMetrics instance
        +
        Since:
        +
        4.1
        +
        +
        +
      • +
      +
      +
    • +
    +
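    As an illustration of how these callbacks are typically used, the hypothetical helper below could be run periodically from a connector's own monitoring thread; InputMonitor and ExternalSystemClient are assumed names for this sketch, not Kafka Connect classes.

        import java.util.List;

        import org.apache.kafka.connect.connector.ConnectorContext;

        // Hypothetical helper owned by a Connector; the ConnectorContext is the one
        // that the runtime passes to Connector.initialize().
        public class InputMonitor implements Runnable {

            // Hypothetical minimal client for the external system being monitored.
            public interface ExternalSystemClient {
                List<String> listTables() throws Exception;
            }

            private final ConnectorContext context;
            private final ExternalSystemClient client;
            private int knownTableCount;

            public InputMonitor(ConnectorContext context, ExternalSystemClient client, int initialTableCount) {
                this.context = context;
                this.client = client;
                this.knownTableCount = initialTableCount;
            }

            @Override
            public void run() {
                try {
                    int current = client.listTables().size();
                    if (current != knownTableCount) {
                        knownTableCount = current;
                        // The set of inputs changed: ask the runtime to call taskConfigs()
                        // again and restart the tasks with the new assignments.
                        context.requestTaskReconfiguration();
                    }
                } catch (Exception e) {
                    // Unrecoverable problem: transition this connector to FAILED.
                    context.raiseError(e);
                }
            }
        }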
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/connector/Task.html b/static/41/javadoc/org/apache/kafka/connect/connector/Task.html new file mode 100644 index 000000000..b57a9887a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/connector/Task.html @@ -0,0 +1,174 @@ + + + + +Task (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Task

    +
    +
    +
    +
    All Known Implementing Classes:
    +
    MockSinkTask, MockSourceTask, SchemaSourceTask, SinkTask, SourceTask, VerifiableSinkTask, VerifiableSourceTask
    +
    +
    +
    public interface Task
    +

    Tasks contain the code that actually copies data to/from another system. They receive a configuration from their parent Connector, assigning them a fraction of the connector's work. The Kafka Connect framework then pushes/pulls data from the Task. The Task must also be able to respond to reconfiguration requests.

    Task only contains the minimal shared functionality between SourceTask and SinkTask.

    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      Start the Task
      +
      +
      void
      + +
      +
      Stop this task.
      +
      + + +
      +
      Get the version of this task.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        version

        +
        String version()
        +
        Get the version of this task. Usually this should be the same as the corresponding Connector class's version.
        +
        +
        Returns:
        +
        the version, formatted as a String
        +
        +
        +
      • +
      • +
        +

        start

        +
        void start(Map<String,String> props)
        +
        Start the Task
        +
        +
        Parameters:
        +
        props - initial configuration
        +
        +
        +
      • +
      • +
        +

        stop

        +
        void stop()
        +
        Stop this task.
        +
        +
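        For completeness with the connector sketch above, a minimal source-side Task might look like the following; MySourceTask, the task.id property, and the example-topic name are assumptions for illustration, and each poll simply emits a single record.

            import java.util.Collections;
            import java.util.List;
            import java.util.Map;

            import org.apache.kafka.connect.data.Schema;
            import org.apache.kafka.connect.source.SourceRecord;
            import org.apache.kafka.connect.source.SourceTask;

            // Hypothetical task matching the MySourceConnector sketch above.
            public class MySourceTask extends SourceTask {
                private String taskId;

                @Override
                public String version() {
                    return "1.0";                          // usually matches the Connector's version
                }

                @Override
                public void start(Map<String, String> props) {
                    taskId = props.get("task.id");         // fraction of work assigned by the connector
                }

                @Override
                public List<SourceRecord> poll() throws InterruptedException {
                    Map<String, ?> partition = Collections.singletonMap("task", taskId);
                    Map<String, ?> offset = Collections.singletonMap("position", 0L);
                    SourceRecord record = new SourceRecord(partition, offset, "example-topic",
                            Schema.STRING_SCHEMA, "hello from task " + taskId);
                    return Collections.singletonList(record);
                }

                @Override
                public void stop() {
                    // signal poll() to stop and release resources
                }
            }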
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/connector/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/connector/package-summary.html new file mode 100644 index 000000000..6343fd464 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/connector/package-summary.html @@ -0,0 +1,122 @@ + + + + +org.apache.kafka.connect.connector (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.connect.connector

    +
    +
    +
    package org.apache.kafka.connect.connector
    +
    +
    Provides interfaces for Connector and Task implementations.
    +
    +
    +
      +
    • + +
    • +
    • +
      +
      +
      +
      +
      Class
      +
      Description
      + +
      +
      Connectors manage integration of Kafka Connect with another system, either as an input that ingests data into Kafka or an output that passes data to an external system.
      +
      + +
      +
      ConnectorContext allows Connectors to proactively interact with the Kafka Connect runtime.
      +
      + +
      +
      + Base class for records containing data to be copied to/from Kafka.
      +
      + +
      +
      + Tasks contain the code that actually copies data to/from another system.
      +
      +
      +
      +
      +
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/connector/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/connector/package-tree.html new file mode 100644 index 000000000..61a9b51a2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/connector/package-tree.html @@ -0,0 +1,79 @@ + + + + +org.apache.kafka.connect.connector Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.connect.connector

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/connector/policy/ConnectorClientConfigOverridePolicy.html b/static/41/javadoc/org/apache/kafka/connect/connector/policy/ConnectorClientConfigOverridePolicy.html new file mode 100644 index 000000000..134709d46 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/connector/policy/ConnectorClientConfigOverridePolicy.html @@ -0,0 +1,163 @@ + + + + +ConnectorClientConfigOverridePolicy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ConnectorClientConfigOverridePolicy

    +
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Configurable
    +
    +
    +
    public interface ConnectorClientConfigOverridePolicy +extends Configurable, AutoCloseable
    +
    An interface for enforcing a policy on overriding of Kafka client configs via the connector configs.

    Common use cases are the ability to provide a principal per connector (e.g. via sasl.jaas.config) and/or enforcing that the producer/consumer configurations used for optimizations are within acceptable ranges.

    Kafka Connect discovers implementations of this interface using the Java ServiceLoader mechanism. To support this, implementations of this interface should also contain a service provider configuration file in META-INF/services/org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy.

    Implement Monitorable to enable the policy to register metrics. The following tags are automatically added to all metrics registered: config set to connector.client.config.override.policy, and class set to the ConnectorClientConfigOverridePolicy class name.

    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      validate(ConnectorClientConfigRequest connectorClientConfigRequest)
      +
      +
      Workers will invoke this before configuring per-connector Kafka admin, producer, and consumer client instances to validate if all the overridden client configurations are allowed per the policy implementation.
      +
      +
      +
      +
      +
      +

      Methods inherited from interface java.lang.AutoCloseable

      +close
      +
      +

      Methods inherited from interface org.apache.kafka.common.Configurable

      +configure
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        validate

        +
        List<ConfigValue> validate(ConnectorClientConfigRequest connectorClientConfigRequest)
        +
        Workers will invoke this before configuring per-connector Kafka admin, producer, and consumer client instances to validate if all the overridden client configurations are allowed per the policy implementation. This would also be invoked during the validation of connector configs via the REST API.

        If there are any policy violations, the connector will not be started.

        +
        +
        Parameters:
        +
        connectorClientConfigRequest - an instance of ConnectorClientConfigRequest that provides the configs to be overridden and its context; never null
        +
        Returns:
        +
        a list of ConfigValue instances that describe each client configuration in the request, including an error if the configuration is not allowed by the policy; never null
        +
        +
        +
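        A minimal sketch of such a policy is shown below. It allows only a fixed allow-list of client properties to be overridden; the AllowListOverridePolicy name and the chosen properties are assumptions for illustration, not a built-in policy. To be discoverable, a real implementation would also be listed in the META-INF/services file described above.

            import java.util.ArrayList;
            import java.util.List;
            import java.util.Map;
            import java.util.Set;

            import org.apache.kafka.common.config.ConfigValue;
            import org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy;
            import org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest;

            // Hypothetical policy: only a fixed set of client properties may be overridden per connector.
            public class AllowListOverridePolicy implements ConnectorClientConfigOverridePolicy {
                private static final Set<String> ALLOWED = Set.of("sasl.jaas.config", "client.id");

                @Override
                public void configure(Map<String, ?> configs) {
                    // no worker-level configuration needed for this sketch
                }

                @Override
                public List<ConfigValue> validate(ConnectorClientConfigRequest request) {
                    List<ConfigValue> results = new ArrayList<>();
                    for (Map.Entry<String, Object> override : request.clientProps().entrySet()) {
                        ConfigValue value = new ConfigValue(override.getKey());
                        if (!ALLOWED.contains(override.getKey())) {
                            value.addErrorMessage("Overriding " + override.getKey()
                                    + " is not allowed for " + request.clientType() + " clients");
                        }
                        results.add(value);
                    }
                    return results;
                }

                @Override
                public void close() {
                    // nothing to clean up
                }
            }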
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/connector/policy/ConnectorClientConfigRequest.ClientType.html b/static/41/javadoc/org/apache/kafka/connect/connector/policy/ConnectorClientConfigRequest.ClientType.html new file mode 100644 index 000000000..50b7d8087 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/connector/policy/ConnectorClientConfigRequest.ClientType.html @@ -0,0 +1,229 @@ + + + + +ConnectorClientConfigRequest.ClientType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class ConnectorClientConfigRequest.ClientType

    +
    +
    java.lang.Object +
    java.lang.Enum<ConnectorClientConfigRequest.ClientType> +
    org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest.ClientType
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<ConnectorClientConfigRequest.ClientType>, Constable
    +
    +
    +
    Enclosing class:
    +
    ConnectorClientConfigRequest
    +
    +
    +
    public static enum ConnectorClientConfigRequest.ClientType +extends Enum<ConnectorClientConfigRequest.ClientType>
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/connector/policy/ConnectorClientConfigRequest.html b/static/41/javadoc/org/apache/kafka/connect/connector/policy/ConnectorClientConfigRequest.html new file mode 100644 index 000000000..495dc29dd --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/connector/policy/ConnectorClientConfigRequest.html @@ -0,0 +1,269 @@ + + + + +ConnectorClientConfigRequest (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConnectorClientConfigRequest

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest
    +
    +
    +
    +
    public class ConnectorClientConfigRequest +extends Object
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/connector/policy/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/connector/policy/package-summary.html new file mode 100644 index 000000000..523108cea --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/connector/policy/package-summary.html @@ -0,0 +1,113 @@ + + + + +org.apache.kafka.connect.connector.policy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.connect.connector.policy

    +
    +
    +
    package org.apache.kafka.connect.connector.policy
    +
    +
    Provides pluggable interfaces for policies controlling how users can configure connectors. For example, the ConnectorClientConfigOverridePolicy interface can be used to control which Kafka client properties can be overridden on a per-connector basis.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/connector/policy/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/connector/policy/package-tree.html new file mode 100644 index 000000000..d18386f46 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/connector/policy/package-tree.html @@ -0,0 +1,100 @@ + + + + +org.apache.kafka.connect.connector.policy Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.connect.connector.policy

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +

    Enum Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/data/ConnectSchema.html b/static/41/javadoc/org/apache/kafka/connect/data/ConnectSchema.html new file mode 100644 index 000000000..3d5538a57 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/ConnectSchema.html @@ -0,0 +1,545 @@ + + + + +ConnectSchema (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConnectSchema

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.data.ConnectSchema
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Schema
    +
    +
    +
    public class ConnectSchema +extends Object +implements Schema
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ConnectSchema

        +
        public ConnectSchema(Schema.Type type, + boolean optional, + Object defaultValue, + String name, + Integer version, + String doc, + Map<String,String> parameters, + List<Field> fields, + Schema keySchema, + Schema valueSchema)
        +
        Construct a Schema. Most users should not construct schemas manually, preferring SchemaBuilder instead.
        +
        +
      • +
      • +
        +

        ConnectSchema

        +
        public ConnectSchema(Schema.Type type, + boolean optional, + Object defaultValue, + String name, + Integer version, + String doc)
        +
        Construct a Schema for a primitive type, setting schema parameters, struct fields, and key and value schemas to null.
        +
        +
      • +
      • +
        +

        ConnectSchema

        +
        public ConnectSchema(Schema.Type type)
        +
        Construct a default schema for a primitive type. The schema is required, has no default value, name, version, + or documentation.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        type

        +
        public Schema.Type type()
        +
        +
        Specified by:
        +
        type in interface Schema
        +
        Returns:
        +
        the type of this schema
        +
        +
        +
      • +
      • +
        +

        isOptional

        +
        public boolean isOptional()
        +
        +
        Specified by:
        +
        isOptional in interface Schema
        +
        Returns:
        +
        true if this field is optional, false otherwise
        +
        +
        +
      • +
      • +
        +

        defaultValue

        +
        public Object defaultValue()
        +
        +
        Specified by:
        +
        defaultValue in interface Schema
        +
        Returns:
        +
        the default value for this schema
        +
        +
        +
      • +
      • +
        +

        name

        +
        public String name()
        +
        +
        Specified by:
        +
        name in interface Schema
        +
        Returns:
        +
        the name of this schema
        +
        +
        +
      • +
      • +
        +

        version

        +
        public Integer version()
        +
        Description copied from interface: Schema
        +
        Get the optional version of the schema. If a version is included, newer versions must be larger than older ones.
        +
        +
        Specified by:
        +
        version in interface Schema
        +
        Returns:
        +
        the version of this schema
        +
        +
        +
      • +
      • +
        +

        doc

        +
        public String doc()
        +
        +
        Specified by:
        +
        doc in interface Schema
        +
        Returns:
        +
        the documentation for this schema
        +
        +
        +
      • +
      • +
        +

        parameters

        +
        public Map<String,String> parameters()
        +
        Description copied from interface: Schema
        +
        Get a map of schema parameters.
        +
        +
        Specified by:
        +
        parameters in interface Schema
        +
        Returns:
        +
        Map containing parameters for this schema, or null if there are no parameters
        +
        +
        +
      • +
      • +
        +

        fields

        +
        public List<Field> fields()
        +
        Description copied from interface: Schema
        +
        Get the list of Fields for this Schema. Throws a DataException if this schema is not a + Schema.Type.STRUCT.
        +
        +
        Specified by:
        +
        fields in interface Schema
        +
        Returns:
        +
        the list of fields for this Schema
        +
        +
        +
      • +
      • +
        +

        field

        +
        public Field field(String fieldName)
        +
        Description copied from interface: Schema
        +
        Get a Field for this Schema by name. Throws a DataException if this schema is not a + Schema.Type.STRUCT.
        +
        +
        Specified by:
        +
        field in interface Schema
        +
        Parameters:
        +
        fieldName - the name of the field to look up
        +
        Returns:
        +
        the Field object for the specified field, or null if there is no field with the given name
        +
        +
        +
      • +
      • +
        +

        keySchema

        +
        public Schema keySchema()
        +
        Description copied from interface: Schema
        +
        Get the key schema for this map schema. Throws a DataException if this schema is not a map.
        +
        +
        Specified by:
        +
        keySchema in interface Schema
        +
        Returns:
        +
        the key schema
        +
        +
        +
      • +
      • +
        +

        valueSchema

        +
        public Schema valueSchema()
        +
        Description copied from interface: Schema
        +
        Get the value schema for this map or array schema. Throws a DataException if this schema is not a map or array.
        +
        +
        Specified by:
        +
        valueSchema in interface Schema
        +
        Returns:
        +
        the value schema
        +
        +
        +
      • +
      • +
        +

        validateValue

        +
        public static void validateValue(Schema schema, + Object value)
        +
        Validate that the value can be used with the schema, i.e. that its type matches the schema type and nullability requirements. Throws a DataException if the value is invalid.
        +
        +
        Parameters:
        +
        schema - Schema to test
        +
        value - value to test
        +
        +
        +
      • +
      • +
        +

        validateValue

        +
        public static void validateValue(String field, + Schema schema, + Object value)
        +
        +
      • +
      • +
        +

        validateValue

        +
        public void validateValue(Object value)
        +
        Validate that the value can be used for this schema, i.e. that its type matches the schema type and optional requirements. Throws a DataException if the value is invalid.
        +
        +
        Parameters:
        +
        value - the value to validate
        +
        +
        +
      • +
      • +
        +

        schema

        +
        public ConnectSchema schema()
        +
        Description copied from interface: Schema
        +
        Return a concrete instance of the Schema
        +
        +
        Specified by:
        +
        schema in interface Schema
        +
        Returns:
        +
        the Schema
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        schemaType

        +
        public static Schema.Type schemaType(Class<?> klass)
        +
        Get the Schema.Type associated with the given class.
        +
        +
        Parameters:
        +
        klass - the Class whose associated schema type is to be returned
        +
        Returns:
        +
        the corresponding type, or null if there is no matching type
        +
        +
        +
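        To illustrate how the static validateValue helpers behave, the sketch below builds a simple struct schema and checks one valid and two invalid values; the schema and field names are arbitrary.

            import org.apache.kafka.connect.data.ConnectSchema;
            import org.apache.kafka.connect.data.Schema;
            import org.apache.kafka.connect.data.SchemaBuilder;
            import org.apache.kafka.connect.data.Struct;
            import org.apache.kafka.connect.errors.DataException;

            public class ValidateValueExample {
                public static void main(String[] args) {
                    Schema userSchema = SchemaBuilder.struct().name("example.User")
                            .field("name", Schema.STRING_SCHEMA)
                            .field("age", Schema.OPTIONAL_INT32_SCHEMA)
                            .build();

                    Struct valid = new Struct(userSchema).put("name", "alice").put("age", 42);
                    ConnectSchema.validateValue(userSchema, valid);          // passes silently

                    try {
                        ConnectSchema.validateValue(userSchema, "not a struct");
                    } catch (DataException e) {
                        System.out.println("Rejected: " + e.getMessage());   // type mismatch
                    }

                    try {
                        ConnectSchema.validateValue(Schema.STRING_SCHEMA, null);
                    } catch (DataException e) {
                        System.out.println("Rejected: " + e.getMessage());   // required schema, null value
                    }
                }
            }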
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/data/Date.html b/static/41/javadoc/org/apache/kafka/connect/data/Date.html new file mode 100644 index 000000000..707c2b944 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/Date.html @@ -0,0 +1,258 @@ + + + + +Date (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Date

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.data.Date
    +
    +
    +
    +
    public class Date +extends Object
    +

    A date representing a calendar day with no time of day or timezone. The corresponding Java type is a java.util.Date with hours, minutes, seconds, and milliseconds set to 0. The underlying representation is an integer representing the number of standardized days (based on a number of milliseconds with 24 hours/day, 60 minutes/hour, 60 seconds/minute, 1000 milliseconds/second, with no leap seconds) since Unix epoch.

    +
    +
    +
      + +
    • +
      +

      Field Summary

      +
      Fields
      +
      +
      Modifier and Type
      +
      Field
      +
      Description
      +
      static final String
      + +
       
      +
      static final Schema
      + +
       
      +
      +
      +
    • + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      + +
       
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
      Returns a SchemaBuilder for a Date.
      +
      +
      static int
      +
      fromLogical(Schema schema, + Date value)
      +
      +
      Convert a value from its logical format (Date) to its encoded format (int).
      +
      +
      static Date
      +
      toLogical(Schema schema, + int value)
      +
      +
      Convert a value from its encoded format (int) to its logical format (Date).
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        LOGICAL_NAME

        +
        public static final String LOGICAL_NAME
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SCHEMA

        +
        public static final Schema SCHEMA
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Date

        +
        public Date()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        builder

        +
        public static SchemaBuilder builder()
        +
        Returns a SchemaBuilder for a Date. By returning a SchemaBuilder you can override additional schema settings such as required/optional, default value, and documentation.
        +
        +
        Returns:
        +
        a SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        fromLogical

        +
        public static int fromLogical(Schema schema, + Date value)
        +
        Convert a value from its logical format (Date) to its encoded format (int).
        +
        +
        Parameters:
        +
        value - the logical value
        +
        Returns:
        +
        the encoded value
        +
        +
        +
      • +
      • +
        +

        toLogical

        +
        public static Date toLogical(Schema schema, + int value)
        +
        Convert a value from its encoded format (int) to its logical format (Date).
        +
        +
        Parameters:
        +
        value - the encoded value
        +
        Returns:
        +
        the logical value
        +
        +
        +
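        A small round-trip sketch, assuming UTC and an arbitrary calendar day, shows the relationship between the logical java.util.Date and its int encoding:

            import java.util.Calendar;
            import java.util.TimeZone;

            import org.apache.kafka.connect.data.Date;

            public class DateLogicalTypeExample {
                public static void main(String[] args) {
                    // Build a java.util.Date at midnight UTC, as required by the logical type.
                    Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
                    calendar.clear();
                    calendar.set(2024, Calendar.JANUARY, 15);

                    java.util.Date day = calendar.getTime();

                    int encoded = Date.fromLogical(Date.SCHEMA, day);          // days since Unix epoch
                    java.util.Date decoded = Date.toLogical(Date.SCHEMA, encoded);

                    System.out.println(encoded);                               // 19737 for 2024-01-15
                    System.out.println(decoded.equals(day));                   // true
                }
            }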
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/data/Decimal.html b/static/41/javadoc/org/apache/kafka/connect/data/Decimal.html new file mode 100644 index 000000000..879bf6771 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/Decimal.html @@ -0,0 +1,285 @@ + + + + +Decimal (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Decimal

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.data.Decimal
    +
    +
    +
    +
    public class Decimal +extends Object
    +

    An arbitrary-precision signed decimal number. The value is unscaled * 10 ^ -scale where:

      • unscaled is an integer
      • scale is an integer representing how many digits the decimal point should be shifted on the unscaled value

    Decimal does not provide a fixed schema because it is parameterized by the scale, which is fixed on the schema rather than being part of the value.

    The underlying representation of this type is bytes containing a two's complement integer.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      + +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Decimal

        +
        public Decimal()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        builder

        +
        public static SchemaBuilder builder(int scale)
        +
        Returns a SchemaBuilder for a Decimal with the given scale factor. By returning a SchemaBuilder you can override additional schema settings such as required/optional, default value, and documentation.
        +
        +
        Parameters:
        +
        scale - the scale factor to apply to unscaled values
        +
        Returns:
        +
        a SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        schema

        +
        public static Schema schema(int scale)
        +
        +
      • +
      • +
        +

        fromLogical

        +
        public static byte[] fromLogical(Schema schema, + BigDecimal value)
        +
        Convert a value from its logical format (BigDecimal) to its encoded format (byte[]).
        +
        +
        Parameters:
        +
        value - the logical value
        +
        Returns:
        +
        the encoded value
        +
        +
        +
      • +
      • +
        +

        toLogical

        +
        public static BigDecimal toLogical(Schema schema, + byte[] value)
        +
        Convert a value from its encoded format (byte[]) to its logical format (BigDecimal).
        +
        +
        Parameters:
        +
        value - the encoded value
        +
        Returns:
        +
        the logical value
        +
        +
        +
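        A round-trip sketch with an assumed scale of 2 illustrates the parameterized schema and the byte[] encoding:

            import java.math.BigDecimal;
            import java.util.Arrays;

            import org.apache.kafka.connect.data.Decimal;
            import org.apache.kafka.connect.data.Schema;

            public class DecimalLogicalTypeExample {
                public static void main(String[] args) {
                    Schema priceSchema = Decimal.schema(2);                    // scale is part of the schema

                    BigDecimal price = new BigDecimal("12.34");                // unscaled 1234, scale 2
                    byte[] encoded = Decimal.fromLogical(priceSchema, price);  // two's complement of 1234
                    BigDecimal decoded = Decimal.toLogical(priceSchema, encoded);

                    System.out.println(Arrays.toString(encoded));              // [4, -46] == 1234
                    System.out.println(decoded);                               // 12.34
                }
            }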
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/data/Field.html b/static/41/javadoc/org/apache/kafka/connect/data/Field.html new file mode 100644 index 000000000..c5c52bac2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/Field.html @@ -0,0 +1,243 @@ + + + + +Field (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Field

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.data.Field
    +
    +
    +
    +
    public class Field +extends Object
    +

    + A field in a Struct, consisting of a field name, index, and Schema for the field value. +

    +
    +
    +
      + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      +
      Field(String name, + int index, + Schema schema)
      +
       
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      + +
       
      +
      int
      + +
       
      +
      int
      + +
      +
      Get the index of this field within the struct.
      +
      + + +
      +
      Get the name of this field.
      +
      + + +
      +
      Get the schema of this field
      +
      + + +
       
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Field

        +
        public Field(String name, + int index, + Schema schema)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        name

        +
        public String name()
        +
        Get the name of this field.
        +
        +
        Returns:
        +
        the name of this field
        +
        +
        +
      • +
      • +
        +

        index

        +
        public int index()
        +
        Get the index of this field within the struct.
        +
        +
        Returns:
        +
        the index of this field
        +
        +
        +
      • +
      • +
        +

        schema

        +
        public Schema schema()
        +
        Get the schema of this field
        +
        +
        Returns:
        +
        the schema of values of this field
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/data/Schema.Type.html b/static/41/javadoc/org/apache/kafka/connect/data/Schema.Type.html new file mode 100644 index 000000000..0a5b5ad54 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/Schema.Type.html @@ -0,0 +1,368 @@ + + + + +Schema.Type (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class Schema.Type

    +
    +
    java.lang.Object +
    java.lang.Enum<Schema.Type> +
    org.apache.kafka.connect.data.Schema.Type
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<Schema.Type>, Constable
    +
    +
    +
    Enclosing interface:
    +
    Schema
    +
    +
    +
    public static enum Schema.Type +extends Enum<Schema.Type>
    +
    The type of a schema. These only include the core types; logical types must be determined by checking the schema name.
    +
    +
    +
      + +
    • +
      +

      Nested Class Summary

      +
      +

      Nested classes/interfaces inherited from class java.lang.Enum

      +Enum.EnumDesc<E extends Enum<E>>
      +
      +
    • + +
    • +
      +

      Enum Constant Summary

      +
      Enum Constants
      +
      +
      Enum Constant
      +
      Description
      + +
      +
      An ordered sequence of elements, each of which shares the same type.
      +
      + +
      +
      Boolean value (true or false)
      +
      + +
      +
      Sequence of unsigned 8-bit bytes
      +
      + +
      +
      32-bit IEEE 754 floating point number
      +
      + +
      +
      64-bit IEEE 754 floating point number
      +
      + +
      +
      16-bit signed integer
      +
      + +
      +
      32-bit signed integer
      +
      + +
      +
      64-bit signed integer
      +
      + +
      +
      8-bit signed integer
      +
      + +
      +
      A mapping from keys to values.
      +
      + +
      +
      Character string that supports all Unicode characters.
      +
      + +
      +
      A structured record containing a set of named fields, each field using a fixed, independent Schema.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
       
      +
      boolean
      + +
       
      + + +
      +
      Returns the enum constant of this class with the specified name.
      +
      +
      static Schema.Type[]
      + +
      +
      Returns an array containing the constants of this enum class, in +the order they are declared.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Enum

      +compareTo, describeConstable, equals, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      +
        +
      • +
        +

        INT8

        +
        public static final Schema.Type INT8
        +
        8-bit signed integer +

        + Note that if you have an unsigned 8-bit data source, INT16 will be required to safely capture all valid values

        +
        +
      • +
      • +
        +

        INT16

        +
        public static final Schema.Type INT16
        +
        16-bit signed integer +

        + Note that if you have an unsigned 16-bit data source, INT32 will be required to safely capture all valid values

        +
        +
      • +
      • +
        +

        INT32

        +
        public static final Schema.Type INT32
        +
        32-bit signed integer +

        + Note that if you have an unsigned 32-bit data source, INT64 will be required to safely capture all valid values

        +
        +
      • +
      • +
        +

        INT64

        +
        public static final Schema.Type INT64
        +
        64-bit signed integer +

        + Note that if you have an unsigned 64-bit data source, the Decimal logical type (encoded as BYTES) + will be required to safely capture all valid values

        +
        +
      • +
      • +
        +

        FLOAT32

        +
        public static final Schema.Type FLOAT32
        +
        32-bit IEEE 754 floating point number
        +
        +
      • +
      • +
        +

        FLOAT64

        +
        public static final Schema.Type FLOAT64
        +
        64-bit IEEE 754 floating point number
        +
        +
      • +
      • +
        +

        BOOLEAN

        +
        public static final Schema.Type BOOLEAN
        +
        Boolean value (true or false)
        +
        +
      • +
      • +
        +

        STRING

        +
        public static final Schema.Type STRING
        +
        Character string that supports all Unicode characters. +

        + Note that this does not imply any specific encoding (e.g. UTF-8) as this is an in-memory representation.

        +
        +
      • +
      • +
        +

        BYTES

        +
        public static final Schema.Type BYTES
        +
        Sequence of unsigned 8-bit bytes
        +
        +
      • +
      • +
        +

        ARRAY

        +
        public static final Schema.Type ARRAY
        +
        An ordered sequence of elements, each of which shares the same type.
        +
        +
      • +
      • +
        +

        MAP

        +
        public static final Schema.Type MAP
        +
        A mapping from keys to values. Both keys and values can be arbitrarily complex types, including complex types such as Struct.
        +
        +
      • +
      • +
        +

        STRUCT

        +
        public static final Schema.Type STRUCT
        +
        A structured record containing a set of named fields, each field using a fixed, independent Schema.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static Schema.Type[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static Schema.Type valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        getName

        +
        public String getName()
        +
        +
      • +
      • +
        +

        isPrimitive

        +
        public boolean isPrimitive()
        +
        +
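        Because logical types reuse these core types, code that inspects a schema typically checks both type() and name(). A small sketch, using Decimal as the assumed logical type:

            import java.math.BigDecimal;

            import org.apache.kafka.connect.data.Decimal;
            import org.apache.kafka.connect.data.Schema;

            public class LogicalTypeCheck {
                // Decode a raw value, treating BYTES with the Decimal logical name specially.
                static Object decode(Schema schema, Object value) {
                    if (schema.type() == Schema.Type.BYTES && Decimal.LOGICAL_NAME.equals(schema.name())) {
                        return Decimal.toLogical(schema, (byte[]) value);      // BigDecimal
                    }
                    return value;                                              // plain core type
                }

                public static void main(String[] args) {
                    Schema schema = Decimal.schema(2);
                    byte[] raw = Decimal.fromLogical(schema, new BigDecimal("99.95"));
                    System.out.println(decode(schema, raw));                   // 99.95
                }
            }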
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/data/Schema.html b/static/41/javadoc/org/apache/kafka/connect/data/Schema.html new file mode 100644 index 000000000..6d0b3663d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/Schema.html @@ -0,0 +1,515 @@ + + + + +Schema (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Schema

    +
    +
    +
    +
    All Known Implementing Classes:
    +
    ConnectSchema, SchemaBuilder
    +
    +
    +
    public interface Schema
    +

    Definition of an abstract data type. Data types can be primitive types (integer types, floating point types, boolean, strings, and bytes) or complex types (typed arrays, maps with one key schema and value schema, and structs that have a fixed set of field names each with an associated value schema). Any type can be specified as optional, allowing it to be omitted (resulting in null values when it is missing), and can specify a default value.

    All schemas may have some associated metadata: a name, version, and documentation. These are all considered part of the schema itself and included when comparing schemas. Besides adding important metadata, these fields enable the specification of logical types that specify additional constraints and semantics (e.g. UNIX timestamps are just an int64, but the user needs to know about the additional semantics to interpret it properly).

    Schemas can be created directly, but in most cases using SchemaBuilder will be simpler.

    +
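    For instance, an optional field with a default value can be declared as in the following sketch; the schema and field names are arbitrary:

        import org.apache.kafka.connect.data.Schema;
        import org.apache.kafka.connect.data.SchemaBuilder;
        import org.apache.kafka.connect.data.Struct;

        public class OptionalDefaultExample {
            public static void main(String[] args) {
                Schema retriesSchema = SchemaBuilder.int32().optional().defaultValue(3).build();

                Schema configSchema = SchemaBuilder.struct().name("example.Config")
                        .field("host", Schema.STRING_SCHEMA)
                        .field("retries", retriesSchema)
                        .build();

                // "retries" is omitted, so reads fall back to the schema's default value.
                Struct config = new Struct(configSchema).put("host", "localhost");
                System.out.println(config.get("retries"));   // 3
            }
        }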
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        INT8_SCHEMA

        +
        static final Schema INT8_SCHEMA
        +
        +
      • +
      • +
        +

        INT16_SCHEMA

        +
        static final Schema INT16_SCHEMA
        +
        +
      • +
      • +
        +

        INT32_SCHEMA

        +
        static final Schema INT32_SCHEMA
        +
        +
      • +
      • +
        +

        INT64_SCHEMA

        +
        static final Schema INT64_SCHEMA
        +
        +
      • +
      • +
        +

        FLOAT32_SCHEMA

        +
        static final Schema FLOAT32_SCHEMA
        +
        +
      • +
      • +
        +

        FLOAT64_SCHEMA

        +
        static final Schema FLOAT64_SCHEMA
        +
        +
      • +
      • +
        +

        BOOLEAN_SCHEMA

        +
        static final Schema BOOLEAN_SCHEMA
        +
        +
      • +
      • +
        +

        STRING_SCHEMA

        +
        static final Schema STRING_SCHEMA
        +
        +
      • +
      • +
        +

        BYTES_SCHEMA

        +
        static final Schema BYTES_SCHEMA
        +
        +
      • +
      • +
        +

        OPTIONAL_INT8_SCHEMA

        +
        static final Schema OPTIONAL_INT8_SCHEMA
        +
        +
      • +
      • +
        +

        OPTIONAL_INT16_SCHEMA

        +
        static final Schema OPTIONAL_INT16_SCHEMA
        +
        +
      • +
      • +
        +

        OPTIONAL_INT32_SCHEMA

        +
        static final Schema OPTIONAL_INT32_SCHEMA
        +
        +
      • +
      • +
        +

        OPTIONAL_INT64_SCHEMA

        +
        static final Schema OPTIONAL_INT64_SCHEMA
        +
        +
      • +
      • +
        +

        OPTIONAL_FLOAT32_SCHEMA

        +
        static final Schema OPTIONAL_FLOAT32_SCHEMA
        +
        +
      • +
      • +
        +

        OPTIONAL_FLOAT64_SCHEMA

        +
        static final Schema OPTIONAL_FLOAT64_SCHEMA
        +
        +
      • +
      • +
        +

        OPTIONAL_BOOLEAN_SCHEMA

        +
        static final Schema OPTIONAL_BOOLEAN_SCHEMA
        +
        +
      • +
      • +
        +

        OPTIONAL_STRING_SCHEMA

        +
        static final Schema OPTIONAL_STRING_SCHEMA
        +
        +
      • +
      • +
        +

        OPTIONAL_BYTES_SCHEMA

        +
        static final Schema OPTIONAL_BYTES_SCHEMA
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        type

        +
        Schema.Type type()
        +
        +
        Returns:
        +
        the type of this schema
        +
        +
        +
      • +
      • +
        +

        isOptional

        +
        boolean isOptional()
        +
        +
        Returns:
        +
        true if this field is optional, false otherwise
        +
        +
        +
      • +
      • +
        +

        defaultValue

        +
        Object defaultValue()
        +
        +
        Returns:
        +
        the default value for this schema
        +
        +
        +
      • +
      • +
        +

        name

        +
        String name()
        +
        +
        Returns:
        +
        the name of this schema
        +
        +
        +
      • +
      • +
        +

        version

        +
        Integer version()
        +
        Get the optional version of the schema. If a version is included, newer versions must be larger than older ones.
        +
        +
        Returns:
        +
        the version of this schema
        +
        +
        +
      • +
      • +
        +

        doc

        +
        String doc()
        +
        +
        Returns:
        +
        the documentation for this schema
        +
        +
        +
      • +
      • +
        +

        parameters

        +
        Map<String,String> parameters()
        +
        Get a map of schema parameters.
        +
        +
        Returns:
        +
        Map containing parameters for this schema, or null if there are no parameters
        +
        +
        +
      • +
      • +
        +

        keySchema

        +
        Schema keySchema()
        +
        Get the key schema for this map schema. Throws a DataException if this schema is not a map.
        +
        +
        Returns:
        +
        the key schema
        +
        +
        +
      • +
      • +
        +

        valueSchema

        +
        Schema valueSchema()
        +
        Get the value schema for this map or array schema. Throws a DataException if this schema is not a map or array.
        +
        +
        Returns:
        +
        the value schema
        +
        +
        +
      • +
      • +
        +

        fields

        +
        List<Field> fields()
        +
        Get the list of Fields for this Schema. Throws a DataException if this schema is not a + Schema.Type.STRUCT.
        +
        +
        Returns:
        +
        the list of fields for this Schema
        +
        +
        +
      • +
      • +
        +

        field

        +
        Field field(String fieldName)
        +
        Get a Field for this Schema by name. Throws a DataException if this schema is not a + Schema.Type.STRUCT.
        +
        +
        Parameters:
        +
        fieldName - the name of the field to look up
        +
        Returns:
        +
        the Field object for the specified field, or null if there is no field with the given name
        +
        +
        +
      • +
      • +
        +

        schema

        +
        Schema schema()
        +
        Return a concrete instance of the Schema
        +
        +
        Returns:
        +
        the Schema
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/data/SchemaAndValue.html b/static/41/javadoc/org/apache/kafka/connect/data/SchemaAndValue.html new file mode 100644 index 000000000..e4e24608a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/SchemaAndValue.html @@ -0,0 +1,238 @@ + + + + +SchemaAndValue (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SchemaAndValue

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.data.SchemaAndValue
    +
    +
    +
    +
    public class SchemaAndValue +extends Object
    +
    A composite containing a Schema and associated value
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      + +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SchemaAndValue

        +
        public SchemaAndValue(Schema schema, + Object value)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        schema

        +
        public Schema schema()
        +
        +
      • +
      • +
        +

        value

        +
        public Object value()
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/data/SchemaBuilder.html b/static/41/javadoc/org/apache/kafka/connect/data/SchemaBuilder.html new file mode 100644 index 000000000..83f7998ef --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/SchemaBuilder.html @@ -0,0 +1,772 @@ + + + + +SchemaBuilder (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SchemaBuilder

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.data.SchemaBuilder
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Schema
    +
    +
    +
    public class SchemaBuilder +extends Object +implements Schema
    +

    SchemaBuilder provides a fluent API for constructing Schema objects. It allows you to set each of the properties for the schema, and each call returns the SchemaBuilder so the calls can be chained. When nested types are required, use one of the predefined schemas from Schema or use a second SchemaBuilder inline.

    Here is an example of building a struct schema:

    +     Schema dateSchema = SchemaBuilder.struct()
    +         .name("com.example.CalendarDate").version(2).doc("A calendar date including month, day, and year.")
    +         .field("month", Schema.STRING_SCHEMA)
    +         .field("day", Schema.INT8_SCHEMA)
    +         .field("year", Schema.INT16_SCHEMA)
    +         .build();
    +     
    +

    +

    + Here is an example of using a second SchemaBuilder to construct complex, nested types: +

    +     Schema userListSchema = SchemaBuilder.array(
    +         SchemaBuilder.struct().name("com.example.User").field("username", Schema.STRING_SCHEMA).field("id", Schema.INT64_SCHEMA).build()
    +     ).build();
    +     
    +

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SchemaBuilder

        +
        public SchemaBuilder(Schema.Type type)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        isOptional

        +
        public boolean isOptional()
        +
        +
        Specified by:
        +
        isOptional in interface Schema
        +
        Returns:
        +
        true if this field is optional, false otherwise
        +
        +
        +
      • +
      • +
        +

        optional

        +
        public SchemaBuilder optional()
        +
        Set this schema as optional.
        +
        +
        Returns:
        +
        the SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        required

        +
        public SchemaBuilder required()
        +
        Set this schema as required. This is the default, but this method can be used to make this choice explicit.
        +
        +
        Returns:
        +
        the SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        defaultValue

        +
        public Object defaultValue()
        +
        +
        Specified by:
        +
        defaultValue in interface Schema
        +
        Returns:
        +
        the default value for this schema
        +
        +
        +
      • +
      • +
        +

        defaultValue

        +
        public SchemaBuilder defaultValue(Object value)
        +
        Set the default value for this schema. The value is validated against the schema type, throwing a + SchemaBuilderException if it does not match.
        +
        +
        Parameters:
        +
        value - the default value
        +
        Returns:
        +
        the SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        name

        +
        public String name()
        +
        +
        Specified by:
        +
        name in interface Schema
        +
        Returns:
        +
        the name of this schema
        +
        +
        +
      • +
      • +
        +

        name

        +
        public SchemaBuilder name(String name)
        +
        Set the name of this schema.
        +
        +
        Parameters:
        +
        name - the schema name
        +
        Returns:
        +
        the SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        version

        +
        public Integer version()
        +
        Description copied from interface: Schema
        +
        Get the optional version of the schema. If a version is included, newer versions must be larger than older ones.
        +
        +
        Specified by:
        +
        version in interface Schema
        +
        Returns:
        +
        the version of this schema
        +
        +
        +
      • +
      • +
        +

        version

        +
        public SchemaBuilder version(Integer version)
        +
        Set the version of this schema. Schema versions are integers which, if provided, must indicate which schema is + newer and which is older by their ordering.
        +
        +
        Parameters:
        +
        version - the schema version
        +
        Returns:
        +
        the SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        doc

        +
        public String doc()
        +
        +
        Specified by:
        +
        doc in interface Schema
        +
        Returns:
        +
        the documentation for this schema
        +
        +
        +
      • +
      • +
        +

        doc

        +
        public SchemaBuilder doc(String doc)
        +
        Set the documentation for this schema.
        +
        +
        Parameters:
        +
        doc - the documentation
        +
        Returns:
        +
        the SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        parameters

        +
        public Map<String,String> parameters()
        +
        Description copied from interface: Schema
        +
        Get a map of schema parameters.
        +
        +
        Specified by:
        +
        parameters in interface Schema
        +
        Returns:
        +
        Map containing parameters for this schema, or null if there are no parameters
        +
        +
        +
      • +
      • +
        +

        parameter

        +
        public SchemaBuilder parameter(String propertyName, + String propertyValue)
        +
        Set a schema parameter.
        +
        +
        Parameters:
        +
        propertyName - name of the schema property to define
        +
        propertyValue - value of the schema property to define, as a String
        +
        Returns:
        +
        the SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        parameters

        +
        public SchemaBuilder parameters(Map<String,String> props)
        +
        Set schema parameters. This operation is additive; it does not remove existing parameters that do not appear in the set of properties passed to this method.
        +
        +
        Parameters:
        +
        props - Map of properties to set
        +
        Returns:
        +
        the SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        type

        +
        public Schema.Type type()
        +
        +
        Specified by:
        +
        type in interface Schema
        +
        Returns:
        +
        the type of this schema
        +
        +
        +
      • +
      • +
        +

        type

        +
        public static SchemaBuilder type(Schema.Type type)
        +
        Create a SchemaBuilder for the specified type. +

        + Usually it will be simpler to use one of the variants like string() or struct(), but this form + can be useful when generating schemas dynamically.

        +
        +
        Parameters:
        +
        type - the schema type
        +
        Returns:
        +
        a new SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        int8

        +
        public static SchemaBuilder int8()
        +
        +
        Returns:
        +
        a new Schema.Type.INT8 SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        int16

        +
        public static SchemaBuilder int16()
        +
        +
        Returns:
        +
        a new Schema.Type.INT16 SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        int32

        +
        public static SchemaBuilder int32()
        +
        +
        Returns:
        +
        a new Schema.Type.INT32 SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        int64

        +
        public static SchemaBuilder int64()
        +
        +
        Returns:
        +
        a new Schema.Type.INT64 SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        float32

        +
        public static SchemaBuilder float32()
        +
        +
        Returns:
        +
        a new Schema.Type.FLOAT32 SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        float64

        +
        public static SchemaBuilder float64()
        +
        +
        Returns:
        +
        a new Schema.Type.FLOAT64 SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        bool

        +
        public static SchemaBuilder bool()
        +
        +
        Returns:
        +
        a new Schema.Type.BOOLEAN SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        string

        +
        public static SchemaBuilder string()
        +
        +
        Returns:
        +
        a new Schema.Type.STRING SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        bytes

        +
        public static SchemaBuilder bytes()
        +
        +
        Returns:
        +
        a new Schema.Type.BYTES SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        struct

        +
        public static SchemaBuilder struct()
        +
        +
        Returns:
        +
        a new Schema.Type.STRUCT SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        field

        +
        public SchemaBuilder field(String fieldName, + Schema fieldSchema)
        +
        Add a field to this Schema.Type.STRUCT schema. Throws a SchemaBuilderException if this is not a struct schema.
        +
        +
        Parameters:
        +
        fieldName - the name of the field to add
        +
        fieldSchema - the Schema for the field's value
        +
        Returns:
        +
        the SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        fields

        +
        public List<Field> fields()
        +
        Get the list of fields for this Schema. Throws a DataException if this schema is not a Schema.Type.STRUCT.
        +
        +
        Specified by:
        +
        fields in interface Schema
        +
        Returns:
        +
        the list of fields for this Schema
        +
        +
        +
      • +
      • +
        +

        field

        +
        public Field field(String fieldName)
        +
        Description copied from interface: Schema
        +
        Get a Field for this Schema by name. Throws a DataException if this schema is not a + Schema.Type.STRUCT.
        +
        +
        Specified by:
        +
        field in interface Schema
        +
        Parameters:
        +
        fieldName - the name of the field to look up
        +
        Returns:
        +
        the Field object for the specified field, or null if there is no field with the given name
        +
        +
        +
      • +
      • +
        +

        array

        +
        public static SchemaBuilder array(Schema valueSchema)
        +
        +
        Parameters:
        +
        valueSchema - the schema for elements of the array
        +
        Returns:
        +
        a new Schema.Type.ARRAY SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        map

        +
        public static SchemaBuilder map(Schema keySchema, + Schema valueSchema)
        +
        +
        Parameters:
        +
        keySchema - the schema for keys in the map
        +
        valueSchema - the schema for values in the map
        +
        Returns:
        +
        a new Schema.Type.MAP SchemaBuilder
        +
        +
        +
      • +
      • +
        +

        keySchema

        +
        public Schema keySchema()
        +
        Description copied from interface: Schema
        +
        Get the key schema for this map schema. Throws a DataException if this schema is not a map.
        +
        +
        Specified by:
        +
        keySchema in interface Schema
        +
        Returns:
        +
        the key schema
        +
        +
        +
      • +
      • +
        +

        valueSchema

        +
        public Schema valueSchema()
        +
        Description copied from interface: Schema
        +
        Get the value schema for this map or array schema. Throws a DataException if this schema is not a map or array.
        +
        +
        Specified by:
        +
        valueSchema in interface Schema
        +
        Returns:
        +
        the value schema
        +
        +
        +
      • +
      • +
        +

        build

        +
        public Schema build()
        +
        Build the Schema using the current settings
        +
        +
        Returns:
        +
        the Schema
        +
        +
        +
      • +
      • +
        +

        schema

        +
        public Schema schema()
        +
        Return a concrete instance of the Schema specified by this builder
        +
        +
        Specified by:
        +
        schema in interface Schema
        +
        Returns:
        +
        the Schema
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
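Taken together, these builder methods compose fluently. The following sketch is illustrative only (the schema name, field names, and parameter key are invented for the example) and shows how the factory, field, parameter, and build calls fit together:

    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.data.SchemaBuilder;

    public class SchemaBuilderExample {
        public static void main(String[] args) {
            // Struct schema with a name, version, documentation, fields, and a parameter.
            Schema userSchema = SchemaBuilder.struct()
                    .name("com.example.User")            // invented name
                    .version(1)
                    .doc("A user record")
                    .field("id", Schema.INT64_SCHEMA)
                    .field("email", Schema.OPTIONAL_STRING_SCHEMA)
                    .parameter("owner", "identity-team") // invented parameter
                    .build();

            // Array and map schemas from the static factories.
            Schema tags = SchemaBuilder.array(Schema.STRING_SCHEMA).build();
            Schema attrs = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA).build();

            System.out.println(userSchema.fields());
            System.out.println(tags.valueSchema() + " / " + attrs.keySchema());
        }
    }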
diff --git a/static/41/javadoc/org/apache/kafka/connect/data/SchemaProjector.html b/static/41/javadoc/org/apache/kafka/connect/data/SchemaProjector.html new file mode 100644 index 000000000..6107ac85a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/SchemaProjector.html @@ -0,0 +1,180 @@

SchemaProjector (kafka 4.1.0 API)

Class SchemaProjector

java.lang.Object
    org.apache.kafka.connect.data.SchemaProjector

public class SchemaProjector extends Object

SchemaProjector is a utility to project a value between compatible schemas and throw exceptions when non-compatible schemas are provided.

Constructor Summary

SchemaProjector()

Method Summary

static Object project(Schema source, Object record, Schema target)
This method projects a value between compatible schemas and throws exceptions when non-compatible schemas are provided.

Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Constructor Details

SchemaProjector

public SchemaProjector()

Method Details

project

public static Object project(Schema source, Object record, Schema target) throws SchemaProjectorException
This method projects a value between compatible schemas and throws exceptions when non-compatible schemas are provided.
Parameters:
source - the schema used to construct the record
record - the value to project from source schema to target schema
target - the schema to project the record to
Returns:
the projected value with target schema
Throws:
SchemaProjectorException - if the target schema is not optional and does not have a default value
    +
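The project method above can be used to read a value written with an older schema into a newer one, provided any added fields are optional or carry defaults. A minimal sketch under those assumptions (the schema names and fields are invented):

    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.data.SchemaBuilder;
    import org.apache.kafka.connect.data.SchemaProjector;
    import org.apache.kafka.connect.data.Struct;

    public class SchemaProjectorExample {
        public static void main(String[] args) {
            Schema v1 = SchemaBuilder.struct().name("com.example.User").version(1)
                    .field("id", Schema.INT64_SCHEMA)
                    .build();
            // v2 adds a field with a default value, so v1 records can still be projected onto it.
            Schema v2 = SchemaBuilder.struct().name("com.example.User").version(2)
                    .field("id", Schema.INT64_SCHEMA)
                    .field("email", SchemaBuilder.string().optional().defaultValue("n/a").build())
                    .build();

            Struct oldRecord = new Struct(v1).put("id", 42L);
            Struct projected = (Struct) SchemaProjector.project(v1, oldRecord, v2);
            System.out.println(projected.get("email")); // expected to be the default, "n/a"
        }
    }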
diff --git a/static/41/javadoc/org/apache/kafka/connect/data/Struct.html b/static/41/javadoc/org/apache/kafka/connect/data/Struct.html new file mode 100644 index 000000000..531b5cb12 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/Struct.html @@ -0,0 +1,488 @@

Struct (kafka 4.1.0 API)

Class Struct

java.lang.Object
    org.apache.kafka.connect.data.Struct

public class Struct extends Object

A structured record containing a set of named fields with values, each field using an independent Schema. Struct objects must specify a complete Schema up front, and only fields specified in the Schema may be set.

The Struct's put(String, Object) method returns the Struct itself to provide a fluent API for constructing complete objects:

    Schema schema = SchemaBuilder.struct().name("com.example.Person")
        .field("name", Schema.STRING_SCHEMA).field("age", Schema.INT32_SCHEMA).build();
    Struct struct = new Struct(schema).put("name", "Bobby McGee").put("age", 21);

Constructor Details

Struct

public Struct(Schema schema)
Create a new Struct for this Schema.
Parameters:
schema - the Schema for the Struct

Method Details

schema

public Schema schema()
Get the schema for this Struct.
Returns:
the Struct's schema

get

public Object get(String fieldName)
Get the value of a field, returning the default value if no value has been set yet and a default value is specified in the field's schema. Because this handles fields of all types, the value is returned as an Object and must be cast to a more specific type.
Parameters:
fieldName - the field name to lookup
Returns:
the value for the field

get

public Object get(Field field)
Get the value of a field, returning the default value if no value has been set yet and a default value is specified in the field's schema. Because this handles fields of all types, the value is returned as an Object and must be cast to a more specific type.
Parameters:
field - the field to lookup
Returns:
the value for the field

getWithoutDefault

public Object getWithoutDefault(String fieldName)
Get the underlying raw value for the field without accounting for default values.
Parameters:
fieldName - the field to get the value of
Returns:
the raw value

getInt8

public Byte getInt8(String fieldName)
Equivalent to calling get(String) and casting the result to a Byte.

getInt16

public Short getInt16(String fieldName)
Equivalent to calling get(String) and casting the result to a Short.

getInt32

public Integer getInt32(String fieldName)
Equivalent to calling get(String) and casting the result to an Integer.

getInt64

public Long getInt64(String fieldName)
Equivalent to calling get(String) and casting the result to a Long.

getFloat32

public Float getFloat32(String fieldName)
Equivalent to calling get(String) and casting the result to a Float.

getFloat64

public Double getFloat64(String fieldName)
Equivalent to calling get(String) and casting the result to a Double.

getBoolean

public Boolean getBoolean(String fieldName)
Equivalent to calling get(String) and casting the result to a Boolean.

getString

public String getString(String fieldName)
Equivalent to calling get(String) and casting the result to a String.

getBytes

public byte[] getBytes(String fieldName)
Equivalent to calling get(String) and casting the result to a byte[].

getArray

public <T> List<T> getArray(String fieldName)
Equivalent to calling get(String) and casting the result to a List.

getMap

public <K,V> Map<K,V> getMap(String fieldName)
Equivalent to calling get(String) and casting the result to a Map.

getStruct

public Struct getStruct(String fieldName)
Equivalent to calling get(String) and casting the result to a Struct.

put

public Struct put(String fieldName, Object value)
Set the value of a field. Validates the value, throwing a DataException if it does not match the field's Schema.
Parameters:
fieldName - the name of the field to set
value - the value of the field
Returns:
the Struct, to allow chaining of put(String, Object) calls

put

public Struct put(Field field, Object value)
Set the value of a field. Validates the value, throwing a DataException if it does not match the field's Schema.
Parameters:
field - the field to set
value - the value of the field
Returns:
the Struct, to allow chaining of put(String, Object) calls

validate

public void validate()
Validates that this struct has filled in all the necessary data with valid values. For required fields without defaults, this validates that a value has been set and has matching types/schemas. If any validation fails, throws a DataException.

equals

public boolean equals(Object o)
Overrides:
equals in class Object

hashCode

public int hashCode()
Overrides:
hashCode in class Object

toString

public String toString()
Overrides:
toString in class Object
    +
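Building on the class example above, the following sketch shows the typed getters, validate(), and the DataException raised when a value does not match the field's schema (the person fields simply reuse the example schema; nothing here is specific to any connector):

    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.data.SchemaBuilder;
    import org.apache.kafka.connect.data.Struct;
    import org.apache.kafka.connect.errors.DataException;

    public class StructExample {
        public static void main(String[] args) {
            Schema schema = SchemaBuilder.struct().name("com.example.Person")
                    .field("name", Schema.STRING_SCHEMA)
                    .field("age", Schema.INT32_SCHEMA)
                    .build();

            Struct person = new Struct(schema)
                    .put("name", "Bobby McGee")
                    .put("age", 21);
            person.validate();                      // throws DataException if a required field is unset

            String name = person.getString("name"); // typed getter, equivalent to casting get("name")
            int age = person.getInt32("age");

            try {
                person.put("age", "not a number");  // wrong type for the field's schema
            } catch (DataException e) {
                System.out.println("rejected: " + e.getMessage());
            }
            System.out.println(name + " is " + age);
        }
    }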
diff --git a/static/41/javadoc/org/apache/kafka/connect/data/Time.html b/static/41/javadoc/org/apache/kafka/connect/data/Time.html new file mode 100644 index 000000000..a08639f80 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/Time.html @@ -0,0 +1,258 @@

Time (kafka 4.1.0 API)

Class Time

java.lang.Object
    org.apache.kafka.connect.data.Time

public class Time extends Object

A time representing a specific point in a day, not tied to any specific date. The corresponding Java type is a Date where only hours, minutes, seconds, and milliseconds can be non-zero. This effectively makes it a point in time during the first day after the Unix epoch. The underlying representation is an integer representing the number of milliseconds after midnight.

Field Summary

static final String LOGICAL_NAME
static final Schema SCHEMA

Constructor Summary

Time()

Method Summary

static SchemaBuilder builder()
Returns a SchemaBuilder for a Time.
static int fromLogical(Schema schema, Date value)
Convert a value from its logical format (Date) to its encoded format (int).
static Date toLogical(Schema schema, int value)
Convert a value from its encoded format (int) to its logical format (Date).

Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Field Details

LOGICAL_NAME

public static final String LOGICAL_NAME

SCHEMA

public static final Schema SCHEMA

Constructor Details

Time

public Time()

Method Details

builder

public static SchemaBuilder builder()
Returns a SchemaBuilder for a Time. By returning a SchemaBuilder you can override additional schema settings such as required/optional, default value, and documentation.
Returns:
a SchemaBuilder

fromLogical

public static int fromLogical(Schema schema, Date value)
Convert a value from its logical format (Date) to its encoded format (int).
Parameters:
value - the logical value
Returns:
the encoded value

toLogical

public static Date toLogical(Schema schema, int value)
Convert a value from its encoded format (int) to its logical format (Date).
Parameters:
value - the encoded value
Returns:
the logical value
    +
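A rough sketch of the round trip described above, assuming a UTC calendar is used to build the time-of-day value on the epoch day:

    import java.util.Calendar;
    import java.util.Date;
    import java.util.TimeZone;

    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.data.Time;

    public class TimeExample {
        public static void main(String[] args) {
            Schema schema = Time.SCHEMA;

            // 14:30:00.000 on the epoch day, which is what the logical Time type models.
            Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
            cal.setTimeInMillis(0L);
            cal.set(Calendar.HOUR_OF_DAY, 14);
            cal.set(Calendar.MINUTE, 30);
            Date logical = cal.getTime();

            int encoded = Time.fromLogical(schema, logical); // milliseconds after midnight
            Date back = Time.toLogical(schema, encoded);
            System.out.println(encoded + " -> " + back);
        }
    }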
diff --git a/static/41/javadoc/org/apache/kafka/connect/data/Timestamp.html b/static/41/javadoc/org/apache/kafka/connect/data/Timestamp.html new file mode 100644 index 000000000..fde2d8fde --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/Timestamp.html @@ -0,0 +1,256 @@

Timestamp (kafka 4.1.0 API)

Class Timestamp

java.lang.Object
    org.apache.kafka.connect.data.Timestamp

public class Timestamp extends Object

A timestamp representing an absolute time, without timezone information. The corresponding Java type is a Date. The underlying representation is a long representing the number of milliseconds since Unix epoch.

Field Details

LOGICAL_NAME

public static final String LOGICAL_NAME

SCHEMA

public static final Schema SCHEMA

Constructor Details

Timestamp

public Timestamp()

Method Details

builder

public static SchemaBuilder builder()
Returns a SchemaBuilder for a Timestamp. By returning a SchemaBuilder you can override additional schema settings such as required/optional, default value, and documentation.
Returns:
a SchemaBuilder

fromLogical

public static long fromLogical(Schema schema, Date value)
Convert a value from its logical format (Date) to its encoded format (long).
Parameters:
value - the logical value
Returns:
the encoded value

toLogical

public static Date toLogical(Schema schema, long value)
Convert a value from its encoded format (long) to its logical format (Date).
Parameters:
value - the encoded value
Returns:
the logical value
    +
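A minimal round trip with the Timestamp logical type, whose encoded form is milliseconds since the Unix epoch:

    import java.util.Date;

    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.data.Timestamp;

    public class TimestampExample {
        public static void main(String[] args) {
            Schema schema = Timestamp.SCHEMA;

            Date now = new Date();
            long encoded = Timestamp.fromLogical(schema, now); // milliseconds since the Unix epoch
            Date back = Timestamp.toLogical(schema, encoded);

            System.out.println(encoded + " -> " + back + " (round-trips: " + now.equals(back) + ")");
        }
    }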
diff --git a/static/41/javadoc/org/apache/kafka/connect/data/Values.html b/static/41/javadoc/org/apache/kafka/connect/data/Values.html new file mode 100644 index 000000000..767cdb11f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/Values.html @@ -0,0 +1,589 @@

Values (kafka 4.1.0 API)

Class Values

java.lang.Object
    org.apache.kafka.connect.data.Values

public class Values extends Object

Utility for converting from one Connect value to a different form. This is useful when the caller expects a value of a particular type but is uncertain whether the actual value is one that isn't directly that type but can be converted into that type.

For example, a caller might expect a particular Header to contain a Schema.Type.INT64 value, when in fact that header contains a string representation of a 32-bit integer. Here, the caller can use the methods in this class to convert the value to the desired type:

    Header header = ...
    long value = Values.convertToLong(header.schema(), header.value());

This class is able to convert any value to a string representation as well as parse those string representations back into most of the types. The only exception is Struct values that require a schema and thus cannot be parsed from a simple string.

Constructor Details

Values

public Values()

Method Details

convertToBoolean

public static Boolean convertToBoolean(Schema schema, Object value) throws DataException
Convert the specified value to a Schema.Type.BOOLEAN value. The supplied schema is required if the value is a logical type when the schema contains critical information that might be necessary for converting to a boolean.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as a boolean, or null if the supplied value was null
Throws:
DataException - if the value could not be converted to a boolean

convertToByte

public static Byte convertToByte(Schema schema, Object value) throws DataException
Convert the specified value to a Schema.Type.INT8 byte value. The supplied schema is required if the value is a logical type when the schema contains critical information that might be necessary for converting to a byte.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as a byte, or null if the supplied value was null
Throws:
DataException - if the value could not be converted to a byte

convertToShort

public static Short convertToShort(Schema schema, Object value) throws DataException
Convert the specified value to a Schema.Type.INT16 short value. The supplied schema is required if the value is a logical type when the schema contains critical information that might be necessary for converting to a short.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as a short, or null if the supplied value was null
Throws:
DataException - if the value could not be converted to a short

convertToInteger

public static Integer convertToInteger(Schema schema, Object value) throws DataException
Convert the specified value to a Schema.Type.INT32 int value. The supplied schema is required if the value is a logical type when the schema contains critical information that might be necessary for converting to an integer.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as an integer, or null if the supplied value was null
Throws:
DataException - if the value could not be converted to an integer

convertToLong

public static Long convertToLong(Schema schema, Object value) throws DataException
Convert the specified value to a Schema.Type.INT64 long value. The supplied schema is required if the value is a logical type when the schema contains critical information that might be necessary for converting to a long.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as a long, or null if the supplied value was null
Throws:
DataException - if the value could not be converted to a long

convertToFloat

public static Float convertToFloat(Schema schema, Object value) throws DataException
Convert the specified value to a Schema.Type.FLOAT32 float value. The supplied schema is required if the value is a logical type when the schema contains critical information that might be necessary for converting to a floating point number.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as a float, or null if the supplied value was null
Throws:
DataException - if the value could not be converted to a float

convertToDouble

public static Double convertToDouble(Schema schema, Object value) throws DataException
Convert the specified value to a Schema.Type.FLOAT64 double value. The supplied schema is required if the value is a logical type when the schema contains critical information that might be necessary for converting to a floating point number.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as a double, or null if the supplied value was null
Throws:
DataException - if the value could not be converted to a double

convertToString

public static String convertToString(Schema schema, Object value)
Convert the specified value to a Schema.Type.STRING value. Not supplying a schema may limit the ability to convert to the desired type.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as a string, or null if the supplied value was null

convertToList

public static List<?> convertToList(Schema schema, Object value)
Convert the specified value to a Schema.Type.ARRAY value. If the value is a string representation of an array, this method will parse the string and its elements to infer the schemas for those elements. Thus, this method supports arrays of other primitives and structured types. If the value is already an array (or list), this method simply casts and returns it.
This method currently does not use the schema, though it may be used in the future.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as a list, or null if the supplied value was null
Throws:
DataException - if the value cannot be converted to a list value

convertToMap

public static Map<?,?> convertToMap(Schema schema, Object value)
Convert the specified value to a Schema.Type.MAP value. If the value is a string representation of a map, this method will parse the string and its entries to infer the schemas for those entries. Thus, this method supports maps with primitives and structured keys and values. If the value is already a map, this method simply casts and returns it.
This method currently does not use the schema, though it may be used in the future.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as a map, or null if the supplied value was null
Throws:
DataException - if the value cannot be converted to a map value

convertToStruct

public static Struct convertToStruct(Schema schema, Object value)
Convert the specified value to a Schema.Type.STRUCT value. Structs cannot be converted from other types, so this method returns a struct only if the supplied value is a struct. If not a struct, this method throws an exception.
This method currently does not use the schema, though it may be used in the future.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as a struct, or null if the supplied value was null
Throws:
DataException - if the value is not a struct

convertToTime

public static Date convertToTime(Schema schema, Object value)
Convert the specified value to a time value. Not supplying a schema may limit the ability to convert to the desired type.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as a time, or null if the supplied value was null
Throws:
DataException - if the value cannot be converted to a time value

convertToDate

public static Date convertToDate(Schema schema, Object value)
Convert the specified value to a date value. Not supplying a schema may limit the ability to convert to the desired type.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as a date, or null if the supplied value was null
Throws:
DataException - if the value cannot be converted to a date value

convertToTimestamp

public static Date convertToTimestamp(Schema schema, Object value)
Convert the specified value to a timestamp value. Not supplying a schema may limit the ability to convert to the desired type.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as a timestamp, or null if the supplied value was null
Throws:
DataException - if the value cannot be converted to a timestamp value

convertToDecimal

public static BigDecimal convertToDecimal(Schema schema, Object value, int scale)
Convert the specified value to a decimal value. Not supplying a schema may limit the ability to convert to the desired type.
Parameters:
schema - the schema for the value; may be null
value - the value to be converted; may be null
Returns:
the representation as a decimal, or null if the supplied value was null
Throws:
DataException - if the value cannot be converted to a decimal value

inferSchema

public static Schema inferSchema(Object value)
If possible infer a schema for the given value.
Parameters:
value - the value whose schema is to be inferred; may be null
Returns:
the inferred schema, or null if the value is null or no schema could be inferred

parseString

public static SchemaAndValue parseString(String value)
Parse the specified string representation of a value into its schema and value.
Parameters:
value - the string form of the value
Returns:
the schema and value; never null, but whose schema and value may be null

dateFormatFor

public static DateFormat dateFormatFor(Date value)
    +
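A short sketch of the conversion and parsing helpers above (the input literals are arbitrary, and the exact inferred schema types depend on how the parser interprets them, so the results are only printed, not asserted):

    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.data.SchemaAndValue;
    import org.apache.kafka.connect.data.Values;

    public class ValuesExample {
        public static void main(String[] args) {
            // Convert values that are "close to" the requested type; the schema argument may be null.
            Long asLong = Values.convertToLong(Schema.STRING_SCHEMA, "12345");
            Double asDouble = Values.convertToDouble(null, 42);

            // Parse a string form back into a schema and value, then infer a schema from the value.
            SchemaAndValue parsed = Values.parseString("[1, 2, 3]");
            Schema inferred = Values.inferSchema(parsed.value());

            System.out.println(asLong + " " + asDouble);
            System.out.println(parsed.schema().type() + " / "
                    + (inferred == null ? "unknown" : inferred.type()));
        }
    }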
diff --git a/static/41/javadoc/org/apache/kafka/connect/data/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/data/package-summary.html new file mode 100644 index 000000000..0531eea98 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/package-summary.html @@ -0,0 +1,149 @@

org.apache.kafka.connect.data (kafka 4.1.0 API)

Package org.apache.kafka.connect.data

package org.apache.kafka.connect.data

Provides classes for representing data and schemas handled by Connect.

Class / Description
ConnectSchema
Date - A date representing a calendar day with no time of day or timezone.
Decimal - An arbitrary-precision signed decimal number.
Field - A field in a Struct, consisting of a field name, index, and Schema for the field value.
Schema - Definition of an abstract data type.
Schema.Type - The type of a schema.
SchemaAndValue - A composite containing a Schema and associated value.
SchemaBuilder - SchemaBuilder provides a fluent API for constructing Schema objects.
SchemaProjector - SchemaProjector is a utility to project a value between compatible schemas and throw exceptions when non-compatible schemas are provided.
Struct - A structured record containing a set of named fields with values, each field using an independent Schema.
Time - A time representing a specific point in a day, not tied to any specific date.
Timestamp - A timestamp representing an absolute time, without timezone information.
Values - Utility for converting from one Connect value to a different form.
    +
diff --git a/static/41/javadoc/org/apache/kafka/connect/data/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/data/package-tree.html new file mode 100644 index 000000000..2df1679dc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/data/package-tree.html @@ -0,0 +1,101 @@

org.apache.kafka.connect.data Class Hierarchy (kafka 4.1.0 API)

Hierarchy For Package org.apache.kafka.connect.data

Package Hierarchies:
All Packages

Class Hierarchy

• java.lang.Object
  • org.apache.kafka.connect.data.ConnectSchema (implements org.apache.kafka.connect.data.Schema)
  • org.apache.kafka.connect.data.Date
  • org.apache.kafka.connect.data.Decimal
  • org.apache.kafka.connect.data.Field
  • org.apache.kafka.connect.data.SchemaAndValue
  • org.apache.kafka.connect.data.SchemaBuilder (implements org.apache.kafka.connect.data.Schema)
  • org.apache.kafka.connect.data.SchemaProjector
  • org.apache.kafka.connect.data.Struct
  • org.apache.kafka.connect.data.Time
  • org.apache.kafka.connect.data.Timestamp
  • org.apache.kafka.connect.data.Values

Interface Hierarchy

• org.apache.kafka.connect.data.Schema

Enum Class Hierarchy

• org.apache.kafka.connect.data.Schema.Type
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/errors/AlreadyExistsException.html b/static/41/javadoc/org/apache/kafka/connect/errors/AlreadyExistsException.html new file mode 100644 index 000000000..091e543d9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/errors/AlreadyExistsException.html @@ -0,0 +1,171 @@ + + + + +AlreadyExistsException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class AlreadyExistsException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class AlreadyExistsException +extends ConnectException
    +
    Indicates the operation tried to create an entity that already exists.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        AlreadyExistsException

        +
        public AlreadyExistsException(String s)
        +
        +
      • +
      • +
        +

        AlreadyExistsException

        +
        public AlreadyExistsException(String s, + Throwable throwable)
        +
        +
      • +
      • +
        +

        AlreadyExistsException

        +
        public AlreadyExistsException(Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/errors/ConnectException.html b/static/41/javadoc/org/apache/kafka/connect/errors/ConnectException.html new file mode 100644 index 000000000..63bc8a7f9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/errors/ConnectException.html @@ -0,0 +1,173 @@ + + + + +ConnectException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConnectException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    Direct Known Subclasses:
    +
    AlreadyExistsException, DataException, IllegalWorkerStateException, NotFoundException, RetriableException
    +
    +
    +
    public class ConnectException +extends KafkaException
    +
    ConnectException is the top-level exception type generated by Kafka Connect and connector implementations.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ConnectException

        +
        public ConnectException(String s)
        +
        +
      • +
      • +
        +

        ConnectException

        +
        public ConnectException(String s, + Throwable throwable)
        +
        +
      • +
      • +
        +

        ConnectException

        +
        public ConnectException(Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/errors/DataException.html b/static/41/javadoc/org/apache/kafka/connect/errors/DataException.html new file mode 100644 index 000000000..9be6f1f2f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/errors/DataException.html @@ -0,0 +1,175 @@ + + + + +DataException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class DataException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    Direct Known Subclasses:
    +
    SchemaBuilderException, SchemaProjectorException
    +
    +
    +
    public class DataException +extends ConnectException
    +
    Base class for all Kafka Connect data API exceptions.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        DataException

        +
        public DataException(String s)
        +
        +
      • +
      • +
        +

        DataException

        +
        public DataException(String s, + Throwable throwable)
        +
        +
      • +
      • +
        +

        DataException

        +
        public DataException(Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/errors/IllegalWorkerStateException.html b/static/41/javadoc/org/apache/kafka/connect/errors/IllegalWorkerStateException.html new file mode 100644 index 000000000..d570eda69 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/errors/IllegalWorkerStateException.html @@ -0,0 +1,171 @@ + + + + +IllegalWorkerStateException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class IllegalWorkerStateException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class IllegalWorkerStateException +extends ConnectException
    +
    Indicates that a method has been invoked illegally or at an invalid time by a connector or task.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        IllegalWorkerStateException

        +
        public IllegalWorkerStateException(String s)
        +
        +
      • +
      • +
        +

        IllegalWorkerStateException

        +
        public IllegalWorkerStateException(String s, + Throwable throwable)
        +
        +
      • +
      • +
        +

        IllegalWorkerStateException

        +
        public IllegalWorkerStateException(Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/errors/NotFoundException.html b/static/41/javadoc/org/apache/kafka/connect/errors/NotFoundException.html new file mode 100644 index 000000000..ce970576a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/errors/NotFoundException.html @@ -0,0 +1,171 @@ + + + + +NotFoundException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class NotFoundException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class NotFoundException +extends ConnectException
    +
    Indicates that an operation attempted to modify or delete a connector or task that is not present on the worker.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        NotFoundException

        +
        public NotFoundException(String s)
        +
        +
      • +
      • +
        +

        NotFoundException

        +
        public NotFoundException(String s, + Throwable throwable)
        +
        +
      • +
      • +
        +

        NotFoundException

        +
        public NotFoundException(Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/errors/RetriableException.html b/static/41/javadoc/org/apache/kafka/connect/errors/RetriableException.html new file mode 100644 index 000000000..7ee55ebdf --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/errors/RetriableException.html @@ -0,0 +1,171 @@ + + + + +RetriableException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class RetriableException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class RetriableException +extends ConnectException
    +
    An exception that indicates the operation can be reattempted.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        RetriableException

        +
        public RetriableException(String s)
        +
        +
      • +
      • +
        +

        RetriableException

        +
        public RetriableException(String s, + Throwable throwable)
        +
        +
      • +
      • +
        +

        RetriableException

        +
        public RetriableException(Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
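The distinction between RetriableException and the other ConnectException subtypes is what callers use to decide whether an operation is worth reattempting. The sketch below is illustrative only (deliver and writeToExternalSystem are invented helpers, not Connect APIs):

    import java.io.IOException;

    import org.apache.kafka.connect.errors.ConnectException;
    import org.apache.kafka.connect.errors.RetriableException;

    public class RetryExample {
        // Invented helper: wraps transient failures in RetriableException so the caller may
        // reattempt, and permanent failures in the base ConnectException.
        static void deliver(byte[] payload, boolean permanent) {
            try {
                writeToExternalSystem(payload);              // placeholder for real I/O
            } catch (IOException e) {
                if (permanent) {
                    throw new ConnectException("giving up on payload", e);
                }
                throw new RetriableException("temporary failure, safe to retry", e);
            }
        }

        static void writeToExternalSystem(byte[] payload) throws IOException {
            throw new IOException("simulated network hiccup");
        }

        public static void main(String[] args) {
            try {
                deliver(new byte[0], false);
            } catch (RetriableException e) {
                System.out.println("will retry: " + e.getMessage());
            }
        }
    }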
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/errors/SchemaBuilderException.html b/static/41/javadoc/org/apache/kafka/connect/errors/SchemaBuilderException.html new file mode 100644 index 000000000..dac74f1a5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/errors/SchemaBuilderException.html @@ -0,0 +1,173 @@ + + + + +SchemaBuilderException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SchemaBuilderException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class SchemaBuilderException +extends DataException
    +
    Indicates an error while building a schema via SchemaBuilder
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SchemaBuilderException

        +
        public SchemaBuilderException(String s)
        +
        +
      • +
      • +
        +

        SchemaBuilderException

        +
        public SchemaBuilderException(String s, + Throwable throwable)
        +
        +
      • +
      • +
        +

        SchemaBuilderException

        +
        public SchemaBuilderException(Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/errors/SchemaProjectorException.html b/static/41/javadoc/org/apache/kafka/connect/errors/SchemaProjectorException.html new file mode 100644 index 000000000..927c6ceab --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/errors/SchemaProjectorException.html @@ -0,0 +1,173 @@ + + + + +SchemaProjectorException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SchemaProjectorException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class SchemaProjectorException +extends DataException
    +
    Indicates an error while projecting a schema via SchemaProjector
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SchemaProjectorException

        +
        public SchemaProjectorException(String s)
        +
        +
      • +
      • +
        +

        SchemaProjectorException

        +
        public SchemaProjectorException(String s, + Throwable throwable)
        +
        +
      • +
      • +
        +

        SchemaProjectorException

        +
        public SchemaProjectorException(Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/errors/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/errors/package-summary.html new file mode 100644 index 000000000..2a53bc497 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/errors/package-summary.html @@ -0,0 +1,115 @@ + + + + +org.apache.kafka.connect.errors (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.connect.errors

    +
    +
    +
    package org.apache.kafka.connect.errors
    +
    +
    Provides common exception classes for Connect, used by the framework and plugins to communicate failures.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/errors/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/errors/package-tree.html new file mode 100644 index 000000000..28ccdedab --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/errors/package-tree.html @@ -0,0 +1,100 @@ + + + + +org.apache.kafka.connect.errors Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.connect.errors

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/header/ConnectHeaders.html b/static/41/javadoc/org/apache/kafka/connect/header/ConnectHeaders.html new file mode 100644 index 000000000..b75e5d307 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/header/ConnectHeaders.html @@ -0,0 +1,913 @@ + + + + +ConnectHeaders (kafka 4.1.0 API) + + + + + + + + + + + + + + +
Class ConnectHeaders

java.lang.Object
  org.apache.kafka.connect.header.ConnectHeaders

All Implemented Interfaces: Iterable<Header>, Headers

public class ConnectHeaders extends Object implements Headers

A basic Headers implementation.

Constructor Details

- public ConnectHeaders()
- public ConnectHeaders(Iterable<Header> original)

Method Details

Each method below implements the corresponding method of the Headers interface (or of Iterable and Object where noted); their descriptions are copied from Headers and are listed in full on the Headers page.

- public int size()
- public boolean isEmpty()
- public Headers clear()
- public Headers add(Header header)
- public Headers add(String key, SchemaAndValue schemaAndValue)
- public Headers add(String key, Object value, Schema schema)
- public Headers addString(String key, String value)
- public Headers addBytes(String key, byte[] value)
- public Headers addBoolean(String key, boolean value)
- public Headers addByte(String key, byte value)
- public Headers addShort(String key, short value)
- public Headers addInt(String key, int value)
- public Headers addLong(String key, long value)
- public Headers addFloat(String key, float value)
- public Headers addDouble(String key, double value)
- public Headers addList(String key, List<?> value, Schema schema)
- public Headers addMap(String key, Map<?,?> value, Schema schema)
- public Headers addStruct(String key, Struct value)
- public Headers addDecimal(String key, BigDecimal value)
- public Headers addDate(String key, Date value)
- public Headers addTime(String key, Date value)
- public Headers addTimestamp(String key, Date value)
- public Header lastWithName(String key)
- public Iterator<Header> allWithName(String key)
- public Iterator<Header> iterator() (specified by Iterable)
- public Headers remove(String key)
- public Headers retainLatest()
- public Headers retainLatest(String key)
- public Headers apply(String key, Headers.HeaderTransform transform)
- public Headers apply(Headers.HeaderTransform transform)
- public int hashCode() (overrides Object)
- public boolean equals(Object obj) (overrides Object)
- public String toString() (overrides Object)
- public ConnectHeaders duplicate()
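A minimal usage sketch of the class documented above, using only the constructors and methods listed in this Javadoc. The header keys and values are illustrative, not part of any Connect convention.

```java
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.header.ConnectHeaders;
import org.apache.kafka.connect.header.Header;
import org.apache.kafka.connect.header.Headers;

public class ConnectHeadersExample {
    public static void main(String[] args) {
        // The add* methods return the collection itself, so calls can be chained.
        Headers headers = new ConnectHeaders()
                .addString("source.region", "us-west1")   // header keys/values are illustrative
                .addInt("schema.version", 3)
                .addBoolean("redacted", false)
                .add("note", new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, null));

        System.out.println("size = " + headers.size());   // 4

        // Duplicate keys are allowed; retainLatest(key) keeps only the last header for that key.
        headers.addInt("schema.version", 4).retainLatest("schema.version");
        Header latest = headers.lastWithName("schema.version");
        System.out.println(latest.key() + " -> " + latest.value());   // schema.version -> 4
    }
}
```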
diff --git a/static/41/javadoc/org/apache/kafka/connect/header/Header.html b/static/41/javadoc/org/apache/kafka/connect/header/Header.html new file mode 100644 index 000000000..8da6935ad --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/header/Header.html @@ -0,0 +1,206 @@
Header (kafka 4.1.0 API)
Interface Header

public interface Header

A Header is a key-value pair, and multiple headers can be included with the key, value, and timestamp in each Kafka message. If the value contains schema information, then the header will have a non-null schema.

This is an immutable interface.

Method Details

key
String key()
The header's key, which is not necessarily unique within the set of headers on a Kafka message.
Returns: the header's key; never null

schema
Schema schema()
Return the Schema associated with this header, if there is one. Not all headers will have schemas.
Returns: the header's schema, or null if no schema is associated with this header

value
Object value()
Get the header's value as deserialized by Connect's header converter.
Returns: the deserialized object representation of the header's value; may be null

with
Header with(Schema schema, Object value)
Return a new Header object that has the same key but with the supplied value.
Parameters:
  schema - the schema for the new value; may be null
  value - the new value
Returns: the new Header; never null

rename
Header rename(String key)
Return a new Header object that has the same schema and value but with the supplied key.
Parameters:
  key - the key for the new header; may not be null
Returns: the new Header; never null
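Because the interface is immutable, with(Schema, Object) and rename(String) return new Header instances rather than modifying the receiver. A small sketch follows; the key and values are made up, and ConnectHeaders is used only as a convenient way to obtain a Header instance.

```java
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.header.ConnectHeaders;
import org.apache.kafka.connect.header.Header;

public class HeaderExample {
    public static void main(String[] args) {
        // Build a single header via ConnectHeaders; the key and value are illustrative.
        Header original = new ConnectHeaders()
                .addString("trace.id", "abc-123")
                .lastWithName("trace.id");

        // Header is immutable: with(...) and rename(...) return new instances.
        Header replaced = original.with(Schema.STRING_SCHEMA, "xyz-789");
        Header renamed = replaced.rename("traceId");

        System.out.println(original.key() + "=" + original.value()); // trace.id=abc-123
        System.out.println(renamed.key() + "=" + renamed.value());   // traceId=xyz-789
    }
}
```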
diff --git a/static/41/javadoc/org/apache/kafka/connect/header/Headers.HeaderTransform.html b/static/41/javadoc/org/apache/kafka/connect/header/Headers.HeaderTransform.html new file mode 100644 index 000000000..a0b0b1544 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/header/Headers.HeaderTransform.html @@ -0,0 +1,139 @@
Headers.HeaderTransform (kafka 4.1.0 API)
Interface Headers.HeaderTransform

Enclosing interface: Headers

public static interface Headers.HeaderTransform

A function to transform the supplied Header. Implementations will likely need to use Header.with(Schema, Object) to create the new instance.

Method Details

apply
Header apply(Header header)
Transform the given Header and return the updated Header.
Parameters:
  header - the input header; never null
Returns: the new header, or null if the supplied Header is to be removed
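Since the interface declares a single abstract method, a lambda should satisfy it. The sketch below passes a transform to Headers.apply: returning null removes a header, and any other returned header replaces the original, as described above. The header names and the "redact the password header" behaviour are illustrative assumptions.

```java
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.header.ConnectHeaders;
import org.apache.kafka.connect.header.Headers;

public class HeaderTransformExample {
    public static void main(String[] args) {
        Headers headers = new ConnectHeaders()
                .addString("tenant", "acme")       // illustrative keys and values
                .addString("password", "s3cr3t");

        // HeaderTransform declares a single method, so a lambda can be supplied.
        // Returning null removes the header; otherwise the returned header replaces the original.
        headers.apply(header ->
                "password".equals(header.key())
                        ? null
                        : header.with(Schema.STRING_SCHEMA,
                                      String.valueOf(header.value()).toUpperCase()));

        headers.forEach(h -> System.out.println(h.key() + "=" + h.value()));   // tenant=ACME
    }
}
```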
diff --git a/static/41/javadoc/org/apache/kafka/connect/header/Headers.html b/static/41/javadoc/org/apache/kafka/connect/header/Headers.html new file mode 100644 index 000000000..04d3e1a33 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/header/Headers.html @@ -0,0 +1,751 @@
Headers (kafka 4.1.0 API)
Interface Headers

All Superinterfaces: Iterable<Header>
All Known Implementing Classes: ConnectHeaders

public interface Headers extends Iterable<Header>

A mutable ordered collection of Header objects. Note that multiple headers may have the same key.

Method Details

size
int size()
Get the number of headers in this object.
Returns: the number of headers; never negative

isEmpty
boolean isEmpty()
Determine whether this object has no headers.
Returns: true if there are no headers, or false if there is at least one header

allWithName
Iterator<Header> allWithName(String key)
Get the collection of Header objects whose keys all match the specified key.
Parameters:
  key - the key; may not be null
Returns: the iterator over headers with the specified key; may be null if there are no headers with the specified key

lastWithName
Header lastWithName(String key)
Return the last Header with the specified key.
Parameters:
  key - the key for the header; may not be null
Returns: the last Header, or null if there are no headers with the specified key

add
Headers add(Header header)
Add the given Header to this collection.
Parameters:
  header - the header; may not be null
Returns: this object to facilitate chaining multiple methods; never null

add / addString / addBoolean / addByte / addShort / addInt / addLong / addFloat / addDouble / addBytes / addList / addMap / addStruct / addDecimal / addDate / addTime / addTimestamp
Headers add(String key, SchemaAndValue schemaAndValue)
Headers add(String key, Object value, Schema schema)
Headers addString(String key, String value)
Headers addBoolean(String key, boolean value)
Headers addByte(String key, byte value)
Headers addShort(String key, short value)
Headers addInt(String key, int value)
Headers addLong(String key, long value)
Headers addFloat(String key, float value)
Headers addDouble(String key, double value)
Headers addBytes(String key, byte[] value)
Headers addList(String key, List<?> value, Schema schema)
Headers addMap(String key, Map<?,?> value, Schema schema)
Headers addStruct(String key, Struct value)
Headers addDecimal(String key, BigDecimal value)
Headers addDate(String key, Date value)
Headers addTime(String key, Date value)
Headers addTimestamp(String key, Date value)
Each of these adds to this collection a Header with the given key and value (a Decimal, Date, Time, or Timestamp value for the last four).
Parameters:
  key - the header's key; may not be null
  value / schemaAndValue - the header's value; may be null
  schema - for add(String, Object, Schema), may not be null if the value is not null; for addList and addMap, the schema describing the list or map value may not be null
Returns: this object to facilitate chaining multiple methods; never null
Throws: DataException - for addList, addMap, and addStruct, if the header's value is invalid

remove
Headers remove(String key)
Removes all Header objects whose key matches the specified key.
Parameters:
  key - the key; may not be null
Returns: this object to facilitate chaining multiple methods; never null

retainLatest
Headers retainLatest(String key)
Removes all but the latest Header objects whose key matches the specified key.
Parameters:
  key - the key; may not be null
Returns: this object to facilitate chaining multiple methods; never null

retainLatest
Headers retainLatest()
Removes all but the last Header object with each key.
Returns: this object to facilitate chaining multiple methods; never null

clear
Headers clear()
Removes all headers from this object.
Returns: this object to facilitate chaining multiple methods; never null

duplicate
Headers duplicate()
Create a copy of this Headers object. The new copy will contain all of the same Header objects as this object.
Returns: the copy; never null

apply
Headers apply(Headers.HeaderTransform transform)
Get all Headers, apply the transform to each and store the result in place of the original.
Parameters:
  transform - the transform to apply; may not be null
Returns: this object to facilitate chaining multiple methods; never null
Throws: DataException - if the header's value is invalid

apply
Headers apply(String key, Headers.HeaderTransform transform)
Get all Headers with the given key, apply the transform to each and store the result in place of the original.
Parameters:
  key - the header's key; may not be null
  transform - the transform to apply; may not be null
Returns: this object to facilitate chaining multiple methods; never null
Throws: DataException - if the header's value is invalid
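A short sketch of the duplicate-key and copy semantics described above, using ConnectHeaders as the implementation. The header key and values are illustrative.

```java
import java.util.Iterator;

import org.apache.kafka.connect.header.ConnectHeaders;
import org.apache.kafka.connect.header.Header;
import org.apache.kafka.connect.header.Headers;

public class HeadersExample {
    public static void main(String[] args) {
        // Multiple headers may share a key; both "attempt" headers below are kept, in order.
        Headers headers = new ConnectHeaders()
                .addString("attempt", "1")    // illustrative key and values
                .addString("attempt", "2");

        Iterator<Header> attempts = headers.allWithName("attempt");
        while (attempts.hasNext()) {
            System.out.println("attempt header: " + attempts.next().value());
        }

        // duplicate() copies the collection, so mutating the copy leaves the original untouched.
        Headers copy = headers.duplicate().remove("attempt");
        System.out.println(headers.size() + " vs " + copy.size());   // 2 vs 0
    }
}
```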
diff --git a/static/41/javadoc/org/apache/kafka/connect/header/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/header/package-summary.html new file mode 100644 index 000000000..6492fdf20 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/header/package-summary.html @@ -0,0 +1,105 @@
org.apache.kafka.connect.header (kafka 4.1.0 API)
Package org.apache.kafka.connect.header

package org.apache.kafka.connect.header

Provides an API for application-defined metadata attached to Connect records.

ConnectHeaders - A basic Headers implementation.
Header - A Header is a key-value pair, and multiple headers can be included with the key, value, and timestamp in each Kafka message.
Headers - A mutable ordered collection of Header objects.
Headers.HeaderTransform - A function to transform the supplied Header.

diff --git a/static/41/javadoc/org/apache/kafka/connect/header/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/header/package-tree.html new file mode 100644 index 000000000..c10d1c6eb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/header/package-tree.html @@ -0,0 +1,83 @@
org.apache.kafka.connect.header Class Hierarchy (kafka 4.1.0 API)
Hierarchy For Package org.apache.kafka.connect.header

Package Hierarchies:

Class Hierarchy

Interface Hierarchy

diff --git a/static/41/javadoc/org/apache/kafka/connect/health/AbstractState.html b/static/41/javadoc/org/apache/kafka/connect/health/AbstractState.html new file mode 100644 index 000000000..b6eadef85 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/health/AbstractState.html @@ -0,0 +1,241 @@
AbstractState (kafka 4.1.0 API)
Class AbstractState

java.lang.Object
  org.apache.kafka.connect.health.AbstractState

Direct Known Subclasses: ConnectorState, TaskState

public abstract class AbstractState extends Object

Provides the current status for a connector or a task, along with an identifier for its Connect worker.

Constructor Details

AbstractState
public AbstractState(String state, String workerId, String traceMessage)
Construct a state for a connector or task.
Parameters:
  state - the status of a connector or task; may not be null or empty
  workerId - the workerId associated with the connector or the task; may not be null or empty
  traceMessage - any error trace message associated with the connector or the task; may be null or empty

Method Details

state
public String state()
Provides the current state of the connector or task.
Returns: state, never null or empty

workerId
public String workerId()
The identifier of the worker associated with the connector or the task.
Returns: workerId, never null or empty

traceMessage
public String traceMessage()
The error message associated with the connector or task.
Returns: traceMessage, can be null or empty

equals
public boolean equals(Object o)
Overrides: equals in class Object

hashCode
public int hashCode()
Overrides: hashCode in class Object

Methods inherited from class java.lang.Object: getClass, notify, notifyAll, toString, wait, wait, wait

diff --git a/static/41/javadoc/org/apache/kafka/connect/health/ConnectClusterDetails.html b/static/41/javadoc/org/apache/kafka/connect/health/ConnectClusterDetails.html new file mode 100644 index 000000000..4ac340d69 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/health/ConnectClusterDetails.html @@ -0,0 +1,133 @@
ConnectClusterDetails (kafka 4.1.0 API)
Interface ConnectClusterDetails

public interface ConnectClusterDetails

Provides immutable Connect cluster information, such as the ID of the backing Kafka cluster. The Connect framework provides the implementation for this interface.

Method Details

kafkaClusterId
String kafkaClusterId()
Get the cluster ID of the Kafka cluster backing this Connect cluster.
Returns: the cluster ID of the Kafka cluster backing this Connect cluster

diff --git a/static/41/javadoc/org/apache/kafka/connect/health/ConnectClusterState.html b/static/41/javadoc/org/apache/kafka/connect/health/ConnectClusterState.html new file mode 100644 index 000000000..ffc1916f6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/health/ConnectClusterState.html @@ -0,0 +1,197 @@
ConnectClusterState (kafka 4.1.0 API)
Interface ConnectClusterState

public interface ConnectClusterState

Provides the ability to look up connector metadata, including status and configurations, as well as immutable cluster information such as the Kafka cluster ID. This is made available to ConnectRestExtension implementations. The Connect framework provides the implementation for this interface.

Method Details

connectors
Collection<String> connectors()
Get the names of the connectors currently deployed in this cluster. This is a full list of connectors in the cluster gathered from the current configuration, which may change over time.
Returns: collection of connector names, never null

connectorHealth
ConnectorHealth connectorHealth(String connName)
Look up the current health of a connector and its tasks. This provides the current snapshot of health by querying the underlying herder. A connector returned by a previous invocation of connectors() may no longer be available and could result in NotFoundException.
Parameters:
  connName - name of the connector
Returns: the health of the connector for the connector name
Throws: NotFoundException - if the requested connector can't be found

connectorConfig
default Map<String,String> connectorConfig(String connName)
Look up the current configuration of a connector. This provides the current snapshot of configuration by querying the underlying herder. A connector returned by a previous invocation of connectors() may no longer be available and could result in NotFoundException.
Parameters:
  connName - name of the connector
Returns: the configuration of the connector for the connector name
Throws: NotFoundException - if the requested connector can't be found; UnsupportedOperationException - if the default implementation has not been overridden

clusterDetails
default ConnectClusterDetails clusterDetails()
Get details about the setup of the Connect cluster.
Returns: a ConnectClusterDetails object containing information about the cluster
Throws: UnsupportedOperationException - if the default implementation has not been overridden
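A minimal sketch of how a REST extension might read this interface, using only the methods documented above. The report helper is hypothetical; in practice the ConnectClusterState instance comes from the context passed to a ConnectRestExtension, which is omitted here to keep the sketch self-contained. Note that connectorConfig and clusterDetails are default methods and may throw UnsupportedOperationException.

```java
import java.util.Map;

import org.apache.kafka.connect.health.ConnectClusterState;
import org.apache.kafka.connect.health.ConnectorHealth;

public class ClusterStateProbe {

    // Hypothetical helper: the ConnectClusterState would normally be obtained from the
    // ConnectRestExtension's context rather than passed in directly.
    static void report(ConnectClusterState clusterState) {
        // Default methods below may throw UnsupportedOperationException if not overridden.
        System.out.println("Kafka cluster ID: " + clusterState.clusterDetails().kafkaClusterId());

        for (String name : clusterState.connectors()) {
            ConnectorHealth health = clusterState.connectorHealth(name);
            Map<String, String> config = clusterState.connectorConfig(name);
            System.out.printf("%s [%s] state=%s, %d tasks, %d config keys%n",
                    name, health.type(), health.connectorState().state(),
                    health.tasksState().size(), config.size());
        }
    }
}
```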
diff --git a/static/41/javadoc/org/apache/kafka/connect/health/ConnectorHealth.html b/static/41/javadoc/org/apache/kafka/connect/health/ConnectorHealth.html new file mode 100644 index 000000000..ee624834a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/health/ConnectorHealth.html @@ -0,0 +1,259 @@
ConnectorHealth (kafka 4.1.0 API)
Class ConnectorHealth

java.lang.Object
  org.apache.kafka.connect.health.ConnectorHealth

public class ConnectorHealth extends Object

Provides basic health information about the connector and its tasks.

Constructor Details

Method Details

name
public String name()
Provides the name of the connector.
Returns: name, never null or empty

connectorState
public ConnectorState connectorState()
Provides the current state of the connector.
Returns: the connector state, never null

tasksState
public Map<Integer,TaskState> tasksState()
Provides the current state of the connector tasks.
Returns: the state for each task ID; never null

type
public ConnectorType type()
Provides the type of the connector.
Returns: type, never null

equals
public boolean equals(Object o)
Overrides: equals in class Object

hashCode
public int hashCode()
Overrides: hashCode in class Object

toString
public String toString()
Overrides: toString in class Object
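A small sketch that reads the accessors documented above. The print helper is hypothetical; the ConnectorHealth instance would typically come from ConnectClusterState.connectorHealth(connName).

```java
import java.util.Map;

import org.apache.kafka.connect.health.ConnectorHealth;
import org.apache.kafka.connect.health.TaskState;

public class HealthPrinter {

    // Hypothetical helper: `health` is assumed to come from ConnectClusterState.connectorHealth(...).
    static void print(ConnectorHealth health) {
        System.out.println(health.name() + " (" + health.type() + "): "
                + health.connectorState().state());

        for (Map.Entry<Integer, TaskState> entry : health.tasksState().entrySet()) {
            TaskState task = entry.getValue();
            System.out.printf("  task %d on %s -> %s%n",
                    entry.getKey(), task.workerId(), task.state());
            if (task.traceMessage() != null && !task.traceMessage().isEmpty()) {
                System.out.println("    trace: " + task.traceMessage());
            }
        }
    }
}
```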
diff --git a/static/41/javadoc/org/apache/kafka/connect/health/ConnectorState.html b/static/41/javadoc/org/apache/kafka/connect/health/ConnectorState.html new file mode 100644 index 000000000..a26b3b7a8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/health/ConnectorState.html @@ -0,0 +1,181 @@
ConnectorState (kafka 4.1.0 API)
Class ConnectorState

java.lang.Object
  org.apache.kafka.connect.health.AbstractState
    org.apache.kafka.connect.health.ConnectorState

public class ConnectorState extends AbstractState

Describes the status, worker ID, and any errors associated with a connector.

Constructor Details

ConnectorState
public ConnectorState(String state, String workerId, String traceMessage)
Provides an instance of the ConnectorState.
Parameters:
  state - the status of the connector, may not be null or empty
  workerId - the workerId associated with the connector, may not be null or empty
  traceMessage - any error message associated with the connector, may be null or empty

diff --git a/static/41/javadoc/org/apache/kafka/connect/health/ConnectorType.html b/static/41/javadoc/org/apache/kafka/connect/health/ConnectorType.html new file mode 100644 index 000000000..7ab807f39 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/health/ConnectorType.html @@ -0,0 +1,248 @@
ConnectorType (kafka 4.1.0 API)
Enum Class ConnectorType

java.lang.Object
  java.lang.Enum<ConnectorType>
    org.apache.kafka.connect.health.ConnectorType

All Implemented Interfaces: Serializable, Comparable<ConnectorType>, Constable

public enum ConnectorType extends Enum<ConnectorType>

Enum definition that identifies the type of the connector.

Enum Constant Details

SOURCE
public static final ConnectorType SOURCE
Identifies a source connector

SINK
public static final ConnectorType SINK
Identifies a sink connector

UNKNOWN
public static final ConnectorType UNKNOWN
Identifies a connector whose type could not be inferred

Method Details

values
public static ConnectorType[] values()
Returns an array containing the constants of this enum class, in the order they are declared.
Returns: an array containing the constants of this enum class, in the order they are declared

valueOf
public static ConnectorType valueOf(String name)
Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not permitted.)
Parameters:
  name - the name of the enum constant to be returned.
Returns: the enum constant with the specified name
Throws: IllegalArgumentException - if this enum class has no constant with the specified name; NullPointerException - if the argument is null

toString
public String toString()
Overrides: toString in class Enum<ConnectorType>
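A tiny sketch of the enum methods above. Since toString() is overridden, the printed form may not match the constant name, so the sketch avoids relying on a particular casing.

```java
import java.util.Arrays;

import org.apache.kafka.connect.health.ConnectorType;

public class ConnectorTypeExample {
    public static void main(String[] args) {
        // valueOf requires the exact constant name ("SOURCE", "SINK", or "UNKNOWN").
        ConnectorType type = ConnectorType.valueOf("SINK");

        // toString() is overridden, so the printed form may differ from name().
        System.out.println(type.name() + " prints as \"" + type + "\"");
        System.out.println("all types: " + Arrays.toString(ConnectorType.values()));
    }
}
```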
diff --git a/static/41/javadoc/org/apache/kafka/connect/health/TaskState.html b/static/41/javadoc/org/apache/kafka/connect/health/TaskState.html new file mode 100644 index 000000000..a32384f24 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/health/TaskState.html @@ -0,0 +1,226 @@
TaskState (kafka 4.1.0 API)
    + +
    +
    + +
    + +

    Class TaskState

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.health.AbstractState +
    org.apache.kafka.connect.health.TaskState
    +
    +
    +
    +
    +
    public class TaskState +extends AbstractState
    +
    Describes the state, IDs, and any errors of a connector task.
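A quick sketch of constructing and inspecting a TaskState, using the constructor documented below; the values are made up:

import org.apache.kafka.connect.health.TaskState;

public class TaskStateExample {
    public static void main(String[] args) {
        // A failed task snapshot, as the Connect runtime might report it.
        TaskState failed = new TaskState(0, "FAILED", "worker-1:8083", "java.lang.NullPointerException");

        // taskId() is declared on TaskState; state(), workerId() and traceMessage()
        // are inherited from AbstractState.
        System.out.println("task " + failed.taskId() + " on " + failed.workerId() + ": " + failed.state());
    }
}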
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        TaskState

        +
        public TaskState(int taskId, + String state, + String workerId, + String trace)
        +
        Provides an instance of TaskState.
        +
        +
        Parameters:
        +
        taskId - the id associated with the connector task
        +
        state - the status of the task, may not be null or empty
        +
        workerId - id of the worker the task is associated with, may not be null or empty
        +
        trace - error message if that task had failed or errored out, may be null or empty
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        taskId

        +
        public int taskId()
        +
        Provides the ID of the task.
        +
        +
        Returns:
        +
        the task ID
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class AbstractState
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class AbstractState
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/health/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/health/package-summary.html new file mode 100644 index 000000000..2e17494d6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/health/package-summary.html @@ -0,0 +1,119 @@ + + + + +org.apache.kafka.connect.health (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.connect.health

    +
    +
    +
    package org.apache.kafka.connect.health
    +
    +
    Provides an API for describing the state of a running Connect cluster to + ConnectRestExtension instances.
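As a rough sketch of how these classes fit together, a REST extension can walk the ConnectClusterState it is handed and summarize every connector; the accessor names used below (connectors(), connectorHealth(), connectorState(), tasksState()) should be cross-checked against the individual class pages:

import java.util.Map;
import org.apache.kafka.connect.health.ConnectClusterState;
import org.apache.kafka.connect.health.ConnectorHealth;
import org.apache.kafka.connect.health.TaskState;

public class HealthSummary {

    // Print one line per connector and one line per task.
    static void report(ConnectClusterState clusterState) {
        for (String name : clusterState.connectors()) {
            ConnectorHealth health = clusterState.connectorHealth(name);
            System.out.printf("%s [%s]: %s%n", name, health.type(), health.connectorState().state());
            for (Map.Entry<Integer, TaskState> task : health.tasksState().entrySet()) {
                System.out.printf("  task %d: %s%n", task.getKey(), task.getValue().state());
            }
        }
    }
}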
    +
    +
    +
      +
    • +
      +
      +
      +
      +
      Class
      +
      Description
      + +
      +
      Provides the current status for a connector or a task, along with an identifier for its Connect worker
      +
      + +
      +
      Provides immutable Connect cluster information, such as the ID of the backing Kafka cluster.
      +
      + +
      +
      Provides the ability to lookup connector metadata, including status and configurations, as well + as immutable cluster information such as Kafka cluster ID.
      +
      + +
      +
      Provides basic health information about the connector and its tasks.
      +
      + +
      +
      Describes the status, worker ID, and any errors associated with a connector.
      +
      + +
      +
      Enum definition that identifies the type of the connector.
      +
      + +
      +
      Describes the state, IDs, and any errors of a connector task.
      +
      +
      +
      +
      +
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/health/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/health/package-tree.html new file mode 100644 index 000000000..8122b70ee --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/health/package-tree.html @@ -0,0 +1,98 @@ + + + + +org.apache.kafka.connect.health Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.connect.health

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +

    Enum Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/mirror/Checkpoint.html b/static/41/javadoc/org/apache/kafka/connect/mirror/Checkpoint.html new file mode 100644 index 000000000..7d95d0cd0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/mirror/Checkpoint.html @@ -0,0 +1,443 @@ + + + + +Checkpoint (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Checkpoint

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.mirror.Checkpoint
    +
    +
    +
    +
    public class Checkpoint +extends Object
    +
    Checkpoint records emitted by MirrorCheckpointConnector.
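A sketch of reading checkpoints from the target cluster and decoding them with deserializeRecord(); the bootstrap servers, group ID and topic name are placeholders (with DefaultReplicationPolicy the checkpoints topic is typically "<source-alias>.checkpoints.internal"):

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.connect.mirror.Checkpoint;

public class CheckpointReader {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "target-cluster:9092");      // placeholder
        props.put("group.id", "checkpoint-reader");                 // placeholder
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("us-west.checkpoints.internal"));
            for (ConsumerRecord<byte[], byte[]> record : consumer.poll(Duration.ofSeconds(5))) {
                Checkpoint cp = Checkpoint.deserializeRecord(record);
                System.out.printf("%s %s: upstream=%d downstream=%d%n",
                        cp.consumerGroupId(), cp.topicPartition(),
                        cp.upstreamOffset(), cp.downstreamOffset());
            }
        }
    }
}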
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        TOPIC_KEY

        +
        public static final String TOPIC_KEY
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        PARTITION_KEY

        +
        public static final String PARTITION_KEY
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        CONSUMER_GROUP_ID_KEY

        +
        public static final String CONSUMER_GROUP_ID_KEY
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        UPSTREAM_OFFSET_KEY

        +
        public static final String UPSTREAM_OFFSET_KEY
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DOWNSTREAM_OFFSET_KEY

        +
        public static final String DOWNSTREAM_OFFSET_KEY
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        METADATA_KEY

        +
        public static final String METADATA_KEY
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        VERSION_KEY

        +
        public static final String VERSION_KEY
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        VERSION

        +
        public static final short VERSION
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        VALUE_SCHEMA_V0

        +
        public static final org.apache.kafka.common.protocol.types.Schema VALUE_SCHEMA_V0
        +
        +
      • +
      • +
        +

        KEY_SCHEMA

        +
        public static final org.apache.kafka.common.protocol.types.Schema KEY_SCHEMA
        +
        +
      • +
      • +
        +

        HEADER_SCHEMA

        +
        public static final org.apache.kafka.common.protocol.types.Schema HEADER_SCHEMA
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Checkpoint

        +
        public Checkpoint(String consumerGroupId, + TopicPartition topicPartition, + long upstreamOffset, + long downstreamOffset, + String metadata)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        consumerGroupId

        +
        public String consumerGroupId()
        +
        +
      • +
      • +
        +

        topicPartition

        +
        public TopicPartition topicPartition()
        +
        +
      • +
      • +
        +

        upstreamOffset

        +
        public long upstreamOffset()
        +
        +
      • +
      • +
        +

        downstreamOffset

        +
        public long downstreamOffset()
        +
        +
      • +
      • +
        +

        metadata

        +
        public String metadata()
        +
        +
      • +
      • +
        +

        offsetAndMetadata

        +
        public OffsetAndMetadata offsetAndMetadata()
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        deserializeRecord

        +
        public static Checkpoint deserializeRecord(ConsumerRecord<byte[],byte[]> record)
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/mirror/DefaultReplicationPolicy.html b/static/41/javadoc/org/apache/kafka/connect/mirror/DefaultReplicationPolicy.html new file mode 100644 index 000000000..cb0476f59 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/mirror/DefaultReplicationPolicy.html @@ -0,0 +1,389 @@ + + + + +DefaultReplicationPolicy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class DefaultReplicationPolicy

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.mirror.DefaultReplicationPolicy
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Configurable, ReplicationPolicy
    +
    +
    +
    Direct Known Subclasses:
    +
    IdentityReplicationPolicy
    +
    +
    +
    public class DefaultReplicationPolicy +extends Object +implements ReplicationPolicy, Configurable
    +
    Default implementation of ReplicationPolicy which prepends the source cluster alias to + remote topic names. + For example, if the source cluster alias is "us-west", topics created in the target cluster will be named + us-west.<TOPIC>. The separator is customizable by setting SEPARATOR_CONFIG and defaults to a period.
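A minimal sketch of the renaming behaviour, assuming SEPARATOR_CONFIG resolves to the "replication.policy.separator" property:

import java.util.Map;
import org.apache.kafka.connect.mirror.DefaultReplicationPolicy;

public class RenamingDemo {
    public static void main(String[] args) {
        DefaultReplicationPolicy policy = new DefaultReplicationPolicy();
        // Override the default "." separator.
        policy.configure(Map.of("replication.policy.separator", "__"));

        String remote = policy.formatRemoteTopic("us-west", "orders");
        System.out.println(remote);                       // us-west__orders
        System.out.println(policy.topicSource(remote));   // us-west
        System.out.println(policy.upstreamTopic(remote)); // orders
    }
}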
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        SEPARATOR_CONFIG

        +
        public static final String SEPARATOR_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SEPARATOR_DEFAULT

        +
        public static final String SEPARATOR_DEFAULT
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        INTERNAL_TOPIC_SEPARATOR_ENABLED_CONFIG

        +
        public static final String INTERNAL_TOPIC_SEPARATOR_ENABLED_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        INTERNAL_TOPIC_SEPARATOR_ENABLED_DEFAULT

        +
        public static final Boolean INTERNAL_TOPIC_SEPARATOR_ENABLED_DEFAULT
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        DefaultReplicationPolicy

        +
        public DefaultReplicationPolicy()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        public void configure(Map<String,?> props)
        +
        Description copied from interface: Configurable
        +
        Configure this class with the given key-value pairs
        +
        +
        Specified by:
        +
        configure in interface Configurable
        +
        +
        +
      • +
      • +
        +

        formatRemoteTopic

        +
        public String formatRemoteTopic(String sourceClusterAlias, + String topic)
        +
        Description copied from interface: ReplicationPolicy
        +
        Returns the remote topic name for the given topic and source cluster alias.
        +
        +
        Specified by:
        +
        formatRemoteTopic in interface ReplicationPolicy
        +
        +
        +
      • +
      • +
        +

        topicSource

        +
        public String topicSource(String topic)
        +
        Description copied from interface: ReplicationPolicy
        +
        Returns the source cluster alias of given topic. + Returns null if the given topic is not a remote topic.
        +
        +
        Specified by:
        +
        topicSource in interface ReplicationPolicy
        +
        +
        +
      • +
      • +
        +

        upstreamTopic

        +
        public String upstreamTopic(String topic)
        +
        Description copied from interface: ReplicationPolicy
        +
        Return the name of the given topic on the source cluster. +

        + Topics may be replicated multiple hops, so the immediately upstream topic may itself be a remote topic. +

        + Returns null if the given topic is not a remote topic.

        +
        +
        Specified by:
        +
        upstreamTopic in interface ReplicationPolicy
        +
        +
        +
      • +
      • +
        +

        offsetSyncsTopic

        +
        public String offsetSyncsTopic(String clusterAlias)
        +
        Description copied from interface: ReplicationPolicy
        +
        Returns the name of the offset-syncs topic for given cluster alias.
        +
        +
        Specified by:
        +
        offsetSyncsTopic in interface ReplicationPolicy
        +
        +
        +
      • +
      • +
        +

        checkpointsTopic

        +
        public String checkpointsTopic(String clusterAlias)
        +
        Description copied from interface: ReplicationPolicy
        +
        Returns the name of the checkpoints topic for given cluster alias.
        +
        +
        Specified by:
        +
        checkpointsTopic in interface ReplicationPolicy
        +
        +
        +
      • +
      • +
        +

        isCheckpointsTopic

        +
        public boolean isCheckpointsTopic(String topic)
        +
        Description copied from interface: ReplicationPolicy
        +
        Returns true if the topic is a checkpoints topic.
        +
        +
        Specified by:
        +
        isCheckpointsTopic in interface ReplicationPolicy
        +
        +
        +
      • +
      • +
        +

        isMM2InternalTopic

        +
        public boolean isMM2InternalTopic(String topic)
        +
        Description copied from interface: ReplicationPolicy
        +
Returns true if the topic is one of MirrorMaker's internal topics. This check is used to ensure that such topics are not replicated.

        +
        +
        Specified by:
        +
        isMM2InternalTopic in interface ReplicationPolicy
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/mirror/Heartbeat.html b/static/41/javadoc/org/apache/kafka/connect/mirror/Heartbeat.html new file mode 100644 index 000000000..ea0083101 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/mirror/Heartbeat.html @@ -0,0 +1,335 @@ + + + + +Heartbeat (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Heartbeat

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.mirror.Heartbeat
    +
    +
    +
    +
    public class Heartbeat +extends Object
    +
    Heartbeat records emitted by MirrorHeartbeatConnector.
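Heartbeats can be decoded the same way as checkpoints; a compressed sketch, where the record is assumed to have been polled from a heartbeats topic (e.g. "heartbeats", or "us-west.heartbeats" after one replication hop):

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.connect.mirror.Heartbeat;

public class HeartbeatDecode {
    static void print(ConsumerRecord<byte[], byte[]> record) {
        Heartbeat hb = Heartbeat.deserializeRecord(record);
        System.out.printf("%s -> %s at %d%n",
                hb.sourceClusterAlias(), hb.targetClusterAlias(), hb.timestamp());
    }
}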
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        SOURCE_CLUSTER_ALIAS_KEY

        +
        public static final String SOURCE_CLUSTER_ALIAS_KEY
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        TARGET_CLUSTER_ALIAS_KEY

        +
        public static final String TARGET_CLUSTER_ALIAS_KEY
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        TIMESTAMP_KEY

        +
        public static final String TIMESTAMP_KEY
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        VERSION_KEY

        +
        public static final String VERSION_KEY
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        VERSION

        +
        public static final short VERSION
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        VALUE_SCHEMA_V0

        +
        public static final org.apache.kafka.common.protocol.types.Schema VALUE_SCHEMA_V0
        +
        +
      • +
      • +
        +

        KEY_SCHEMA

        +
        public static final org.apache.kafka.common.protocol.types.Schema KEY_SCHEMA
        +
        +
      • +
      • +
        +

        HEADER_SCHEMA

        +
        public static final org.apache.kafka.common.protocol.types.Schema HEADER_SCHEMA
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Heartbeat

        +
        public Heartbeat(String sourceClusterAlias, + String targetClusterAlias, + long timestamp)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        sourceClusterAlias

        +
        public String sourceClusterAlias()
        +
        +
      • +
      • +
        +

        targetClusterAlias

        +
        public String targetClusterAlias()
        +
        +
      • +
      • +
        +

        timestamp

        +
        public long timestamp()
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        deserializeRecord

        +
        public static Heartbeat deserializeRecord(ConsumerRecord<byte[],byte[]> record)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/mirror/IdentityReplicationPolicy.html b/static/41/javadoc/org/apache/kafka/connect/mirror/IdentityReplicationPolicy.html new file mode 100644 index 000000000..f067ca5ac --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/mirror/IdentityReplicationPolicy.html @@ -0,0 +1,292 @@ + + + + +IdentityReplicationPolicy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class IdentityReplicationPolicy

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.mirror.DefaultReplicationPolicy +
    org.apache.kafka.connect.mirror.IdentityReplicationPolicy
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Configurable, ReplicationPolicy
    +
    +
    +
    public class IdentityReplicationPolicy +extends DefaultReplicationPolicy
    +
    Alternative implementation of ReplicationPolicy that does not rename remote topics. + This is useful for migrating from legacy MirrorMaker, or for any use-case involving one-way replication. +

    + N.B. MirrorMaker is not able to prevent cycles when using this replication policy, so take care that + your replication topology is acyclic. If migrating from legacy MirrorMaker, this will likely already be the case.

    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/mirror/MirrorClient.html b/static/41/javadoc/org/apache/kafka/connect/mirror/MirrorClient.html new file mode 100644 index 000000000..532c0097b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/mirror/MirrorClient.html @@ -0,0 +1,320 @@ + + + + +MirrorClient (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MirrorClient

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.mirror.MirrorClient
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    AutoCloseable
    +
    +
    +
    public class MirrorClient +extends Object +implements AutoCloseable
    +
    Client to interact with MirrorMaker internal topics (checkpoints, heartbeats) on a given cluster. + Whenever possible use the methods from RemoteClusterUtils instead of directly using MirrorClient.
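A sketch of typical usage against a target cluster; the Map-based constructor and the property values are assumptions to be checked against the constructor summary on this page:

import java.time.Duration;
import java.util.Map;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.mirror.MirrorClient;

public class MirrorClientDemo {
    public static void main(String[] args) throws InterruptedException {
        // Connection details for the *target* cluster; the address is a placeholder.
        Map<String, Object> props = Map.of("bootstrap.servers", "target-cluster:9092");
        MirrorClient client = new MirrorClient(props);
        try {
            System.out.println("upstream clusters: " + client.upstreamClusters());
            System.out.println("remote topics:     " + client.remoteTopics());

            // Translate offsets committed by "my-group" on the "us-west" cluster.
            Map<TopicPartition, OffsetAndMetadata> offsets =
                    client.remoteConsumerOffsets("my-group", "us-west", Duration.ofSeconds(30));
            offsets.forEach((tp, om) -> System.out.println(tp + " -> " + om.offset()));
        } finally {
            client.close();
        }
    }
}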
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        close

        +
        public void close()
        +
        Closes internal clients.
        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        +
        +
      • +
      • +
        +

        replicationPolicy

        +
        public ReplicationPolicy replicationPolicy()
        +
        Gets the ReplicationPolicy instance used to interpret remote topics. This instance is constructed based on + relevant configuration properties, including replication.policy.class.
        +
        +
      • +
      • +
        +

        replicationHops

        +
        public int replicationHops(String upstreamClusterAlias) + throws InterruptedException
        +
        Computes the shortest number of hops from an upstream source cluster. + For example, given replication flow A->B->C, there are two hops from A to C. + Returns -1 if the upstream cluster is unreachable.
        +
        +
        Throws:
        +
        InterruptedException
        +
        +
        +
      • +
      • +
        +

        heartbeatTopics

        +
        public Set<String> heartbeatTopics() + throws InterruptedException
        +
        Finds all heartbeats topics on this cluster. Heartbeats topics are replicated from other clusters.
        +
        +
        Throws:
        +
        InterruptedException
        +
        +
        +
      • +
      • +
        +

        checkpointTopics

        +
        public Set<String> checkpointTopics() + throws InterruptedException
        +
        Finds all checkpoints topics on this cluster.
        +
        +
        Throws:
        +
        InterruptedException
        +
        +
        +
      • +
      • +
        +

        upstreamClusters

        +
        public Set<String> upstreamClusters() + throws InterruptedException
        +
        Finds upstream clusters, which may be multiple hops away, based on incoming heartbeats.
        +
        +
        Throws:
        +
        InterruptedException
        +
        +
        +
      • +
      • +
        +

        remoteTopics

        +
        public Set<String> remoteTopics() + throws InterruptedException
        +
        Finds all remote topics on this cluster. This does not include internal topics (heartbeats, checkpoints).
        +
        +
        Throws:
        +
        InterruptedException
        +
        +
        +
      • +
      • +
        +

        remoteTopics

        +
        public Set<String> remoteTopics(String source) + throws InterruptedException
        +
        Finds all remote topics that have been replicated directly from the given source cluster.
        +
        +
        Throws:
        +
        InterruptedException
        +
        +
        +
      • +
      • +
        +

        remoteConsumerOffsets

        +
        public Map<TopicPartition,OffsetAndMetadata> remoteConsumerOffsets(String consumerGroupId, + String remoteClusterAlias, + Duration timeout)
        +
        Translates a remote consumer group's offsets into corresponding local offsets. Topics are automatically + renamed according to the ReplicationPolicy.
        +
        +
        Parameters:
        +
        consumerGroupId - The group ID of remote consumer group
        +
        remoteClusterAlias - The alias of remote cluster
        +
        timeout - The maximum time to block when consuming from the checkpoints topic
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/mirror/MirrorClientConfig.html b/static/41/javadoc/org/apache/kafka/connect/mirror/MirrorClientConfig.html new file mode 100644 index 000000000..fdabbfd60 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/mirror/MirrorClientConfig.html @@ -0,0 +1,404 @@ + + + + +MirrorClientConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MirrorClientConfig

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.AbstractConfig +
    org.apache.kafka.connect.mirror.MirrorClientConfig
    +
    +
    +
    +
    +
    public class MirrorClientConfig +extends AbstractConfig
    +
    Configuration required for MirrorClient to talk to a given target cluster. +

    + This needs to contain at least the connection details for the target cluster (bootstrap.servers and + any required TLS/SASL configuration), as well as REPLICATION_POLICY_CLASS when not using the default + replication policy. It can also include AdminClientConfig and ConsumerConfig to customize the + internal clients this uses. For example: +

    +      bootstrap.servers = host1:9092
    +      consumer.client.id = mm2-client
    +      replication.policy.separator = __
    +   
    +

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        REPLICATION_POLICY_CLASS

        +
        public static final String REPLICATION_POLICY_CLASS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        REPLICATION_POLICY_CLASS_DEFAULT

        +
        public static final Class<?> REPLICATION_POLICY_CLASS_DEFAULT
        +
        +
      • +
      • +
        +

        REPLICATION_POLICY_SEPARATOR

        +
        public static final String REPLICATION_POLICY_SEPARATOR
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        REPLICATION_POLICY_SEPARATOR_DEFAULT

        +
        public static final String REPLICATION_POLICY_SEPARATOR_DEFAULT
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        INTERNAL_TOPIC_SEPARATOR_ENABLED

        +
        public static final String INTERNAL_TOPIC_SEPARATOR_ENABLED
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        INTERNAL_TOPIC_SEPARATOR_ENABLED_DOC

        +
        public static final String INTERNAL_TOPIC_SEPARATOR_ENABLED_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        INTERNAL_TOPIC_SEPARATOR_ENABLED_DEFAULT

        +
        public static final Boolean INTERNAL_TOPIC_SEPARATOR_ENABLED_DEFAULT
        +
        +
      • +
      • +
        +

        FORWARDING_ADMIN_CLASS

        +
        public static final String FORWARDING_ADMIN_CLASS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        FORWARDING_ADMIN_CLASS_DOC

        +
        public static final String FORWARDING_ADMIN_CLASS_DOC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        FORWARDING_ADMIN_CLASS_DEFAULT

        +
        public static final Class<?> FORWARDING_ADMIN_CLASS_DEFAULT
        +
        +
      • +
      • +
        +

        ADMIN_CLIENT_PREFIX

        +
        public static final String ADMIN_CLIENT_PREFIX
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        CONSUMER_CLIENT_PREFIX

        +
        public static final String CONSUMER_CLIENT_PREFIX
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        PRODUCER_CLIENT_PREFIX

        +
        public static final String PRODUCER_CLIENT_PREFIX
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        replicationPolicy

        +
        public ReplicationPolicy replicationPolicy()
        +
        +
      • +
      • +
        +

        adminConfig

        +
        public Map<String,Object> adminConfig()
        +
        Sub-config for Admin clients.
        +
        +
      • +
      • +
        +

        consumerConfig

        +
        public Map<String,Object> consumerConfig()
        +
        Sub-config for Consumer clients.
        +
        +
      • +
      • +
        +

        producerConfig

        +
        public Map<String,Object> producerConfig()
        +
        Sub-config for Producer clients.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/mirror/RemoteClusterUtils.html b/static/41/javadoc/org/apache/kafka/connect/mirror/RemoteClusterUtils.html new file mode 100644 index 000000000..50ed3b96d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/mirror/RemoteClusterUtils.html @@ -0,0 +1,251 @@ + + + + +RemoteClusterUtils (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class RemoteClusterUtils

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.mirror.RemoteClusterUtils
    +
    +
    +
    +
    public final class RemoteClusterUtils +extends Object
    +
    Convenience tool for multi-cluster environments. Wraps MirrorClient +

    + Properties passed to these methods are used to construct internal Admin and Consumer clients. + Sub-configs like "admin.xyz" are also supported. For example: +

    +
    +     bootstrap.servers = host1:9092
    +     consumer.client.id = mm2-client
    + 
    +

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/mirror/ReplicationPolicy.html b/static/41/javadoc/org/apache/kafka/connect/mirror/ReplicationPolicy.html new file mode 100644 index 000000000..f0b23c5e1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/mirror/ReplicationPolicy.html @@ -0,0 +1,261 @@ + + + + +ReplicationPolicy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ReplicationPolicy

    +
    +
    +
    +
    All Known Implementing Classes:
    +
    DefaultReplicationPolicy, IdentityReplicationPolicy
    +
    +
    +
    public interface ReplicationPolicy
    +
An interface used by the MirrorMaker connectors to manage topic names between source and target clusters.
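Only the three non-default methods have to be implemented; a sketch of a custom policy that uses a double-underscore separator (purely illustrative):

import org.apache.kafka.connect.mirror.ReplicationPolicy;

public class DoubleUnderscorePolicy implements ReplicationPolicy {
    private static final String SEP = "__";

    @Override
    public String formatRemoteTopic(String sourceClusterAlias, String topic) {
        // e.g. ("us-west", "orders") -> "us-west__orders"
        return sourceClusterAlias + SEP + topic;
    }

    @Override
    public String topicSource(String topic) {
        // Alias of the immediate source cluster, or null for local topics.
        int i = topic.indexOf(SEP);
        return i < 0 ? null : topic.substring(0, i);
    }

    @Override
    public String upstreamTopic(String topic) {
        // The topic's name one hop upstream, or null for local topics.
        int i = topic.indexOf(SEP);
        return i < 0 ? null : topic.substring(i + SEP.length());
    }
}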
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      default String
      +
      checkpointsTopic(String clusterAlias)
      +
      +
      Returns the name of the checkpoints topic for given cluster alias.
      +
      + +
      formatRemoteTopic(String sourceClusterAlias, + String topic)
      +
      +
      Returns the remote topic name for the given topic and source cluster alias.
      +
      +
      default String
      + +
      +
      Returns the name of heartbeats topic.
      +
      +
      default boolean
      + +
      +
      Returns true if the topic is a checkpoints topic.
      +
      +
      default boolean
      + +
      +
      Returns true if the topic is a heartbeats topic
      +
      +
      default boolean
      + +
      +
      Returns true if the topic is considered an internal topic.
      +
      +
      default boolean
      + +
      +
      Returns true if the topic is one of MirrorMaker internal topics.
      +
      +
      default String
      +
      offsetSyncsTopic(String clusterAlias)
      +
      +
      Returns the name of the offset-syncs topic for given cluster alias.
      +
      +
      default String
      + +
      +
      Returns the name of the original topic, which may have been replicated multiple hops.
      +
      + + +
      +
      Returns the source cluster alias of given topic.
      +
      + + +
      +
      Return the name of the given topic on the source cluster.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        formatRemoteTopic

        +
        String formatRemoteTopic(String sourceClusterAlias, + String topic)
        +
        Returns the remote topic name for the given topic and source cluster alias.
        +
        +
      • +
      • +
        +

        topicSource

        +
        String topicSource(String topic)
        +
        Returns the source cluster alias of given topic. + Returns null if the given topic is not a remote topic.
        +
        +
      • +
      • +
        +

        upstreamTopic

        +
        String upstreamTopic(String topic)
        +
        Return the name of the given topic on the source cluster. +

        + Topics may be replicated multiple hops, so the immediately upstream topic may itself be a remote topic. +

        + Returns null if the given topic is not a remote topic.

        +
        +
      • +
      • +
        +

        originalTopic

        +
        default String originalTopic(String topic)
        +
        Returns the name of the original topic, which may have been replicated multiple hops. + Returns the topic if it is not a remote topic.
        +
        +
      • +
      • +
        +

        heartbeatsTopic

        +
        default String heartbeatsTopic()
        +
        Returns the name of heartbeats topic.
        +
        +
      • +
      • +
        +

        offsetSyncsTopic

        +
        default String offsetSyncsTopic(String clusterAlias)
        +
        Returns the name of the offset-syncs topic for given cluster alias.
        +
        +
      • +
      • +
        +

        checkpointsTopic

        +
        default String checkpointsTopic(String clusterAlias)
        +
        Returns the name of the checkpoints topic for given cluster alias.
        +
        +
      • +
      • +
        +

        isHeartbeatsTopic

        +
        default boolean isHeartbeatsTopic(String topic)
        +
        Returns true if the topic is a heartbeats topic
        +
        +
      • +
      • +
        +

        isCheckpointsTopic

        +
        default boolean isCheckpointsTopic(String topic)
        +
        Returns true if the topic is a checkpoints topic.
        +
        +
      • +
      • +
        +

        isMM2InternalTopic

        +
        default boolean isMM2InternalTopic(String topic)
        +
Returns true if the topic is one of MirrorMaker's internal topics. This check is used to ensure that such topics are not replicated.
        +
        +
      • +
      • +
        +

        isInternalTopic

        +
        default boolean isInternalTopic(String topic)
        +
        Returns true if the topic is considered an internal topic.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/mirror/SourceAndTarget.html b/static/41/javadoc/org/apache/kafka/connect/mirror/SourceAndTarget.html new file mode 100644 index 000000000..71bff25ef --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/mirror/SourceAndTarget.html @@ -0,0 +1,209 @@ + + + + +SourceAndTarget (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SourceAndTarget

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.mirror.SourceAndTarget
    +
    +
    +
    +
    public class SourceAndTarget +extends Object
    +
    Directional pair of clusters, where source is mirrored to target.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SourceAndTarget

        +
        public SourceAndTarget(String source, + String target)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        source

        +
        public String source()
        +
        +
      • +
      • +
        +

        target

        +
        public String target()
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object other)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/mirror/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/mirror/package-summary.html new file mode 100644 index 000000000..20c2f8705 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/mirror/package-summary.html @@ -0,0 +1,126 @@ + + + + +org.apache.kafka.connect.mirror (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.connect.mirror

    +
    +
    +
    package org.apache.kafka.connect.mirror
    +
    +
    Provides APIs for the MirrorMaker connectors and utilities to manage MirrorMaker resources.
    +
    +
    +
      +
    • +
      +
      +
      +
      +
      Class
      +
      Description
      + +
      +
      Checkpoint records emitted by MirrorCheckpointConnector.
      +
      + +
      +
      Default implementation of ReplicationPolicy which prepends the source cluster alias to + remote topic names.
      +
      + +
      +
      Heartbeat records emitted by MirrorHeartbeatConnector.
      +
      + +
      +
      Alternative implementation of ReplicationPolicy that does not rename remote topics.
      +
      + +
      +
      Client to interact with MirrorMaker internal topics (checkpoints, heartbeats) on a given cluster.
      +
      + +
      +
      Configuration required for MirrorClient to talk to a given target cluster.
      +
      + +
      +
      Convenience tool for multi-cluster environments.
      +
      + +
      +
An interface used by the MirrorMaker connectors to manage topic names between source and target clusters.
      +
      + +
      +
      Directional pair of clusters, where source is mirrored to target.
      +
      +
      +
      +
      +
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/mirror/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/mirror/package-tree.html new file mode 100644 index 000000000..1b6f1dcdc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/mirror/package-tree.html @@ -0,0 +1,91 @@ + + + + +org.apache.kafka.connect.mirror Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.connect.mirror

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/rest/ConnectRestExtension.html b/static/41/javadoc/org/apache/kafka/connect/rest/ConnectRestExtension.html new file mode 100644 index 000000000..945fb7bc2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/rest/ConnectRestExtension.html @@ -0,0 +1,169 @@ + + + + +ConnectRestExtension (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ConnectRestExtension

    +
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Closeable, Configurable, Versioned
    +
    +
    +
    public interface ConnectRestExtension +extends Configurable, Versioned, Closeable
    +
    A plugin interface to allow registration of new JAX-RS resources like Filters, REST endpoints, providers, etc. The implementations will + be discovered using the standard Java ServiceLoader mechanism by Connect's plugin class loading mechanism. + +

    Kafka Connect discovers implementations of this interface using the Java ServiceLoader mechanism. + To support this, implementations of this interface should also contain a service provider configuration file in + META-INF/services/org.apache.kafka.connect.rest.ConnectRestExtension. +

    The extension class(es) must be packaged as a plugin, including the JARs of all dependencies except those + already provided by the Connect framework. + +

    To install into a Connect installation, add a directory named for the plugin and containing the plugin's JARs into a directory that is + on Connect's plugin.path, and (re)start the Connect worker. + +

    When the Connect worker process starts up, it will read its configuration and instantiate all of the REST extension implementation + classes that are specified in the `rest.extension.classes` configuration property. Connect will then pass its configuration to each + extension via the Configurable.configure(Map) method, and will then call register(org.apache.kafka.connect.rest.ConnectRestExtensionContext) with a provided context. + +

    When the Connect worker shuts down, it will call the extension's Closeable.close() method to allow the implementation to release all of + its resources. + +

    Implement Monitorable to enable the extension to register metrics. + The following tags are automatically added to all metrics registered: config set to + rest.extension.classes, and class set to the ConnectRestExtension class name.
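A skeleton implementation, for illustration only; HealthResource is a hypothetical JAX-RS resource, and the class must additionally be named in META-INF/services/org.apache.kafka.connect.rest.ConnectRestExtension and packaged on the worker's plugin.path:

import java.io.IOException;
import java.util.Map;
import org.apache.kafka.connect.health.ConnectClusterState;
import org.apache.kafka.connect.rest.ConnectRestExtension;
import org.apache.kafka.connect.rest.ConnectRestExtensionContext;

public class HealthRestExtension implements ConnectRestExtension {

    @Override
    public void configure(Map<String, ?> configs) {
        // Receives the worker configuration before register() is called.
    }

    @Override
    public void register(ConnectRestExtensionContext restPluginContext) {
        ConnectClusterState clusterState = restPluginContext.clusterState();
        // Register a custom JAX-RS resource with the framework-provided Configurable.
        restPluginContext.configurable().register(new HealthResource(clusterState));
    }

    @Override
    public void close() throws IOException {
        // Release any resources the extension holds.
    }

    @Override
    public String version() {
        return "1.0.0";
    }

    // Hypothetical resource; a real one would carry JAX-RS annotations such as @Path and @GET.
    static class HealthResource {
        private final ConnectClusterState clusterState;
        HealthResource(ConnectClusterState clusterState) {
            this.clusterState = clusterState;
        }
    }
}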

    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      ConnectRestExtension implementations can register custom JAX-RS resources via this method.
      +
      +
      +
      +
      +
      +

      Methods inherited from interface java.io.Closeable

      +close
      +
      +

      Methods inherited from interface org.apache.kafka.common.Configurable

      +configure
      +
      +

      Methods inherited from interface org.apache.kafka.connect.components.Versioned

      +version
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        register

        +
        void register(ConnectRestExtensionContext restPluginContext)
        +
        ConnectRestExtension implementations can register custom JAX-RS resources via this method. The Connect framework + will invoke this method after registering the default Connect resources. If the implementations attempt + to re-register any of the Connect resources, it will be ignored and will be logged.
        +
        +
        Parameters:
        +
restPluginContext - the context that provides access to the JAX-RS Configurable and ConnectClusterState. Custom JAX-RS resources can be registered via ConnectRestExtensionContext.configurable()
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/rest/ConnectRestExtensionContext.html b/static/41/javadoc/org/apache/kafka/connect/rest/ConnectRestExtensionContext.html new file mode 100644 index 000000000..6f3041b2d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/rest/ConnectRestExtensionContext.html @@ -0,0 +1,150 @@ + + + + +ConnectRestExtensionContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ConnectRestExtensionContext

    +
    +
    +
    +
    public interface ConnectRestExtensionContext
    +
    The interface provides the ability for ConnectRestExtension implementations to access the JAX-RS + Configurable and cluster state ConnectClusterState. The implementation for the interface is provided + by the Connect framework.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
      Provides the cluster state and health information about the connectors and tasks.
      +
      +
      jakarta.ws.rs.core.Configurable<? extends jakarta.ws.rs.core.Configurable<?>>
      + +
      +
      Provides an implementation of Configurable that can be used to register JAX-RS resources.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configurable

        +
        jakarta.ws.rs.core.Configurable<? extends jakarta.ws.rs.core.Configurable<?>> configurable()
        +
        Provides an implementation of Configurable that can be used to register JAX-RS resources.
        +
        +
        Returns:
        +
        the JAX-RS Configurable; never null
        +
        +
        +
      • +
      • +
        +

        clusterState

        +
        ConnectClusterState clusterState()
        +
        Provides the cluster state and health information about the connectors and tasks.
        +
        +
        Returns:
        +
        the cluster state information; never null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/rest/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/rest/package-summary.html new file mode 100644 index 000000000..03bed8f40 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/rest/package-summary.html @@ -0,0 +1,92 @@ + + + + +org.apache.kafka.connect.rest (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.connect.rest

    +
    +
    +
    package org.apache.kafka.connect.rest
    +
    +
    Provides a pluggable interface for altering the behavior of the Connect REST API.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/rest/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/rest/package-tree.html new file mode 100644 index 000000000..dc15f574e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/rest/package-tree.html @@ -0,0 +1,86 @@ + + + + +org.apache.kafka.connect.rest Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.connect.rest

    +Package Hierarchies: + +
    +
    +

    Interface Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/sink/ErrantRecordReporter.html b/static/41/javadoc/org/apache/kafka/connect/sink/ErrantRecordReporter.html new file mode 100644 index 000000000..615942612 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/sink/ErrantRecordReporter.html @@ -0,0 +1,160 @@ + + + + +ErrantRecordReporter (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ErrantRecordReporter

    +
    +
    +
    +
    public interface ErrantRecordReporter
    +
    Component that a SinkTask can use to report problematic records (and their corresponding problems) as it + writes them through SinkTask.put(java.util.Collection).
    +
    +
    Since:
    +
    2.6
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      report(SinkRecord record, + Throwable error)
      +
      +
      Report a problematic record and the corresponding error to be written to the sink + connector's dead letter queue (DLQ).
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        report

        +
        Future<Void> report(SinkRecord record, + Throwable error)
        +
        Report a problematic record and the corresponding error to be written to the sink + connector's dead letter queue (DLQ). +

        + This call is asynchronous and returns a Future. + Invoking get() on this future will block until the + record has been written or throw any exception that occurred while sending the record. + If you want to simulate a simple blocking call you can call the get() method + immediately. +

        + Connect guarantees that sink records reported through this reporter will be written to the error topic + before the framework calls the SinkTask.preCommit(java.util.Map) method and therefore before + committing the consumer offsets. SinkTask implementations can use the Future when stronger guarantees + are required.

        +
        +
        Parameters:
        +
        record - the problematic record; may not be null
        +
        error - the error capturing the problem with the record; may not be null
        +
        Returns:
        +
        a future that can be used to block until the record and error are reported + to the DLQ
        +
        Throws:
        +
ConnectException - if the error reporter and DLQ fail to write a reported record
        +
        Since:
        +
        2.6
        +
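A sketch of how a sink task might use the reporter from within put(); the reporter is assumed to be obtained via SinkTaskContext.errantRecordReporter(), which returns null when no error reporting is configured (and does not exist at all on pre-2.6 workers):

import java.util.Collection;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.sink.ErrantRecordReporter;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

public abstract class DlqAwareSinkTask extends SinkTask {

    @Override
    public void put(Collection<SinkRecord> records) {
        ErrantRecordReporter reporter = context.errantRecordReporter();
        for (SinkRecord record : records) {
            try {
                write(record);
            } catch (Exception e) {
                if (reporter != null) {
                    // Fire-and-forget; preCommit() will not be called until reported
                    // records have been written to the DLQ.
                    reporter.report(record, e);
                } else {
                    throw new ConnectException("Failed to write record", e);
                }
            }
        }
    }

    // Hypothetical delivery method implemented by the concrete task.
    protected abstract void write(SinkRecord record) throws Exception;
}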
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/sink/SinkConnector.html b/static/41/javadoc/org/apache/kafka/connect/sink/SinkConnector.html new file mode 100644 index 000000000..49fa6cc90 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/sink/SinkConnector.html @@ -0,0 +1,266 @@ + + + + +SinkConnector (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SinkConnector

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.connector.Connector +
    org.apache.kafka.connect.sink.SinkConnector
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Versioned
    +
    +
    +
    Direct Known Subclasses:
    +
    MockSinkConnector, VerifiableSinkConnector
    +
    +
    +
    public abstract class SinkConnector +extends Connector
    +
    SinkConnectors implement the Connector interface to send Kafka data to another system. +

    Kafka Connect may discover implementations of this interface using the Java ServiceLoader mechanism. + To support this, implementations of this interface should also contain a service provider configuration file in + META-INF/services/org.apache.kafka.connect.sink.SinkConnector.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        TOPICS_CONFIG

        +
        public static final String TOPICS_CONFIG
        +

        + Configuration key for the list of input topics for this connector. +

        +

        + Usually this setting is only relevant to the Kafka Connect framework, but is provided here for + the convenience of Connector developers if they also need to know the set of topics. +

        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SinkConnector

        +
        public SinkConnector()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        alterOffsets

        +
        public boolean alterOffsets(Map<String,String> connectorConfig, + Map<TopicPartition,Long> offsets)
        +
        Invoked when users request to manually alter/reset the offsets for this connector via the Connect worker's REST + API. Connectors that manage offsets externally can propagate offset changes to their external system in this + method. Connectors may also validate these offsets if, for example, an offset is out of range for what can be + feasibly written to the external system. +

        + Connectors that neither manage offsets externally nor require custom offset validation need not implement this + method beyond simply returning true. +

        + User requests to alter/reset offsets will be handled by the Connect runtime and will be reflected in the offsets + for this connector's consumer group. +

        + Note that altering / resetting offsets is expected to be an idempotent operation and this method should be able + to handle being called more than once with the same arguments (which could occur if a user retries the request + due to a failure in altering the consumer group offsets, for example). +

        + Similar to validate, this method may be called by the runtime before the + start method is invoked.

        +
        +
        Parameters:
        +
        connectorConfig - the configuration of the connector
        +
        offsets - a map from topic partition to offset, containing the offsets that the user has requested to + alter/reset. For any topic partitions whose offsets are being reset instead of altered, their + corresponding value in the map will be null. This map may be empty, but never null. An + empty offsets map could indicate that the offsets were reset previously or that no offsets have + been committed yet.
        +
        Returns:
        +
        whether this method has been overridden by the connector; the default implementation returns + false, and all other implementations (that do not unconditionally throw exceptions) should return + true
        +
        Throws:
        +
        UnsupportedOperationException - if it is impossible to alter/reset the offsets for this connector
        +
        ConnectException - if the offsets for this connector cannot be + reset for any other reason (for example, they have failed custom validation logic specific to this connector)
        +
        Since:
        +
        3.6
        +
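A sketch of a connector that keeps the default offset handling but adds custom validation; anything beyond the signature shown above is illustrative:

import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.sink.SinkConnector;

public abstract class ValidatingSinkConnector extends SinkConnector {

    @Override
    public boolean alterOffsets(Map<String, String> connectorConfig, Map<TopicPartition, Long> offsets) {
        for (Map.Entry<TopicPartition, Long> entry : offsets.entrySet()) {
            Long offset = entry.getValue();
            // A null value means "reset" and is always acceptable here.
            if (offset != null && offset < 0) {
                throw new ConnectException("Invalid offset " + offset + " for " + entry.getKey());
            }
        }
        // Returning true signals that the connector supports (and has handled) the request.
        return true;
    }
}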
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/sink/SinkConnectorContext.html b/static/41/javadoc/org/apache/kafka/connect/sink/SinkConnectorContext.html new file mode 100644 index 000000000..910bf991d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/sink/SinkConnectorContext.html @@ -0,0 +1,98 @@ + + + + +SinkConnectorContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface SinkConnectorContext

    +
    +
    +
    +
    All Superinterfaces:
    +
    ConnectorContext
    +
    +
    +
    public interface SinkConnectorContext +extends ConnectorContext
    +
    A context to allow a SinkConnector to interact with the Kafka Connect runtime.
    +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/sink/SinkRecord.html b/static/41/javadoc/org/apache/kafka/connect/sink/SinkRecord.html new file mode 100644 index 000000000..e38f235ff --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/sink/SinkRecord.html @@ -0,0 +1,520 @@ + + + + +SinkRecord (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SinkRecord

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.connector.ConnectRecord<SinkRecord> +
    org.apache.kafka.connect.sink.SinkRecord
    +
    +
    +
    +
    +
    public class SinkRecord +extends ConnectRecord<SinkRecord>
    +
    SinkRecord is a ConnectRecord that has been read from Kafka and includes the original Kafka record's + topic, partition and offset (before any transformations have been applied) + in addition to the standard fields. This information should be used by the SinkTask to coordinate + offset commits. +

    + It also includes the TimestampType, which may be TimestampType.NO_TIMESTAMP_TYPE, and the relevant + timestamp, which may be null.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SinkRecord

        +
        public SinkRecord(String topic, + int partition, + Schema keySchema, + Object key, + Schema valueSchema, + Object value, + long kafkaOffset)
        +
        +
      • +
      • +
        +

        SinkRecord

        +
        public SinkRecord(String topic, + int partition, + Schema keySchema, + Object key, + Schema valueSchema, + Object value, + long kafkaOffset, + Long timestamp, + org.apache.kafka.common.record.TimestampType timestampType)
        +
        +
      • +
      • +
        +

        SinkRecord

        +
        public SinkRecord(String topic, + int partition, + Schema keySchema, + Object key, + Schema valueSchema, + Object value, + long kafkaOffset, + Long timestamp, + org.apache.kafka.common.record.TimestampType timestampType, + Iterable<Header> headers)
        +
        +
      • +
      • +
        +

        SinkRecord

        +
        public SinkRecord(String topic, + int partition, + Schema keySchema, + Object key, + Schema valueSchema, + Object value, + long kafkaOffset, + Long timestamp, + org.apache.kafka.common.record.TimestampType timestampType, + Iterable<Header> headers, + String originalTopic, + Integer originalKafkaPartition, + long originalKafkaOffset)
        +
        This constructor is intended for use by the Connect runtime only and plugins (sink connectors or transformations) + should not use this directly outside testing code.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        kafkaOffset

        +
        public long kafkaOffset()
        +
        +
      • +
      • +
        +

        timestampType

        +
        public org.apache.kafka.common.record.TimestampType timestampType()
        +
        +
      • +
      • +
        +

        originalTopic

        +
        public String originalTopic()
        +
        Get the original topic for this sink record, before any transformations were applied. + In order to be compatible with transformations that mutate topic names, this method should be used + by sink tasks instead of ConnectRecord.topic() for any internal offset tracking purposes (for instance, reporting + offsets to the Connect runtime via SinkTask.preCommit(Map)). +

        + This method was added in Apache Kafka 3.6. Sink connectors that use this method but want to maintain backward + compatibility in order to be able to be deployed on older Connect runtimes should guard the call to this method + with a try-catch block, since calling this method will result in a NoSuchMethodError when the sink + connector is deployed to Connect runtimes older than Kafka 3.6. + For example: +

        
        + String originalTopic;
        + try {
        +     originalTopic = record.originalTopic();
        + } catch (NoSuchMethodError e) {
        +     log.warn("This connector is not compatible with SMTs that mutate topic names, topic partitions or offset values on this version of Kafka Connect");
        +     originalTopic = record.topic();
        + }
        + 
        + 
        +

        + Note that sink connectors that do their own offset tracking will be incompatible with SMTs that mutate topic + names when deployed to older Connect runtimes that do not support this method.

        +
        +
        Returns:
        +
        the topic for this record before any transformations were applied
        +
        Since:
        +
        3.6
        +
        +
        +
      • +
      • +
        +

        originalKafkaPartition

        +
        public Integer originalKafkaPartition()
        +
        Get the original topic partition for this sink record, before any transformations were applied. + In order to be compatible with transformations that mutate topic partitions, this method should be used + by sink tasks instead of ConnectRecord.kafkaPartition() for any internal offset tracking purposes (for instance, reporting + offsets to the Connect runtime via SinkTask.preCommit(Map)). +

        + This method was added in Apache Kafka 3.6. Sink connectors that use this method but want to maintain backward + compatibility in order to be able to be deployed on older Connect runtimes should guard the call to this method + with a try-catch block, since calling this method will result in a NoSuchMethodError when the sink + connector is deployed to Connect runtimes older than Kafka 3.6. + For example: +

        
        + String originalKafkaPartition;
        + try {
        +     originalKafkaPartition = record.originalKafkaPartition();
        + } catch (NoSuchMethodError e) {
        +     log.warn("This connector is not compatible with SMTs that mutate topic names, topic partitions or offset values on this version of Kafka Connect");
        +     originalKafkaPartition = record.kafkaPartition();
        + }
        + 
        + 
        +

        + Note that sink connectors that do their own offset tracking will be incompatible with SMTs that mutate topic + partitions when deployed to older Connect runtimes that do not support this method.

        +
        +
        Returns:
        +
        the topic partition for this record before any transformations were applied
        +
        Since:
        +
        3.6
        +
        +
        +
      • +
      • +
        +

        originalKafkaOffset

        +
        public long originalKafkaOffset()
        +
        Get the original offset for this sink record, before any transformations were applied. + In order to be compatible with transformations that mutate offset values, this method should be used + by sink tasks instead of kafkaOffset() for any internal offset tracking purposes (for instance, reporting + offsets to the Connect runtime via SinkTask.preCommit(Map)). +

        + This method was added in Apache Kafka 3.6. Sink connectors that use this method but want to maintain backward + compatibility in order to be able to be deployed on older Connect runtimes should guard the call to this method + with a try-catch block, since calling this method will result in a NoSuchMethodError when the sink + connector is deployed to Connect runtimes older than Kafka 3.6. + For example: +

        
        + String originalKafkaOffset;
        + try {
        +     originalKafkaOffset = record.originalKafkaOffset();
        + } catch (NoSuchMethodError e) {
        +     log.warn("This connector is not compatible with SMTs that mutate topic names, topic partitions or offset values on this version of Kafka Connect");
        +     originalKafkaOffset = record.kafkaOffset();
        + }
        + 
        + 
        +

        + Note that sink connectors that do their own offset tracking will be incompatible with SMTs that mutate offset + values when deployed to older Connect runtimes that do not support this method.

        +
        +
        Returns:
        +
        the offset for this record before any transformations were applied
        +
        Since:
        +
        3.6
        +
        +
        +
      • +
      • +
        +

        newRecord

        +
        public SinkRecord newRecord(String topic, + Integer kafkaPartition, + Schema keySchema, + Object key, + Schema valueSchema, + Object value, + Long timestamp)
        +
        Description copied from class: ConnectRecord
        +
        Create a new record of the same type as itself, with the specified parameter values. All other fields in this record will be copied + over to the new record. Since the headers are mutable, the resulting record will have a copy of this record's headers.
        +
        +
        Specified by:
        +
        newRecord in class ConnectRecord<SinkRecord>
        +
        Parameters:
        +
        topic - the name of the topic; may be null
        +
        kafkaPartition - the partition number for the Kafka topic; may be null
        +
        keySchema - the schema for the key; may be null
        +
        key - the key; may be null
        +
        valueSchema - the schema for the value; may be null
        +
        value - the value; may be null
        +
        timestamp - the timestamp; may be null
        +
        Returns:
        +
        the new record
        +
        +
        +
      • +
      • +
        +

        newRecord

        +
        public SinkRecord newRecord(String topic, + Integer kafkaPartition, + Schema keySchema, + Object key, + Schema valueSchema, + Object value, + Long timestamp, + Iterable<Header> headers)
        +
        Description copied from class: ConnectRecord
        +
        Create a new record of the same type as itself, with the specified parameter values. All other fields in this record will be copied + over to the new record.
        +
        +
        Specified by:
        +
        newRecord in class ConnectRecord<SinkRecord>
        +
        Parameters:
        +
        topic - the name of the topic; may be null
        +
        kafkaPartition - the partition number for the Kafka topic; may be null
        +
        keySchema - the schema for the key; may be null
        +
        key - the key; may be null
        +
        valueSchema - the schema for the value; may be null
        +
        value - the value; may be null
        +
        timestamp - the timestamp; may be null
        +
        headers - the headers; may be null or empty
        +
        Returns:
        +
        the new record
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class ConnectRecord<SinkRecord>
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class ConnectRecord<SinkRecord>
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class ConnectRecord<SinkRecord>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/sink/SinkTask.html b/static/41/javadoc/org/apache/kafka/connect/sink/SinkTask.html new file mode 100644 index 000000000..2de1816b1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/sink/SinkTask.html @@ -0,0 +1,432 @@ + + + + +SinkTask (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SinkTask

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.sink.SinkTask
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Task
    +
    +
    +
    Direct Known Subclasses:
    +
    MockSinkTask, VerifiableSinkTask
    +
    +
    +
    public abstract class SinkTask +extends Object +implements Task
    +
    SinkTask is a Task that takes records loaded from Kafka and sends them to another system. Each task + instance is assigned a set of partitions by the Connect framework and will handle all records received + from those partitions. As records are fetched from Kafka, they will be passed to the sink task using the + put(Collection) API, which should either write them to the downstream system or batch them for + later writing. Periodically, Connect will call flush(Map) to ensure that batched records are + actually pushed to the downstream system. +

    + Below we describe the lifecycle of a SinkTask. + +

      +
    1. Initialization: SinkTasks are first initialized using initialize(SinkTaskContext) + to prepare the task's context and start(Map) to accept configuration and start any services + needed for processing.
    2. +
    3. Partition Assignment: After initialization, Connect will assign the task a set of partitions + using open(Collection). These partitions are owned exclusively by this task until they + have been closed with close(Collection).
    4. +
    5. Record Processing: Once partitions have been opened for writing, Connect will begin forwarding + records from Kafka using the put(Collection) API. Periodically, Connect will ask the task + to flush records using flush(Map) as described above.
    6. +
    7. Partition Rebalancing: Occasionally, Connect will need to change the assignment of this task. + When this happens, the currently assigned partitions will be closed with close(Collection) and + the new assignment will be opened using open(Collection).
    8. +
9. Shutdown: When the task needs to be shut down, Connect will close active partitions (if there + are any) and stop the task using stop().
    10. +
    +
    +
    + +
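To make the lifecycle above concrete, here is a minimal sketch of a task (a hypothetical PrintSinkTask, not shipped with Kafka) that writes records to standard output; a real connector would replace the println with calls to its sink system's client.

    import java.util.Collection;
    import java.util.Map;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.connect.sink.SinkRecord;
    import org.apache.kafka.connect.sink.SinkTask;

    // Hypothetical task that just prints records; writer setup and teardown are stubbed.
    public class PrintSinkTask extends SinkTask {

        @Override
        public String version() {
            return "0.0.1";
        }

        @Override
        public void start(Map<String, String> props) {
            // Parse configuration and create any long-lived clients here.
        }

        @Override
        public void open(Collection<TopicPartition> partitions) {
            // Called after a (re)assignment: create per-partition writers if needed.
        }

        @Override
        public void put(Collection<SinkRecord> records) {
            // Either write synchronously or buffer for a later flush()/preCommit().
            for (SinkRecord record : records) {
                System.out.printf("%s-%d@%d: %s%n",
                        record.topic(), record.kafkaPartition(),
                        record.kafkaOffset(), record.value());
            }
        }

        @Override
        public void flush(Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
            // Push any buffered records before Connect commits consumer offsets.
        }

        @Override
        public void close(Collection<TopicPartition> partitions) {
            // Release per-partition resources before the partitions are revoked.
        }

        @Override
        public void stop() {
            // Final cleanup, e.g. closing network connections to the sink system.
        }
    }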
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        TOPICS_CONFIG

        +
        public static final String TOPICS_CONFIG
        +

        + The configuration key that provides the list of topics that are inputs for this + SinkTask. +

        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        TOPICS_REGEX_CONFIG

        +
        public static final String TOPICS_REGEX_CONFIG
        +

        + The configuration key that provides a regex specifying which topics to include as inputs + for this SinkTask. +

        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SinkTask

        +
        public SinkTask()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        initialize

        +
        public void initialize(SinkTaskContext context)
        +
        Initialize the context of this task. Note that the partition assignment will be empty until + Connect has opened the partitions for writing with open(Collection).
        +
        +
        Parameters:
        +
        context - The sink task's context
        +
        +
        +
      • +
      • +
        +

        start

        +
        public abstract void start(Map<String,String> props)
        +
        Start the Task. This should handle any configuration parsing and one-time setup of the task.
        +
        +
        Specified by:
        +
        start in interface Task
        +
        Parameters:
        +
        props - initial configuration
        +
        +
        +
      • +
      • +
        +

        put

        +
        public abstract void put(Collection<SinkRecord> records)
        +
        Put the records in the sink. This should either write them to the downstream system or batch them for + later writing. If this method returns before the records are written to the downstream system, the task must + implement flush(Map) or preCommit(Map) to ensure that offsets are only committed for records + that have been written to the downstream system (hence avoiding data loss during failures). +

        + If this operation fails, the SinkTask may throw a RetriableException to + indicate that the framework should attempt to retry the same call again. Other exceptions will cause the task to + be stopped immediately. SinkTaskContext.timeout(long) can be used to set the maximum time before the + batch will be retried.

        +
        +
        Parameters:
        +
        records - the collection of records to send
        +
        +
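A rough sketch of the retry behaviour described above, assuming a hypothetical RetryingSinkTask whose writeBatch helper stands in for the real sink client and may fail with a transient TimeoutException:

    import java.util.Collection;
    import java.util.concurrent.TimeoutException;
    import org.apache.kafka.connect.errors.RetriableException;
    import org.apache.kafka.connect.sink.SinkRecord;
    import org.apache.kafka.connect.sink.SinkTask;

    // Hypothetical task: writeBatch is an assumed helper that talks to the sink system.
    public abstract class RetryingSinkTask extends SinkTask {

        @Override
        public void put(Collection<SinkRecord> records) {
            if (records.isEmpty()) {
                return;
            }
            try {
                writeBatch(records);
            } catch (TimeoutException e) {  // transient failure from the sink client
                context.timeout(5_000);     // ask Connect to wait 5s before retrying this batch
                throw new RetriableException(e);
            }
        }

        protected abstract void writeBatch(Collection<SinkRecord> records) throws TimeoutException;
    }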
        +
      • +
      • +
        +

        flush

        +
        public void flush(Map<TopicPartition,OffsetAndMetadata> currentOffsets)
        +
        Flush all records that have been put(Collection) for the specified topic-partitions.
        +
        +
        Parameters:
        +
        currentOffsets - the current offset state as of the last call to put(Collection), provided for + convenience but could also be determined by tracking all offsets included in the + SinkRecords passed to put(java.util.Collection<org.apache.kafka.connect.sink.SinkRecord>). Note that the topic, partition and offset + here correspond to the original Kafka topic partition and offset, before any + transformations have been applied. These can be tracked by the task + through the SinkRecord.originalTopic(), SinkRecord.originalKafkaPartition() + and SinkRecord.originalKafkaOffset() methods.
        +
        +
        +
      • +
      • +
        +

        preCommit

        + +
        Pre-commit hook invoked prior to an offset commit. +

        + The default implementation simply invokes flush(Map) and is thus able to assume all currentOffsets + are safe to commit.

        +
        +
        Parameters:
        +
        currentOffsets - the current offset state as of the last call to put(Collection), provided for + convenience but could also be determined by tracking all offsets included in the + SinkRecords passed to put(java.util.Collection<org.apache.kafka.connect.sink.SinkRecord>). Note that the topic, partition and offset + here correspond to the original Kafka topic partition and offset, before any + transformations have been applied. These can be tracked by the task + through the SinkRecord.originalTopic(), SinkRecord.originalKafkaPartition() + and SinkRecord.originalKafkaOffset() methods.
        +
        Returns:
        +
        an empty map if Connect-managed offset commit is not desired, otherwise a map of offsets by topic-partition that are + safe to commit. Note that the returned topic-partition to offsets map should use the original Kafka + topic partitions and offsets instead of the transformed values.
        +
        +
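The sketch below shows one plausible use of preCommit, via a hypothetical AsyncSinkTask (not part of the Kafka API) that reports only the offsets it has tracked itself, using the original (pre-transformation) topic, partition and offset as described above. A real task would update the committable map from write acknowledgements rather than directly in put.

    import java.util.Collection;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.connect.sink.SinkRecord;
    import org.apache.kafka.connect.sink.SinkTask;

    // Hypothetical task that writes asynchronously and only reports offsets for
    // records it considers durably written.
    public abstract class AsyncSinkTask extends SinkTask {

        private final Map<TopicPartition, OffsetAndMetadata> committable = new HashMap<>();

        @Override
        public void put(Collection<SinkRecord> records) {
            for (SinkRecord record : records) {
                writeAsync(record);  // assumed helper that hands the record to the sink
                // Track offsets using the original (pre-transformation) coordinates;
                // updated here for brevity, ideally from the write acknowledgement.
                TopicPartition tp = new TopicPartition(
                        record.originalTopic(), record.originalKafkaPartition());
                committable.put(tp, new OffsetAndMetadata(record.originalKafkaOffset() + 1));
            }
        }

        @Override
        public Map<TopicPartition, OffsetAndMetadata> preCommit(
                Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
            // Ignore currentOffsets and only commit what this task has tracked itself.
            return new HashMap<>(committable);
        }

        protected abstract void writeAsync(SinkRecord record);
    }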
        +
      • +
      • +
        +

        open

        +
        public void open(Collection<TopicPartition> partitions)
        +
        The SinkTask uses this method to create writers for newly assigned partitions in case of partition + rebalance. This method will be called after partition re-assignment completes and before the SinkTask starts + fetching data. Any errors raised from this method will cause the task to stop. +

        + Note that the topic partitions here correspond to the original Kafka topic partitions, before any + transformations have been applied.

        +
        +
        Parameters:
        +
        partitions - The list of partitions that are now assigned to the task (may include + partitions previously assigned to the task)
        +
        +
        +
      • +
      • +
        +

        close

        +
        public void close(Collection<TopicPartition> partitions)
        +
        The SinkTask uses this method to close writers for partitions that are no + longer assigned to the SinkTask. This method will be called before a rebalance operation starts + and after the SinkTask stops fetching data. After being closed, Connect will not write + any records to the task until a new set of partitions has been opened. Any errors raised + from this method will cause the task to stop. +

        + Note that the topic partitions here correspond to the original Kafka topic partitions, before any + transformations have been applied.

        +
        +
        Parameters:
        +
        partitions - The list of partitions that should be closed
        +
        +
        +
      • +
      • +
        +

        stop

        +
        public abstract void stop()
        +
        Perform any cleanup to stop this task. In SinkTasks, this method is invoked only once outstanding calls to other + methods have completed (e.g., put(Collection) has returned) and a final flush(Map) and offset + commit has completed. Implementations of this method should only need to perform final cleanup operations, such + as closing network connections to the sink system.
        +
        +
        Specified by:
        +
        stop in interface Task
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/sink/SinkTaskContext.html b/static/41/javadoc/org/apache/kafka/connect/sink/SinkTaskContext.html new file mode 100644 index 000000000..43d4c550a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/sink/SinkTaskContext.html @@ -0,0 +1,328 @@ + + + + +SinkTaskContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface SinkTaskContext

    +
    +
    +
    +
    public interface SinkTaskContext
    +
    Context passed to SinkTasks, allowing them to access utilities in the Kafka Connect runtime.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configs

        +
        Map<String,String> configs()
        +
        Get the Task configuration. This is the latest configuration and may differ from that passed on startup. +

        + For example, this method can be used to obtain the latest configuration if an external secret has changed, + and the configuration is using variable references such as those compatible with + ConfigTransformer.

        +
        +
      • +
      • +
        +

        offset

        +
        void offset(Map<TopicPartition,Long> offsets)
        +
        Reset the consumer offsets for the given topic partitions. SinkTasks should use this if they manage offsets + in the sink data store rather than using Kafka consumer offsets. For example, an HDFS connector might record + offsets in HDFS to provide exactly once delivery. When the SinkTask is started or a rebalance occurs, the task + would reload offsets from HDFS and use this method to reset the consumer to those offsets. +

        + SinkTasks that do not manage their own offsets do not need to use this method.

        +
        +
        Parameters:
        +
        offsets - map of offsets for topic partitions
        +
        +
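As a sketch of the pattern described above, a hypothetical ExternallyTrackedSinkTask could rewind the consumer in open by reloading positions from the sink store (loadOffsetFromSink is an assumed helper, not part of the Kafka API):

    import java.util.Collection;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.connect.sink.SinkTask;

    // Hypothetical task that stores offsets in the sink system itself.
    public abstract class ExternallyTrackedSinkTask extends SinkTask {

        @Override
        public void open(Collection<TopicPartition> partitions) {
            Map<TopicPartition, Long> offsets = new HashMap<>();
            for (TopicPartition tp : partitions) {
                Long stored = loadOffsetFromSink(tp);  // lookup in the sink's own offset store
                if (stored != null) {
                    offsets.put(tp, stored);
                }
            }
            // Rewind the consumer so delivery resumes from the externally stored positions.
            context.offset(offsets);
        }

        protected abstract Long loadOffsetFromSink(TopicPartition tp);
    }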
        +
      • +
      • +
        +

        offset

        +
        void offset(TopicPartition tp, + long offset)
        +
        Reset the consumer offsets for the given topic partition. SinkTasks should use this if they manage offsets + in the sink data store rather than using Kafka consumer offsets. For example, an HDFS connector might record + offsets in HDFS to provide exactly once delivery. When the topic partition is recovered the task + would reload offsets from HDFS and use this method to reset the consumer to the offset. +

        + SinkTasks that do not manage their own offsets do not need to use this method.

        +
        +
        Parameters:
        +
tp - the topic partition whose offset should be reset.
        +
        offset - the offset to reset to.
        +
        +
        +
      • +
      • +
        +

        timeout

        +
        void timeout(long timeoutMs)
        +
        Set the timeout in milliseconds. SinkTasks should use this to indicate that they need to retry certain + operations after the timeout. SinkTasks may have certain operations on external systems that may need + to be retried in case of failures. For example, appending a record to an HDFS file may fail due to temporary + network issues. SinkTasks can use this method to set how long to wait before retrying.
        +
        +
        Parameters:
        +
        timeoutMs - the backoff timeout in milliseconds.
        +
        +
        +
      • +
      • +
        +

        assignment

        +
        Set<TopicPartition> assignment()
        +
        Get the current set of assigned TopicPartitions for this task.
        +
        +
        Returns:
        +
        the set of currently assigned TopicPartitions
        +
        +
        +
      • +
      • +
        +

        pause

        +
        void pause(TopicPartition... partitions)
        +
        Pause consumption of messages from the specified TopicPartitions.
        +
        +
        Parameters:
        +
        partitions - the partitions which should be paused
        +
        +
        +
      • +
      • +
        +

        resume

        +
        void resume(TopicPartition... partitions)
        +
        Resume consumption of messages from previously paused TopicPartitions.
        +
        +
        Parameters:
        +
        partitions - the partitions to resume
        +
        +
        +
      • +
      • +
        +

        requestCommit

        +
        void requestCommit()
        +
        Request an offset commit. Sink tasks can use this to minimize the potential for redelivery + by requesting an offset commit as soon as they flush data to the destination system. +

        + It is only a hint to the runtime and no timing guarantee should be assumed.

        +
        +
      • +
      • +
        +

        errantRecordReporter

        +
        default ErrantRecordReporter errantRecordReporter()
        +
        Get the reporter to which the sink task can report problematic or failed records + passed to the SinkTask.put(java.util.Collection) method. When reporting a failed record, + the sink task will receive a Future that the task can optionally use to wait until + the failed record and exception have been written to Kafka. Note that the result of + this method may be null if this connector has not been configured to use a reporter. +

        + This method was added in Apache Kafka 2.6. Sink tasks that use this method but want to + maintain backward compatibility so they can also be deployed to older Connect runtimes + should guard the call to this method with a try-catch block, since calling this method will result in a + NoSuchMethodError or NoClassDefFoundError when the sink connector is deployed to + Connect runtimes older than Kafka 2.6. For example: +

        +     ErrantRecordReporter reporter;
        +     try {
        +         reporter = context.errantRecordReporter();
        +     } catch (NoSuchMethodError | NoClassDefFoundError e) {
        +         reporter = null;
        +     }
        + 
        +
        +
        Returns:
        +
        the reporter; null if no error reporter has been configured for the connector
        +
        Since:
        +
        2.6
        +
        +
        +
      • +
      • +
        +

        pluginMetrics

        +
        PluginMetrics pluginMetrics()
        +
Get a PluginMetrics that can be used to define metrics for this task. + +

        This method was added in Apache Kafka 4.1. Tasks that use this method but want to + maintain backward compatibility so they can also be deployed to older Connect runtimes + should guard the call to this method with a try-catch block, since calling this method will result in a + NoSuchMethodError or NoClassDefFoundError when the connector is deployed to + Connect runtimes older than Kafka 4.1. For example: +

        +     PluginMetrics pluginMetrics;
        +     try {
        +         pluginMetrics = context.pluginMetrics();
        +     } catch (NoSuchMethodError | NoClassDefFoundError e) {
        +         pluginMetrics = null;
        +     }
        + 
        +
        +
        Returns:
        +
        the PluginMetrics instance
        +
        Since:
        +
        4.1
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/sink/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/sink/package-summary.html new file mode 100644 index 000000000..6ecf12f85 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/sink/package-summary.html @@ -0,0 +1,116 @@ + + + + +org.apache.kafka.connect.sink (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.connect.sink

    +
    +
    +
    package org.apache.kafka.connect.sink
    +
    +
    Provides an API for implementing sink connectors which write Kafka records to external applications.
    +
    +
    +
      +
    • +
      +
      +
      +
      +
      Class
      +
      Description
      + +
      +
      Component that a SinkTask can use to report problematic records (and their corresponding problems) as it + writes them through SinkTask.put(java.util.Collection).
      +
      + +
      +
      SinkConnectors implement the Connector interface to send Kafka data to another system.
      +
      + +
      +
      A context to allow a SinkConnector to interact with the Kafka Connect runtime.
      +
      + +
      +
      SinkRecord is a ConnectRecord that has been read from Kafka and includes the original Kafka record's + topic, partition and offset (before any transformations have been applied) + in addition to the standard fields.
      +
      + +
      +
      SinkTask is a Task that takes records loaded from Kafka and sends them to another system.
      +
      + +
      +
      Context passed to SinkTasks, allowing them to access utilities in the Kafka Connect runtime.
      +
      +
      +
      +
      +
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/sink/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/sink/package-tree.html new file mode 100644 index 000000000..7393f1f90 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/sink/package-tree.html @@ -0,0 +1,93 @@ + + + + +org.apache.kafka.connect.sink Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.connect.sink

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    +
      +
    • java.lang.Object +
        +
      • org.apache.kafka.connect.connector.Connector (implements org.apache.kafka.connect.components.Versioned) + +
      • +
      • org.apache.kafka.connect.connector.ConnectRecord<R> + +
      • +
      • org.apache.kafka.connect.sink.SinkTask (implements org.apache.kafka.connect.connector.Task)
      • +
      +
    • +
    +
    +
    +

    Interface Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/source/ConnectorTransactionBoundaries.html b/static/41/javadoc/org/apache/kafka/connect/source/ConnectorTransactionBoundaries.html new file mode 100644 index 000000000..f4ee7ad85 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/source/ConnectorTransactionBoundaries.html @@ -0,0 +1,224 @@ + + + + +ConnectorTransactionBoundaries (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class ConnectorTransactionBoundaries

    +
    +
    java.lang.Object +
    java.lang.Enum<ConnectorTransactionBoundaries> +
    org.apache.kafka.connect.source.ConnectorTransactionBoundaries
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<ConnectorTransactionBoundaries>, Constable
    +
    +
    +
    public enum ConnectorTransactionBoundaries +extends Enum<ConnectorTransactionBoundaries>
    +
    An enum to represent the level of support for connector-defined transaction boundaries.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      +
        +
      • +
        +

        SUPPORTED

        +
        public static final ConnectorTransactionBoundaries SUPPORTED
        +
        Signals that a connector can define its own transaction boundaries.
        +
        +
      • +
      • +
        +

        UNSUPPORTED

        +
        public static final ConnectorTransactionBoundaries UNSUPPORTED
        +
        Signals that a connector cannot define its own transaction boundaries.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static ConnectorTransactionBoundaries[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static ConnectorTransactionBoundaries valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/source/ExactlyOnceSupport.html b/static/41/javadoc/org/apache/kafka/connect/source/ExactlyOnceSupport.html new file mode 100644 index 000000000..d58997bd2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/source/ExactlyOnceSupport.html @@ -0,0 +1,224 @@ + + + + +ExactlyOnceSupport (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class ExactlyOnceSupport

    +
    +
    java.lang.Object +
    java.lang.Enum<ExactlyOnceSupport> +
    org.apache.kafka.connect.source.ExactlyOnceSupport
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<ExactlyOnceSupport>, Constable
    +
    +
    +
    public enum ExactlyOnceSupport +extends Enum<ExactlyOnceSupport>
    +
    An enum to represent the level of support for exactly-once semantics from a source connector.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      +
        +
      • +
        +

        SUPPORTED

        +
        public static final ExactlyOnceSupport SUPPORTED
        +
        Signals that a connector supports exactly-once semantics.
        +
        +
      • +
      • +
        +

        UNSUPPORTED

        +
        public static final ExactlyOnceSupport UNSUPPORTED
        +
        Signals that a connector does not support exactly-once semantics.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static ExactlyOnceSupport[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static ExactlyOnceSupport valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/source/SourceConnector.html b/static/41/javadoc/org/apache/kafka/connect/source/SourceConnector.html new file mode 100644 index 000000000..8e921a37f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/source/SourceConnector.html @@ -0,0 +1,286 @@ + + + + +SourceConnector (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SourceConnector

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.connector.Connector +
    org.apache.kafka.connect.source.SourceConnector
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Versioned
    +
    +
    +
    Direct Known Subclasses:
    +
    MockSourceConnector, SchemaSourceConnector, VerifiableSourceConnector
    +
    +
    +
    public abstract class SourceConnector +extends Connector
    +
    SourceConnectors implement the connector interface to pull data from another system and send + it to Kafka. +

    Kafka Connect may discover implementations of this interface using the Java ServiceLoader mechanism. + To support this, implementations of this interface should also contain a service provider configuration file in + META-INF/services/org.apache.kafka.connect.source.SourceConnector.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SourceConnector

        +
        public SourceConnector()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        exactlyOnceSupport

        +
        public ExactlyOnceSupport exactlyOnceSupport(Map<String,String> connectorConfig)
        +
        Signals whether the connector supports exactly-once semantics with a proposed configuration. + Connector authors can assume that worker-level exactly-once support is enabled when this method is invoked. + +

        For backwards compatibility, the default implementation will return null, but connector authors are + strongly encouraged to override this method to return a non-null value such as + SUPPORTED or UNSUPPORTED. + +

        Similar to validate, this method may be called by the runtime before the + start method is invoked when the connector will be run with exactly-once support.

        +
        +
        Parameters:
        +
        connectorConfig - the configuration that will be used for the connector.
        +
        Returns:
        +
ExactlyOnceSupport.SUPPORTED if the connector can provide exactly-once support with the given + configuration, and ExactlyOnceSupport.UNSUPPORTED if it cannot. If this method is overridden by a + connector, the result should not be null; if it is null, it will be assumed that the connector cannot provide + exactly-once semantics.
        +
        Since:
        +
        3.3
        +
        +
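A minimal sketch of overriding this method, assuming a hypothetical ReplayableSourceConnector with a made-up source.replayable configuration property; the other abstract Connector methods are omitted:

    import java.util.Map;
    import org.apache.kafka.connect.source.ExactlyOnceSupport;
    import org.apache.kafka.connect.source.SourceConnector;

    // Hypothetical connector: exactly-once is only possible when the source system
    // allows re-reading from a recorded position.
    public abstract class ReplayableSourceConnector extends SourceConnector {

        @Override
        public ExactlyOnceSupport exactlyOnceSupport(Map<String, String> connectorConfig) {
            boolean replayable = Boolean.parseBoolean(
                    connectorConfig.getOrDefault("source.replayable", "false"));
            return replayable ? ExactlyOnceSupport.SUPPORTED : ExactlyOnceSupport.UNSUPPORTED;
        }
    }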
        +
      • +
      • +
        +

        canDefineTransactionBoundaries

        +
        public ConnectorTransactionBoundaries canDefineTransactionBoundaries(Map<String,String> connectorConfig)
        +
        Signals whether the connector implementation is capable of defining the transaction boundaries for a + connector with the given configuration. This method is called before Connector.start(Map), only when the + runtime supports exactly-once and the connector configuration includes transaction.boundary=connector. + +

        This method need not be implemented if the connector implementation does not support defining + transaction boundaries.

        +
        +
        Parameters:
        +
        connectorConfig - the configuration that will be used for the connector
        +
        Returns:
        +
        ConnectorTransactionBoundaries.SUPPORTED if the connector will define its own transaction boundaries, + or ConnectorTransactionBoundaries.UNSUPPORTED otherwise; may never be null. The default implementation + returns ConnectorTransactionBoundaries.UNSUPPORTED.
        +
        Since:
        +
        3.3
        +
        See Also:
        +
        + +
        +
        +
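Similarly, a connector can report connector-defined transaction boundaries conditionally. The sketch below uses a hypothetical BatchingSourceConnector with a made-up batch.mode.enabled property:

    import java.util.Map;
    import org.apache.kafka.connect.source.ConnectorTransactionBoundaries;
    import org.apache.kafka.connect.source.SourceConnector;

    // Hypothetical connector that only defines its own transaction boundaries
    // when its (assumed) batch mode is enabled.
    public abstract class BatchingSourceConnector extends SourceConnector {

        @Override
        public ConnectorTransactionBoundaries canDefineTransactionBoundaries(
                Map<String, String> connectorConfig) {
            boolean batching = Boolean.parseBoolean(
                    connectorConfig.getOrDefault("batch.mode.enabled", "false"));
            return batching
                    ? ConnectorTransactionBoundaries.SUPPORTED
                    : ConnectorTransactionBoundaries.UNSUPPORTED;
        }
    }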
        +
      • +
      • +
        +

        alterOffsets

        +
        public boolean alterOffsets(Map<String,String> connectorConfig, + Map<Map<String,?>,Map<String,?>> offsets)
        +
        Invoked when users request to manually alter/reset the offsets for this connector via the Connect worker's REST + API. Connectors that manage offsets externally can propagate offset changes to their external system in this + method. Connectors may also validate these offsets to ensure that the source partitions and source offsets are + in a format that is recognizable to them. +

        + Connectors that neither manage offsets externally nor require custom offset validation need not implement this + method beyond simply returning true. +

        + User requests to alter/reset offsets will be handled by the Connect runtime and will be reflected in the offsets + returned by any OffsetStorageReader instances + provided to this connector and its tasks. +

        + Note that altering / resetting offsets is expected to be an idempotent operation and this method should be able + to handle being called more than once with the same arguments (which could occur if a user retries the request + due to a failure in writing the new offsets to the offsets store, for example). +

        + Similar to validate, this method may be called by the runtime before the + start method is invoked.

        +
        +
        Parameters:
        +
        connectorConfig - the configuration of the connector
        +
        offsets - a map from source partition to source offset, containing the offsets that the user has requested + to alter/reset. For any source partitions whose offsets are being reset instead of altered, their + corresponding source offset value in the map will be null. This map may be empty, but + never null. An empty offsets map could indicate that the offsets were reset previously or that no + offsets have been committed yet.
        +
        Returns:
        +
        whether this method has been overridden by the connector; the default implementation returns + false, and all other implementations (that do not unconditionally throw exceptions) should return + true
        +
        Throws:
        +
        UnsupportedOperationException - if it is impossible to alter/reset the offsets for this connector
        +
        ConnectException - if the offsets for this connector cannot be + reset for any other reason (for example, they have failed custom validation logic specific to this connector)
        +
        Since:
        +
        3.6
        +
        +
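The sketch below illustrates the validation side of this contract for a hypothetical file-reading connector that expects source partitions of the form {"filename": ...} and source offsets of the form {"position": <long>}; both the class and the key names are assumptions, not part of the Kafka API:

    import java.util.Map;
    import org.apache.kafka.connect.errors.ConnectException;
    import org.apache.kafka.connect.source.SourceConnector;

    // Hypothetical connector: only alterOffsets is shown, other abstract methods omitted.
    public abstract class FileSourceConnector extends SourceConnector {

        @Override
        public boolean alterOffsets(Map<String, String> connectorConfig,
                                    Map<Map<String, ?>, Map<String, ?>> offsets) {
            for (Map.Entry<Map<String, ?>, Map<String, ?>> entry : offsets.entrySet()) {
                Map<String, ?> partition = entry.getKey();
                Map<String, ?> offset = entry.getValue();  // null means the offset is being reset
                if (partition == null || !(partition.get("filename") instanceof String)) {
                    throw new ConnectException("Unrecognized source partition: " + partition);
                }
                if (offset != null && !(offset.get("position") instanceof Long)) {
                    throw new ConnectException("Unrecognized source offset: " + offset);
                }
            }
            return true;  // overridden, and the requested offsets passed validation
        }
    }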
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/source/SourceConnectorContext.html b/static/41/javadoc/org/apache/kafka/connect/source/SourceConnectorContext.html new file mode 100644 index 000000000..063278c5e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/source/SourceConnectorContext.html @@ -0,0 +1,140 @@ + + + + +SourceConnectorContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface SourceConnectorContext

    +
    +
    +
    +
    All Superinterfaces:
    +
    ConnectorContext
    +
    +
    +
    public interface SourceConnectorContext +extends ConnectorContext
    +
    A context to allow a SourceConnector to interact with the Kafka Connect runtime.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        offsetStorageReader

        +
        OffsetStorageReader offsetStorageReader()
        +
        Returns the OffsetStorageReader for this SourceConnectorContext.
        +
        +
        Returns:
        +
        the OffsetStorageReader for this connector.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/source/SourceRecord.html b/static/41/javadoc/org/apache/kafka/connect/source/SourceRecord.html new file mode 100644 index 000000000..0f4b43df9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/source/SourceRecord.html @@ -0,0 +1,428 @@ + + + + +SourceRecord (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SourceRecord

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.connector.ConnectRecord<SourceRecord> +
    org.apache.kafka.connect.source.SourceRecord
    +
    +
    +
    +
    +
    public class SourceRecord +extends ConnectRecord<SourceRecord>
    +

    + SourceRecords are generated by SourceTasks and passed to Kafka Connect for storage in + Kafka. In addition to the standard fields in ConnectRecord which specify where data is stored + in Kafka, they also include a sourcePartition and sourceOffset. +

    +

    + The sourcePartition represents a single input sourcePartition that the record came from (e.g. a filename, table + name, or topic-partition). The sourceOffset represents a position in that sourcePartition which can be used + to resume consumption of data. +

    +

    + These values can have arbitrary structure and should be represented using + org.apache.kafka.connect.data objects (or primitive values). For example, a database connector + might specify the sourcePartition as a record containing { "db": "database_name", "table": + "table_name"} and the sourceOffset as a Long containing the timestamp of the row. +

    +
    +
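For example, a hypothetical single-file source could build its records as in the sketch below, where the file name acts as the sourcePartition and the byte position as the sourceOffset (the topic name, file path and key names are all made up):

    import java.util.Map;
    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.source.SourceRecord;

    public class SourceRecordExample {
        // Hypothetical single-file source: the file name identifies the source
        // partition and the byte position within it is the source offset.
        static SourceRecord lineAt(String line, long position) {
            Map<String, ?> sourcePartition = Map.of("filename", "/var/log/app.log");
            Map<String, ?> sourceOffset = Map.of("position", position);
            return new SourceRecord(
                    sourcePartition,
                    sourceOffset,
                    "app-log-events",       // destination Kafka topic
                    Schema.STRING_SCHEMA,   // value schema
                    line);                  // value
        }
    }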
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        sourcePartition

        +
        public Map<String,?> sourcePartition()
        +
        +
      • +
      • +
        +

        sourceOffset

        +
        public Map<String,?> sourceOffset()
        +
        +
      • +
      • +
        +

        newRecord

        +
        public SourceRecord newRecord(String topic, + Integer kafkaPartition, + Schema keySchema, + Object key, + Schema valueSchema, + Object value, + Long timestamp)
        +
        Description copied from class: ConnectRecord
        +
        Create a new record of the same type as itself, with the specified parameter values. All other fields in this record will be copied + over to the new record. Since the headers are mutable, the resulting record will have a copy of this record's headers.
        +
        +
        Specified by:
        +
        newRecord in class ConnectRecord<SourceRecord>
        +
        Parameters:
        +
        topic - the name of the topic; may be null
        +
        kafkaPartition - the partition number for the Kafka topic; may be null
        +
        keySchema - the schema for the key; may be null
        +
        key - the key; may be null
        +
        valueSchema - the schema for the value; may be null
        +
        value - the value; may be null
        +
        timestamp - the timestamp; may be null
        +
        Returns:
        +
        the new record
        +
        +
        +
      • +
      • +
        +

        newRecord

        +
        public SourceRecord newRecord(String topic, + Integer kafkaPartition, + Schema keySchema, + Object key, + Schema valueSchema, + Object value, + Long timestamp, + Iterable<Header> headers)
        +
        Description copied from class: ConnectRecord
        +
        Create a new record of the same type as itself, with the specified parameter values. All other fields in this record will be copied + over to the new record.
        +
        +
        Specified by:
        +
        newRecord in class ConnectRecord<SourceRecord>
        +
        Parameters:
        +
        topic - the name of the topic; may be null
        +
        kafkaPartition - the partition number for the Kafka topic; may be null
        +
        keySchema - the schema for the key; may be null
        +
        key - the key; may be null
        +
        valueSchema - the schema for the value; may be null
        +
        value - the value; may be null
        +
        timestamp - the timestamp; may be null
        +
        headers - the headers; may be null or empty
        +
        Returns:
        +
        the new record
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class ConnectRecord<SourceRecord>
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class ConnectRecord<SourceRecord>
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class ConnectRecord<SourceRecord>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/source/SourceTask.TransactionBoundary.html b/static/41/javadoc/org/apache/kafka/connect/source/SourceTask.TransactionBoundary.html new file mode 100644 index 000000000..f4d48dbbb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/source/SourceTask.TransactionBoundary.html @@ -0,0 +1,306 @@ + + + + +SourceTask.TransactionBoundary (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class SourceTask.TransactionBoundary

    +
    +
    java.lang.Object +
    java.lang.Enum<SourceTask.TransactionBoundary> +
    org.apache.kafka.connect.source.SourceTask.TransactionBoundary
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<SourceTask.TransactionBoundary>, Constable
    +
    +
    +
    Enclosing class:
    +
    SourceTask
    +
    +
    +
    public static enum SourceTask.TransactionBoundary +extends Enum<SourceTask.TransactionBoundary>
    +
    Represents the permitted values for the SourceTask.TRANSACTION_BOUNDARY_CONFIG property.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        DEFAULT

        +
        public static final SourceTask.TransactionBoundary DEFAULT
        +
        The default transaction boundary style that will be used for source connectors when no style is explicitly + configured.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      + +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/source/SourceTask.html b/static/41/javadoc/org/apache/kafka/connect/source/SourceTask.html new file mode 100644 index 000000000..36c42b3dd --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/source/SourceTask.html @@ -0,0 +1,356 @@ + + + + +SourceTask (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SourceTask

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.source.SourceTask
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Task
    +
    +
    +
    Direct Known Subclasses:
    +
    MockSourceTask, SchemaSourceTask, VerifiableSourceTask
    +
    +
    +
    public abstract class SourceTask +extends Object +implements Task
    +
    SourceTask is a Task that pulls records from another system for storage in Kafka.
    +
    +
    +
      + +
    • +
      +

      Nested Class Summary

      +
      Nested Classes
      +
      +
      Modifier and Type
      +
      Class
      +
      Description
      +
      static enum 
      + +
      +
      Represents the permitted values for the TRANSACTION_BOUNDARY_CONFIG property.
      +
      +
      +
      +
    • + +
    • +
      +

      Field Summary

      +
      Fields
      +
      +
      Modifier and Type
      +
      Field
      +
      Description
      +
      static final String
      + +
      +
      The configuration key that determines how source tasks will define transaction boundaries + when exactly-once support is enabled.
      +
      +
      +
      +
    • + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      + +
       
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      This method is invoked periodically when offsets are committed for this source task.
      +
      +
      void
      + +
      +
      + Commit an individual SourceRecord when the callback from the producer client is received.
      +
      +
      void
      + +
      +
      Initialize this SourceTask with the specified context object.
      +
      +
      abstract List<SourceRecord>
      + +
      +
      Poll this source task for new records.
      +
      +
      abstract void
      + +
      +
      Start the Task.
      +
      +
      abstract void
      + +
      +
      Signal this SourceTask to stop.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +

      Methods inherited from interface org.apache.kafka.connect.connector.Task

      +version
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        TRANSACTION_BOUNDARY_CONFIG

        +
        public static final String TRANSACTION_BOUNDARY_CONFIG
        +
        The configuration key that determines how source tasks will define transaction boundaries + when exactly-once support is enabled.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SourceTask

        +
        public SourceTask()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        initialize

        +
        public void initialize(SourceTaskContext context)
        +
        Initialize this SourceTask with the specified context object.
        +
        +
      • +
      • +
        +

        start

        +
        public abstract void start(Map<String,String> props)
        +
        Start the Task. This should handle any configuration parsing and one-time setup of the task.
        +
        +
        Specified by:
        +
        start in interface Task
        +
        Parameters:
        +
        props - initial configuration
        +
        +
        +
      • +
      • +
        +

        poll

        +
        public abstract List<SourceRecord> poll() + throws InterruptedException
        +
        Poll this source task for new records. If no data is currently available, this method + should block but return control to the caller regularly (by returning null) in + order for the task to transition to the PAUSED state if requested to do so. +

        + The task will be stopped on a separate thread, and when that happens + this method is expected to unblock, quickly finish up any remaining processing, and + return.

        +
        +
        Returns:
        +
        a list of source records
        +
        Throws:
        +
        InterruptedException
        +
        +
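A rough sketch of the polling contract described above, using a hypothetical QueueSourceTask whose fetchBatch helper stands in for reading from the real source system:

    import java.util.List;
    import java.util.Map;
    import org.apache.kafka.connect.source.SourceRecord;
    import org.apache.kafka.connect.source.SourceTask;

    // Hypothetical task: fetchBatch is an assumed helper that reads from the source system.
    public abstract class QueueSourceTask extends SourceTask {

        private volatile boolean running;

        @Override
        public String version() {
            return "0.0.1";
        }

        @Override
        public void start(Map<String, String> props) {
            running = true;
        }

        @Override
        public List<SourceRecord> poll() throws InterruptedException {
            if (!running) {
                return null;
            }
            List<SourceRecord> batch = fetchBatch();  // may be empty if no data is available
            if (batch.isEmpty()) {
                Thread.sleep(500);  // back off briefly, then return control to the framework
                return null;        // returning null lets the task transition to PAUSED if requested
            }
            return batch;
        }

        @Override
        public void stop() {
            running = false;  // poll() will notice the flag and return promptly
        }

        protected abstract List<SourceRecord> fetchBatch() throws InterruptedException;
    }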
        +
      • +
      • +
        +

        commit

        +
        public void commit() throws InterruptedException

        This method is invoked periodically when offsets are committed for this source task. Note that the offsets being committed won't necessarily correspond to the latest offsets returned by this source task via poll(). Also see commitRecord(SourceRecord, RecordMetadata), which allows for more fine-grained tracking of records that have been successfully delivered.

        SourceTasks are not required to implement this functionality; Kafka Connect will record offsets automatically. This hook is provided for systems that also need to store offsets internally in their own system.

        +
        +
        Throws:
        +
        InterruptedException
        +
        +
        +
      • +
      • +
        +

        stop

        +
        public abstract void stop()
        +
        Signal this SourceTask to stop. In SourceTasks, this method only needs to signal to the task that it should stop trying to poll for new data and interrupt any outstanding poll() requests. It is not required that the task has fully stopped. Note that this method necessarily may be invoked from a different thread than poll() and commit().

        For example, if a task uses a Selector to receive data over the network, this method could set a flag that will force poll() to exit immediately and invoke wakeup() to interrupt any ongoing requests.

        +
        +
        Specified by:
        +
        stop in interface Task
        +
        +
        +
      • +
      • +
        +

        commitRecord

        +
        public void commitRecord(SourceRecord record, RecordMetadata metadata) throws InterruptedException

        Commit an individual SourceRecord when the callback from the producer client is received. This method is also called when a record is filtered by a transformation or when "errors.tolerance" is set to "all" and thus will never be ACK'd by a broker. In both cases metadata will be null.

        SourceTasks are not required to implement this functionality; Kafka Connect will record offsets automatically. This hook is provided for systems that also need to store offsets internally in their own system.

        The default implementation is a no-op. It is not necessary to implement the method.

        +
        +
        Parameters:
        +
        record - SourceRecord that was successfully sent via the producer, filtered by a transformation, or dropped on producer exception
        +
        metadata - RecordMetadata record metadata returned from the broker, or null if the record was filtered or if producer exceptions are ignored
        +
        Throws:
        +
        InterruptedException
        +
        +
        +
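        Taken together, these lifecycle methods usually fit together as in the minimal sketch below. It is illustrative only: the class name, the in-memory running flag, the "topic" config key, and the hard-coded partition/offset keys are assumptions, not part of the API.

```java
import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

// Illustrative task that emits one record per poll; names and config keys are assumptions.
public class ExampleSourceTask extends SourceTask {

    private volatile boolean running;
    private String topic;

    @Override
    public String version() {
        return "0.0.1";
    }

    @Override
    public void start(Map<String, String> props) {
        // One-time setup: parse the configuration passed by the Connect runtime.
        topic = props.get("topic"); // illustrative config key
        running = true;
    }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        if (!running) {
            return null; // returning null yields control back to the framework
        }
        Map<String, String> sourcePartition = Collections.singletonMap("source", "example");
        Map<String, Long> sourceOffset = Collections.singletonMap("position", System.currentTimeMillis());
        SourceRecord record = new SourceRecord(
                sourcePartition, sourceOffset, topic, Schema.STRING_SCHEMA, "hello");
        return Collections.singletonList(record);
    }

    @Override
    public void stop() {
        // Called from a different thread: only signal poll() to return promptly.
        running = false;
    }
}
```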
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/source/SourceTaskContext.html b/static/41/javadoc/org/apache/kafka/connect/source/SourceTaskContext.html new file mode 100644 index 000000000..6f1c4428e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/source/SourceTaskContext.html @@ -0,0 +1,211 @@ + + + + +SourceTaskContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface SourceTaskContext

    +
    +
    +
    +
    public interface SourceTaskContext
    +
    SourceTaskContext is provided to SourceTasks to allow them to interact with the underlying runtime.
    +
    +

      Method Details

      +
        +
      • +
        +

        configs

        +
        Map<String,String> configs()
        +
        Get the Task configuration. This is the latest configuration and may differ from that passed on startup.

        For example, this method can be used to obtain the latest configuration if an external secret has changed, and the configuration is using variable references such as those compatible with ConfigTransformer.

        +
        +
      • +
      • +
        +

        offsetStorageReader

        +
        OffsetStorageReader offsetStorageReader()
        +
        Get the OffsetStorageReader for this SourceTask.
        +
        +
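        As a hedged sketch of how a task might combine configs() and offsetStorageReader() during start-up, the helper below looks up the last committed position; the partition map and the "position" offset key are illustrative assumptions, not part of the API.

```java
import java.util.Collections;
import java.util.Map;

import org.apache.kafka.connect.source.SourceTaskContext;
import org.apache.kafka.connect.storage.OffsetStorageReader;

public class OffsetResumeExample {

    // Hypothetical helper: look up the last committed position for an illustrative partition key.
    static Long lastCommittedPosition(SourceTaskContext context) {
        Map<String, String> sourcePartition = Collections.singletonMap("source", "example");
        OffsetStorageReader reader = context.offsetStorageReader();
        // The returned map may be null if no offset has been committed for this partition yet.
        Map<String, Object> offset = reader.offset(sourcePartition);
        return offset == null ? null : (Long) offset.get("position");
    }
}
```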
      • +
      • +
        +

        transactionContext

        +
        default TransactionContext transactionContext()
        +
        Get a TransactionContext that can be used to define producer transaction boundaries + when exactly-once support is enabled for the connector. + +

        This method was added in Apache Kafka 3.2. Source tasks that use this method but want to + maintain backward compatibility so they can also be deployed to older Connect runtimes + should guard the call to this method with a try-catch block, since calling this method will result in a + NoSuchMethodError or NoClassDefFoundError when the source connector is deployed to + Connect runtimes older than Kafka 3.2. For example: +

        +     TransactionContext transactionContext;
        +     try {
        +         transactionContext = context.transactionContext();
        +     } catch (NoSuchMethodError | NoClassDefFoundError e) {
        +         transactionContext = null;
        +     }
        + 
        +
        +
        Returns:
        +
        the transaction context, or null if the connector was not configured to specify transaction boundaries
        +
        Since:
        +
        3.3
        +
        +
        +
      • +
      • +
        +

        pluginMetrics

        +
        PluginMetrics pluginMetrics()
        +
        Get a PluginMetrics that can be used to define metrics + +

        This method was added in Apache Kafka 4.1. Tasks that use this method but want to + maintain backward compatibility so they can also be deployed to older Connect runtimes + should guard the call to this method with a try-catch block, since calling this method will result in a + NoSuchMethodError or NoClassDefFoundError when the connector is deployed to + Connect runtimes older than Kafka 4.1. For example: +

        +     PluginMetrics pluginMetrics;
        +     try {
        +         pluginMetrics = context.pluginMetrics();
        +     } catch (NoSuchMethodError | NoClassDefFoundError e) {
        +         pluginMetrics = null;
        +     }
        + 
        +
        +
        Returns:
        +
        the pluginMetrics instance
        +
        Since:
        +
        4.1
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/source/TransactionContext.html b/static/41/javadoc/org/apache/kafka/connect/source/TransactionContext.html new file mode 100644 index 000000000..ca9048a45 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/source/TransactionContext.html @@ -0,0 +1,194 @@ + + + + +TransactionContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TransactionContext

    +
    +
    +
    +
    public interface TransactionContext
    +
    Provided to source tasks to allow them to define their own producer transaction boundaries when + exactly-once support is enabled.
    +
    +

      Method Details

      +
        +
      • +
        +

        commitTransaction

        +
        void commitTransaction()
        +
        Request a transaction commit after the next batch of records from SourceTask.poll() + is processed.
        +
        +
      • +
      • +
        +

        commitTransaction

        +
        void commitTransaction(SourceRecord record)
        +
        Request a transaction commit after a source record is processed. The source record will be the + last record in the committed transaction. +

        + If a task requests that the last record in a batch that it returns from SourceTask.poll() + be committed by invoking this method, and also requests that that same batch be aborted by + invoking abortTransaction(), the record-based operation (in this case, committing + the transaction) will take precedence.

        +
        +
        Parameters:
        +
        record - the record to commit the transaction after; may not be null.
        +
        +
        +
      • +
      • +
        +

        abortTransaction

        +
        void abortTransaction()
        +
        Requests a transaction abort after the next batch of records from SourceTask.poll(). All of the records in that transaction will be discarded and will not appear in a committed transaction. However, offsets for that transaction will still be committed so that the records in that transaction are not reprocessed. If the data should instead be reprocessed, the task should not invoke this method and should instead throw an exception.
        +
        +
      • +
      • +
        +

        abortTransaction

        +
        void abortTransaction(SourceRecord record)
        +
        Requests a transaction abort after a source record is processed. The source record will be the + last record in the aborted transaction. All of the records in that transaction will be discarded + and will not appear in a committed transaction. However, offsets for that transaction will still + be committed so that the records in that transaction are not reprocessed. If the data should be + reprocessed, the task should not invoke this method and should instead throw an exception. +

        + If a task requests that the last record in a batch that it returns from SourceTask.poll() + be aborted by invoking this method, and also requests that that same batch be committed by + invoking commitTransaction(), the record-based operation (in this case, aborting + the transaction) will take precedence.

        +
        +
        Parameters:
        +
        record - the record to abort the transaction after; may not be null.
        +
        +
        +
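        Putting the record-level operations in context, the sketch below shows one way a source task might commit a producer transaction after the last record of each polled batch when connector-defined transaction boundaries are in use; the helper and its name are illustrative assumptions, not part of the API.

```java
import java.util.List;

import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.TransactionContext;

public class TransactionBoundaryExample {

    // Hypothetical helper: end the current transaction after the last record of a batch.
    static void commitAfterBatch(TransactionContext transactionContext, List<SourceRecord> batch) {
        if (transactionContext == null || batch.isEmpty()) {
            return; // connector not configured for connector-defined boundaries, or nothing to commit
        }
        SourceRecord last = batch.get(batch.size() - 1);
        transactionContext.commitTransaction(last); // transaction ends after this record is dispatched
    }
}
```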
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/source/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/source/package-summary.html new file mode 100644 index 000000000..efc1139cc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/source/package-summary.html @@ -0,0 +1,130 @@ + + + + +org.apache.kafka.connect.source (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.connect.source

    +
    +
    +
    package org.apache.kafka.connect.source
    +
    +
    Provides an API for implementing source connectors which read data from external applications into Kafka.
    +
    +
    +
      +
    • +
      +
      +
      +
      +
      Class
      +
      Description
      + +
      +
      An enum to represent the level of support for connector-defined transaction boundaries.
      +
      + +
      +
      An enum to represent the level of support for exactly-once semantics from a source connector.
      +
      + +
      +
      SourceConnectors implement the connector interface to pull data from another system and send + it to Kafka.
      +
      + +
      +
      A context to allow a SourceConnector to interact with the Kafka Connect runtime.
      +
      + +
      +
      + SourceRecords are generated by SourceTasks and passed to Kafka Connect for storage in + Kafka.
      +
      + +
      +
      SourceTask is a Task that pulls records from another system for storage in Kafka.
      +
      + +
      +
      Represents the permitted values for the SourceTask.TRANSACTION_BOUNDARY_CONFIG property.
      +
      + +
      +
      SourceTaskContext is provided to SourceTasks to allow them to interact with the underlying + runtime.
      +
      + +
      +
      Provided to source tasks to allow them to define their own producer transaction boundaries when + exactly-once support is enabled.
      +
      +
      +
      +
      +
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/source/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/source/package-tree.html new file mode 100644 index 000000000..e395ba38e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/source/package-tree.html @@ -0,0 +1,109 @@ + + + + +org.apache.kafka.connect.source Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.connect.source

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +

    Enum Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/storage/Converter.html b/static/41/javadoc/org/apache/kafka/connect/storage/Converter.html new file mode 100644 index 000000000..269440014 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/storage/Converter.html @@ -0,0 +1,296 @@ + + + + +Converter (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Converter

    +
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Closeable
    +
    +
    +
    All Known Implementing Classes:
    +
    StringConverter
    +
    +
    +
    public interface Converter extends Closeable

    The Converter interface provides support for translating between Kafka Connect's runtime data format and byte[]. Internally, this likely includes an intermediate step to the format used by the serialization layer (e.g. JsonNode, GenericRecord, Message).

    Kafka Connect may discover implementations of this interface using the Java ServiceLoader mechanism. To support this, implementations of this interface should also contain a service provider configuration file in META-INF/services/org.apache.kafka.connect.storage.Converter.

    Implement Monitorable to enable the converter to register metrics. The following tags are automatically added to all metrics registered: connector set to connector name, task set to the task id and converter set to either key or value.
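    As a rough illustration of the contract, the sketch below implements the three non-default methods for a converter that treats every value as a UTF-8 string. It is only a sketch: the shipped StringConverter already covers this case, and the class name here is an assumption.

```java
import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.storage.Converter;

// Illustrative converter that treats every value as a UTF-8 string.
public class Utf8StringConverter implements Converter {

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        // No configuration is needed for this sketch.
    }

    @Override
    public byte[] fromConnectData(String topic, Schema schema, Object value) {
        return value == null ? null : value.toString().getBytes(StandardCharsets.UTF_8);
    }

    @Override
    public SchemaAndValue toConnectData(String topic, byte[] value) {
        if (value == null) {
            return SchemaAndValue.NULL;
        }
        return new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA,
                new String(value, StandardCharsets.UTF_8));
    }
}
```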

    +
    +
    +
      + +
    • +
      +

      Method Summary

      default void close()
      default ConfigDef config(): Configuration specification for this converter.
      void configure(Map<String,?> configs, boolean isKey): Configure this class.
      default byte[] fromConnectData(String topic, Headers headers, Schema schema, Object value): Convert a Kafka Connect data object to a native object for serialization, potentially using the supplied topic and headers in the record as necessary.
      byte[] fromConnectData(String topic, Schema schema, Object value): Convert a Kafka Connect data object to a native object for serialization.
      SchemaAndValue toConnectData(String topic, byte[] value): Convert a native object to a Kafka Connect data object for deserialization.
      SchemaAndValue toConnectData(String topic, Headers headers, byte[] value): Convert a native object to a Kafka Connect data object for deserialization, potentially using the supplied topic and headers in the record as necessary.

      Method Details

      +
        +
      • +
        +

        configure

        +
        void configure(Map<String,?> configs, + boolean isKey)
        +
        Configure this class.
        +
        +
        Parameters:
        +
        configs - configs in key/value pairs
        +
        isKey - whether this converter is for a key or a value
        +
        +
        +
      • +
      • +
        +

        fromConnectData

        +
        byte[] fromConnectData(String topic, + Schema schema, + Object value)
        +
        Convert a Kafka Connect data object to a native object for serialization.
        +
        +
        Parameters:
        +
        topic - the topic associated with the data
        +
        schema - the schema for the value
        +
        value - the value to convert
        +
        Returns:
        +
        the serialized value
        +
        +
        +
      • +
      • +
        +

        fromConnectData

        +
        default byte[] fromConnectData(String topic, + Headers headers, + Schema schema, + Object value)
        +
        Convert a Kafka Connect data object to a native object for serialization, + potentially using the supplied topic and headers in the record as necessary. + +

        Connect uses this method directly, and for backward compatibility reasons this method + by default will call the fromConnectData(String, Schema, Object) method. + Override this method to make use of the supplied headers.

        +
        +
        Parameters:
        +
        topic - the topic associated with the data
        +
        headers - the headers associated with the data; any changes done to the headers + are applied to the message sent to the broker
        +
        schema - the schema for the value
        +
        value - the value to convert
        +
        Returns:
        +
        the serialized value
        +
        +
        +
      • +
      • +
        +

        toConnectData

        +
        SchemaAndValue toConnectData(String topic, + byte[] value)
        +
        Convert a native object to a Kafka Connect data object for deserialization.
        +
        +
        Parameters:
        +
        topic - the topic associated with the data
        +
        value - the value to convert
        +
        Returns:
        +
        an object containing the Schema and the converted value
        +
        +
        +
      • +
      • +
        +

        toConnectData

        +
        default SchemaAndValue toConnectData(String topic, + Headers headers, + byte[] value)
        +
        Convert a native object to a Kafka Connect data object for deserialization, + potentially using the supplied topic and headers in the record as necessary. + +

        Connect uses this method directly, and for backward compatibility reasons this method + by default will call the toConnectData(String, byte[]) method. + Override this method to make use of the supplied headers.

        +
        +
        Parameters:
        +
        topic - the topic associated with the data
        +
        headers - the headers associated with the data
        +
        value - the value to convert
        +
        Returns:
        +
        an object containing the Schema and the converted value
        +
        +
        +
      • +
      • +
        +

        config

        +
        default ConfigDef config()
        +
        Configuration specification for this converter.
        +
        +
        Returns:
        +
        the configuration specification; may not be null
        +
        +
        +
      • +
      • +
        +

        close

        +
        default void close() + throws IOException
        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        Specified by:
        +
        close in interface Closeable
        +
        Throws:
        +
        IOException
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/storage/ConverterConfig.html b/static/41/javadoc/org/apache/kafka/connect/storage/ConverterConfig.html new file mode 100644 index 000000000..c2c7644cc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/storage/ConverterConfig.html @@ -0,0 +1,204 @@ + + + + +ConverterConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConverterConfig

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.AbstractConfig +
    org.apache.kafka.connect.storage.ConverterConfig
    +
    +
    +
    +
    +
    Direct Known Subclasses:
    +
    StringConverterConfig
    +
    +
    +
    public abstract class ConverterConfig +extends AbstractConfig
    +
    Abstract class that defines the configuration options for Converter and HeaderConverter instances.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        newConfigDef

        +
        public static ConfigDef newConfigDef()
        +
        Create a new ConfigDef instance containing the configurations defined by ConverterConfig. This can be called by subclasses.
        +
        +
        Returns:
        +
        the ConfigDef; never null
        +
        +
        +
      • +
      • +
        +

        type

        +
        public ConverterType type()
        +
        Get the type of converter as defined by the TYPE_CONFIG configuration.
        +
        +
        Returns:
        +
        the converter type; never null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/storage/ConverterType.html b/static/41/javadoc/org/apache/kafka/connect/storage/ConverterType.html new file mode 100644 index 000000000..558baf9af --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/storage/ConverterType.html @@ -0,0 +1,253 @@ + + + + +ConverterType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class ConverterType

    +
    +
    java.lang.Object +
    java.lang.Enum<ConverterType> +
    org.apache.kafka.connect.storage.ConverterType
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<ConverterType>, Constable
    +
    +
    +
    public enum ConverterType +extends Enum<ConverterType>
    +
    The type of Converter and HeaderConverter.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static ConverterType[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static ConverterType valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        withName

        +
        public static ConverterType withName(String name)
        +
        Find the ConverterType with the given name, using a case-insensitive match.
        +
        +
        Parameters:
        +
        name - the name of the converter type; may be null
        +
        Returns:
        +
        the matching converter type, or null if the supplied name is null or does not match the name of the known types
        +
        +
        +
      • +
      • +
        +

        getName

        +
        public String getName()
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/storage/HeaderConverter.html b/static/41/javadoc/org/apache/kafka/connect/storage/HeaderConverter.html new file mode 100644 index 000000000..86abf73f6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/storage/HeaderConverter.html @@ -0,0 +1,207 @@ + + + + +HeaderConverter (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface HeaderConverter

    +
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Closeable, Configurable
    +
    +
    +
    All Known Implementing Classes:
    +
    SimpleHeaderConverter, StringConverter
    +
    +
    +
    public interface HeaderConverter extends Configurable, Closeable

    The HeaderConverter interface provides support for translating between Kafka Connect's runtime data format and byte[]. This is similar to the Converter interface, but specifically for Headers.

    Kafka Connect may discover implementations of this interface using the Java ServiceLoader mechanism. To support this, implementations of this interface should also contain a service provider configuration file in META-INF/services/org.apache.kafka.connect.storage.HeaderConverter.

    Implement Monitorable to enable the converter to register metrics. The following tags are automatically added to all metrics registered: connector set to connector name, task set to the task id and converter set to header.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        toConnectHeader

        +
        SchemaAndValue toConnectHeader(String topic, + String headerKey, + byte[] value)
        +
        Convert the header name and byte array value into a Header object.
        +
        +
        Parameters:
        +
        topic - the name of the topic for the record containing the header
        +
        headerKey - the header's key; may not be null
        +
        value - the header's raw value; may be null
        +
        Returns:
        +
        the SchemaAndValue; may not be null
        +
        +
        +
      • +
      • +
        +

        fromConnectHeader

        +
        byte[] fromConnectHeader(String topic, + String headerKey, + Schema schema, + Object value)
        +
        Convert the Header's value into its byte array representation.
        +
        +
        Parameters:
        +
        topic - the name of the topic for the record containing the header
        +
        headerKey - the header's key; may not be null
        +
        schema - the schema for the header's value; may be null
        +
        value - the header's value to convert; may be null
        +
        Returns:
        +
        the byte array form of the Header's value; may be null if the value is null
        +
        +
        +
      • +
      • +
        +

        config

        +
        ConfigDef config()
        +
        Configuration specification for this set of header converters.
        +
        +
        Returns:
        +
        the configuration specification; may not be null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/storage/OffsetStorageReader.html b/static/41/javadoc/org/apache/kafka/connect/storage/OffsetStorageReader.html new file mode 100644 index 000000000..ef5d14f81 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/storage/OffsetStorageReader.html @@ -0,0 +1,173 @@ + + + + +OffsetStorageReader (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface OffsetStorageReader

    +
    +
    +
    +
    public interface OffsetStorageReader
    +

    OffsetStorageReader provides access to the offset storage used by sources. This can be used by connectors to determine offsets to start consuming data from. This is most commonly used during initialization of a task, but can also be used during runtime, e.g. when reconfiguring a task.

    Offsets are always defined as Maps of Strings to primitive types, i.e. all types supported by Schema other than Array, Map, and Struct.
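    As a hedged sketch of the bulk lookup described below, the helper requests offsets for several partitions at once and treats a missing entry as "no offset available"; the helper name and the partition shape are illustrative assumptions.

```java
import java.util.Collection;
import java.util.Map;

import org.apache.kafka.connect.storage.OffsetStorageReader;

public class BulkOffsetLookupExample {

    // Hypothetical helper: a missing entry means the offset was not available, either because
    // nothing was stored yet or because part of the request failed and was omitted.
    static Map<String, Object> offsetOrNull(OffsetStorageReader reader,
                                            Collection<Map<String, String>> partitions,
                                            Map<String, String> wanted) {
        Map<Map<String, String>, Map<String, Object>> offsets = reader.offsets(partitions);
        return offsets.get(wanted); // callers should handle null by choosing a default start position
    }
}
```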

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        offset

        +
        <T> Map<String,Object> offset(Map<String,T> partition)
        +
        Get the offset for the specified partition. If the data isn't already available locally, this + gets it from the backing store, which may require some network round trips.
        +
        +
        Parameters:
        +
        partition - object uniquely identifying the partition of data
        +
        Returns:
        +
        object uniquely identifying the offset in the partition of data
        +
        +
        +
      • +
      • +
        +

        offsets

        +
        <T> Map<Map<String,T>,Map<String,Object>> offsets(Collection<Map<String,T>> partitions)
        +

        + Get a set of offsets for the specified partition identifiers. This may be more efficient + than calling offset(Map) repeatedly. +

        +

        + Note that when errors occur, this method omits the associated data and tries to return as + many of the requested values as possible. This allows a task that's managing many partitions to + still proceed with any available data. Therefore, implementations should take care to check + that the data is actually available in the returned response. The only case when an + exception will be thrown is if the entire request failed, e.g. because the underlying + storage was unavailable. +

        +
        +
        Parameters:
        +
        partitions - set of identifiers for partitions of data
        +
        Returns:
        +
        a map of partition identifiers to decoded offsets
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/storage/SimpleHeaderConverter.html b/static/41/javadoc/org/apache/kafka/connect/storage/SimpleHeaderConverter.html new file mode 100644 index 000000000..84b248c48 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/storage/SimpleHeaderConverter.html @@ -0,0 +1,286 @@ + + + + +SimpleHeaderConverter (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SimpleHeaderConverter

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.storage.SimpleHeaderConverter
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Configurable, Versioned, HeaderConverter
    +
    +
    +
    public class SimpleHeaderConverter extends Object implements HeaderConverter, Versioned

    A HeaderConverter that serializes header values as strings and that deserializes header values to the most appropriate numeric, boolean, array, or map representation. Schemas are not serialized, but are inferred upon deserialization when possible.
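    A small usage sketch of that round trip, assuming a header value that happens to be a plain string; the topic name and header key are illustrative.

```java
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.storage.SimpleHeaderConverter;

public class HeaderConverterExample {
    public static void main(String[] args) {
        SimpleHeaderConverter converter = new SimpleHeaderConverter();
        converter.configure(Map.of()); // no settings are needed for this sketch

        // Serialize a header value and read it back; topic and header key are illustrative.
        byte[] raw = converter.fromConnectHeader("orders", "trace-id", Schema.STRING_SCHEMA, "order-accepted");
        SchemaAndValue roundTripped = converter.toConnectHeader("orders", "trace-id", raw);
        System.out.println(roundTripped.value()); // order-accepted
    }
}
```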
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SimpleHeaderConverter

        +
        public SimpleHeaderConverter()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        version

        +
        public String version()
        +
        Description copied from interface: Versioned
        +
        Get the version of this component.
        +
        +
        Specified by:
        +
        version in interface Versioned
        +
        Returns:
        +
        the version, formatted as a String. The version may not be null or empty.
        +
        +
        +
      • +
      • +
        +

        config

        +
        public ConfigDef config()
        +
        Description copied from interface: HeaderConverter
        +
        Configuration specification for this set of header converters.
        +
        +
        Specified by:
        +
        config in interface HeaderConverter
        +
        Returns:
        +
        the configuration specification; may not be null
        +
        +
        +
      • +
      • +
        +

        configure

        +
        public void configure(Map<String,?> configs)
        +
        Description copied from interface: Configurable
        +
        Configure this class with the given key-value pairs
        +
        +
        Specified by:
        +
        configure in interface Configurable
        +
        +
        +
      • +
      • +
        +

        toConnectHeader

        +
        public SchemaAndValue toConnectHeader(String topic, + String headerKey, + byte[] value)
        +
        Description copied from interface: HeaderConverter
        +
        Convert the header name and byte array value into a Header object.
        +
        +
        Specified by:
        +
        toConnectHeader in interface HeaderConverter
        +
        Parameters:
        +
        topic - the name of the topic for the record containing the header
        +
        headerKey - the header's key; may not be null
        +
        value - the header's raw value; may be null
        +
        Returns:
        +
        the SchemaAndValue; may not be null
        +
        +
        +
      • +
      • +
        +

        fromConnectHeader

        +
        public byte[] fromConnectHeader(String topic, + String headerKey, + Schema schema, + Object value)
        +
        Description copied from interface: HeaderConverter
        +
        Convert the Header's value into its byte array representation.
        +
        +
        Specified by:
        +
        fromConnectHeader in interface HeaderConverter
        +
        Parameters:
        +
        topic - the name of the topic for the record containing the header
        +
        headerKey - the header's key; may not be null
        +
        schema - the schema for the header's value; may be null
        +
        value - the header's value to convert; may be null
        +
        Returns:
        +
        the byte array form of the Header's value; may be null if the value is null
        +
        +
        +
      • +
      • +
        +

        close

        +
        public void close() + throws IOException
        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        Specified by:
        +
        close in interface Closeable
        +
        Throws:
        +
        IOException
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/storage/StringConverter.html b/static/41/javadoc/org/apache/kafka/connect/storage/StringConverter.html new file mode 100644 index 000000000..bfbeeb660 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/storage/StringConverter.html @@ -0,0 +1,371 @@ + + + + +StringConverter (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StringConverter

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.storage.StringConverter
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Configurable, Versioned, Converter, HeaderConverter
    +
    +
    +
    public class StringConverter extends Object implements Converter, HeaderConverter, Versioned

    Converter and HeaderConverter implementation that only supports serializing to strings. When converting Kafka Connect data to bytes, the schema will be ignored and Object.toString() will always be invoked to convert the data to a String. When converting from bytes to Kafka Connect format, the converter will only ever return an optional string schema and a string or null.

    Encoding configuration is identical to StringSerializer and StringDeserializer, but for convenience this class can also be configured to use the same encoding for both encoding and decoding with the converter.encoding setting.

    This implementation currently does nothing with the topic names or header keys.
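    A short usage sketch, assuming the default UTF-8 encoding; the topic name is illustrative, and the explicit converter.encoding setting is shown only for clarity.

```java
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.storage.StringConverter;

public class StringConverterExample {
    public static void main(String[] args) {
        StringConverter converter = new StringConverter();
        // "converter.encoding" defaults to UTF-8; it is set explicitly here for illustration.
        converter.configure(Map.of("converter.encoding", "UTF-8"), false); // false = value converter

        byte[] bytes = converter.fromConnectData("orders", Schema.STRING_SCHEMA, "hello");
        SchemaAndValue back = converter.toConnectData("orders", bytes);
        System.out.println(back.value()); // hello
    }
}
```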

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        StringConverter

        +
        public StringConverter()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        version

        +
        public String version()
        +
        Description copied from interface: Versioned
        +
        Get the version of this component.
        +
        +
        Specified by:
        +
        version in interface Versioned
        +
        Returns:
        +
        the version, formatted as a String. The version may not be null or empty.
        +
        +
        +
      • +
      • +
        +

        config

        +
        public ConfigDef config()
        +
        Description copied from interface: Converter
        +
        Configuration specification for this converter.
        +
        +
        Specified by:
        +
        config in interface Converter
        +
        Specified by:
        +
        config in interface HeaderConverter
        +
        Returns:
        +
        the configuration specification; may not be null
        +
        +
        +
      • +
      • +
        +

        configure

        +
        public void configure(Map<String,?> configs)
        +
        Description copied from interface: Configurable
        +
        Configure this class with the given key-value pairs
        +
        +
        Specified by:
        +
        configure in interface Configurable
        +
        +
        +
      • +
      • +
        +

        configure

        +
        public void configure(Map<String,?> configs, + boolean isKey)
        +
        Description copied from interface: Converter
        +
        Configure this class.
        +
        +
        Specified by:
        +
        configure in interface Converter
        +
        Parameters:
        +
        configs - configs in key/value pairs
        +
        isKey - whether this converter is for a key or a value
        +
        +
        +
      • +
      • +
        +

        fromConnectData

        +
        public byte[] fromConnectData(String topic, + Schema schema, + Object value)
        +
        Description copied from interface: Converter
        +
        Convert a Kafka Connect data object to a native object for serialization.
        +
        +
        Specified by:
        +
        fromConnectData in interface Converter
        +
        Parameters:
        +
        topic - the topic associated with the data
        +
        schema - the schema for the value
        +
        value - the value to convert
        +
        Returns:
        +
        the serialized value
        +
        +
        +
      • +
      • +
        +

        toConnectData

        +
        public SchemaAndValue toConnectData(String topic, + byte[] value)
        +
        Description copied from interface: Converter
        +
        Convert a native object to a Kafka Connect data object for deserialization.
        +
        +
        Specified by:
        +
        toConnectData in interface Converter
        +
        Parameters:
        +
        topic - the topic associated with the data
        +
        value - the value to convert
        +
        Returns:
        +
        an object containing the Schema and the converted value
        +
        +
        +
      • +
      • +
        +

        fromConnectHeader

        +
        public byte[] fromConnectHeader(String topic, + String headerKey, + Schema schema, + Object value)
        +
        Description copied from interface: HeaderConverter
        +
        Convert the Header's value into its byte array representation.
        +
        +
        Specified by:
        +
        fromConnectHeader in interface HeaderConverter
        +
        Parameters:
        +
        topic - the name of the topic for the record containing the header
        +
        headerKey - the header's key; may not be null
        +
        schema - the schema for the header's value; may be null
        +
        value - the header's value to convert; may be null
        +
        Returns:
        +
        the byte array form of the Header's value; may be null if the value is null
        +
        +
        +
      • +
      • +
        +

        toConnectHeader

        +
        public SchemaAndValue toConnectHeader(String topic, + String headerKey, + byte[] value)
        +
        Description copied from interface: HeaderConverter
        +
        Convert the header name and byte array value into a Header object.
        +
        +
        Specified by:
        +
        toConnectHeader in interface HeaderConverter
        +
        Parameters:
        +
        topic - the name of the topic for the record containing the header
        +
        headerKey - the header's key; may not be null
        +
        value - the header's raw value; may be null
        +
        Returns:
        +
        the SchemaAndValue; may not be null
        +
        +
        +
      • +
      • +
        +

        close

        +
        public void close()
        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        Specified by:
        +
        close in interface Closeable
        +
        Specified by:
        +
        close in interface Converter
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/storage/StringConverterConfig.html b/static/41/javadoc/org/apache/kafka/connect/storage/StringConverterConfig.html new file mode 100644 index 000000000..9ce273bb7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/storage/StringConverterConfig.html @@ -0,0 +1,237 @@ + + + + +StringConverterConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StringConverterConfig

    +
    + +
    +
    +
    public class StringConverterConfig +extends ConverterConfig
    +
    Configuration options for StringConverter instances.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        ENCODING_CONFIG

        +
        public static final String ENCODING_CONFIG
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        ENCODING_DEFAULT

        +
        public static final String ENCODING_DEFAULT
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        StringConverterConfig

        +
        public StringConverterConfig(Map<String,?> props)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configDef

        +
        public static ConfigDef configDef()
        +
        +
      • +
      • +
        +

        encoding

        +
        public String encoding()
        +
        Get the string encoding.
        +
        +
        Returns:
        +
        the encoding; never null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/storage/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/storage/package-summary.html new file mode 100644 index 000000000..83d2a1bbf --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/storage/package-summary.html @@ -0,0 +1,125 @@ + + + + +org.apache.kafka.connect.storage (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.connect.storage

    +
    +
    +
    package org.apache.kafka.connect.storage
    +
    +
    Provides pluggable interfaces and some implementations for (de)serializing data to and from Kafka
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/storage/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/storage/package-tree.html new file mode 100644 index 000000000..40a9134e4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/storage/package-tree.html @@ -0,0 +1,116 @@ + + + + +org.apache.kafka.connect.storage Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.connect.storage

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +

    Enum Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/tools/MockConnector.html b/static/41/javadoc/org/apache/kafka/connect/tools/MockConnector.html new file mode 100644 index 000000000..703f76c7b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/tools/MockConnector.html @@ -0,0 +1,392 @@ + + + + +MockConnector (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MockConnector

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.connector.Connector +
    org.apache.kafka.connect.tools.MockConnector
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Versioned
    +
    +
    +
    public class MockConnector +extends Connector
    +
    This connector provides support for mocking certain connector behaviors. For example, this can be used to simulate connector or task failures. It works by passing a "mock mode" through configuration from the system test. New mock behavior can be implemented either in the connector or in the task by providing a new mode implementation.

    At the moment, this connector only supports a single task and shares configuration between the connector and its tasks.

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      + +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        MockConnector

        +
        public MockConnector()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        version

        +
        public String version()
        +
        Description copied from interface: Versioned
        +
        Get the version of this component.
        +
        +
        Returns:
        +
        the version, formatted as a String. The version may not be null or empty.
        +
        +
        +
      • +
      • +
        +

        start

        +
        public void start(Map<String,String> config)
        +
        Description copied from class: Connector
        +
        Start this Connector. This method will only be called on a clean Connector, i.e. it has + either just been instantiated and initialized or Connector.stop() has been invoked.
        +
        +
        Specified by:
        +
        start in class Connector
        +
        Parameters:
        +
        config - configuration settings
        +
        +
        +
      • +
      • +
        +

        taskClass

        +
        public Class<? extends Task> taskClass()
        +
        Description copied from class: Connector
        +
        Returns the Task implementation for this Connector.
        +
        +
        Specified by:
        +
        taskClass in class Connector
        +
        +
        +
      • +
      • +
        +

        taskConfigs

        +
        public List<Map<String,String>> taskConfigs(int maxTasks)
        +
        Description copied from class: Connector
        +
        Returns a set of configurations for Tasks based on the current configuration, + producing at most maxTasks configurations.
        +
        +
        Specified by:
        +
        taskConfigs in class Connector
        +
        Parameters:
        +
        maxTasks - maximum number of configurations to generate
        +
        Returns:
        +
        configurations for Tasks
        +
        +
        +
      • +
      • +
        +

        stop

        +
        public void stop()
        +
        Description copied from class: Connector
        +
        Stop this connector.
        +
        +
        Specified by:
        +
        stop in class Connector
        +
        +
        +
      • +
      • +
        +

        config

        +
        public ConfigDef config()
        +
        Description copied from class: Connector
        +
        Define the configuration for the connector.
        +
        +
        Specified by:
        +
        config in class Connector
        +
        Returns:
        +
        The ConfigDef for this connector; may not be null.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/tools/MockSinkConnector.html b/static/41/javadoc/org/apache/kafka/connect/tools/MockSinkConnector.html new file mode 100644 index 000000000..a0c57fa3a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/tools/MockSinkConnector.html @@ -0,0 +1,382 @@ + + + + +MockSinkConnector (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MockSinkConnector

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Versioned
    +
    +
    +
    public class MockSinkConnector +extends SinkConnector
    +
    Mock sink implementation which delegates to MockConnector.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        MockSinkConnector

        +
        public MockSinkConnector()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        initialize

        +
        public void initialize(ConnectorContext ctx)
        +
        Description copied from class: Connector
        +
        Initialize this connector, using the provided ConnectorContext to notify the runtime of + input configuration changes.
        +
        +
        Overrides:
        +
        initialize in class Connector
        +
        Parameters:
        +
        ctx - context object used to interact with the Kafka Connect runtime
        +
        +
        +
      • +
      • +
        +

        initialize

        +
        public void initialize(ConnectorContext ctx, + List<Map<String,String>> taskConfigs)
        +
        Description copied from class: Connector
        +

        + Initialize this connector, using the provided ConnectorContext to notify the runtime of + input configuration changes and using the provided set of Task configurations. + This version is only used to recover from failures. +

        +

        + The default implementation ignores the provided Task configurations. During recovery, Kafka Connect will request + an updated set of configurations and update the running Tasks appropriately. However, Connectors should + implement special handling of this case if it will avoid unnecessary changes to running Tasks. +

        +
        +
        Overrides:
        +
        initialize in class Connector
        +
        Parameters:
        +
        ctx - context object used to interact with the Kafka Connect runtime
        +
        taskConfigs - existing task configurations, which may be used when generating new task configs to avoid + churn in partition to task assignments
        +
        +
        +
      • +
      • +
        +

        reconfigure

        +
        public void reconfigure(Map<String,String> props)
        +
        Description copied from class: Connector
        +
        Reconfigure this Connector. Most implementations will not override this, using the default + implementation that calls Connector.stop() followed by Connector.start(Map). + Implementations only need to override this if they want to handle this process more + efficiently, e.g. without shutting down network connections to the external system.
        +
        +
        Overrides:
        +
        reconfigure in class Connector
        +
        Parameters:
        +
        props - new configuration settings
        +
        +
        +
      • +
      • +
        +

        validate

        +
        public Config validate(Map<String,String> connectorConfigs)
        +
        Description copied from class: Connector
        +
        Validate the connector configuration values against configuration definitions.
        +
        +
        Overrides:
        +
        validate in class Connector
        +
        Parameters:
        +
        connectorConfigs - the provided configuration values
        +
        Returns:
        +
        a parsed and validated Config containing any relevant validation errors with the raw + connectorConfigs which should prevent this configuration from being used.
        +
        +
        +
      • +
      • +
        +

        version

        +
        public String version()
        +
        Description copied from interface: Versioned
        +
        Get the version of this component.
        +
        +
        Returns:
        +
        the version, formatted as a String. The version may not be null or empty.
        +
        +
        +
      • +
      • +
        +

        start

        +
        public void start(Map<String,String> props)
        +
        Description copied from class: Connector
        +
        Start this Connector. This method will only be called on a clean Connector, i.e. it has + either just been instantiated and initialized or Connector.stop() has been invoked.
        +
        +
        Specified by:
        +
        start in class Connector
        +
        Parameters:
        +
        props - configuration settings
        +
        +
        +
      • +
      • +
        +

        taskClass

        +
        public Class<? extends Task> taskClass()
        +
        Description copied from class: Connector
        +
        Returns the Task implementation for this Connector.
        +
        +
        Specified by:
        +
        taskClass in class Connector
        +
        +
        +
      • +
      • +
        +

        taskConfigs

        +
        public List<Map<String,String>> taskConfigs(int maxTasks)
        +
        Description copied from class: Connector
        +
        Returns a set of configurations for Tasks based on the current configuration, + producing at most maxTasks configurations.
        +
        +
        Specified by:
        +
        taskConfigs in class Connector
        +
        Parameters:
        +
        maxTasks - maximum number of configurations to generate
        +
        Returns:
        +
        configurations for Tasks
        +
        +
        +
      • +
      • +
        +

        stop

        +
        public void stop()
        +
        Description copied from class: Connector
        +
        Stop this connector.
        +
        +
        Specified by:
        +
        stop in class Connector
        +
        +
        +
      • +
      • +
        +

        config

        +
        public ConfigDef config()
        +
        Description copied from class: Connector
        +
        Define the configuration for the connector.
        +
        +
        Specified by:
        +
        config in class Connector
        +
        Returns:
        +
        The ConfigDef for this connector; may not be null.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/tools/MockSinkTask.html b/static/41/javadoc/org/apache/kafka/connect/tools/MockSinkTask.html new file mode 100644 index 000000000..e3d36a8a9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/tools/MockSinkTask.html @@ -0,0 +1,255 @@ + + + + +MockSinkTask (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MockSinkTask

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.sink.SinkTask +
    org.apache.kafka.connect.tools.MockSinkTask
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Task
    +
    +
    +
    public class MockSinkTask +extends SinkTask
    +
    Task implementation for MockSinkConnector.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        MockSinkTask

        +
        public MockSinkTask()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        version

        +
        public String version()
        +
        Description copied from interface: Task
        +
        Get the version of this task. Usually this should be the same as the corresponding Connector class's version.
        +
        +
        Returns:
        +
        the version, formatted as a String
        +
        +
        +
      • +
      • +
        +

        start

        +
        public void start(Map<String,String> config)
        +
        Description copied from class: SinkTask
        +
        Start the Task. This should handle any configuration parsing and one-time setup of the task.
        +
        +
        Specified by:
        +
        start in interface Task
        +
        Specified by:
        +
        start in class SinkTask
        +
        Parameters:
        +
        config - initial configuration
        +
        +
        +
      • +
      • +
        +

        put

        +
        public void put(Collection<SinkRecord> records)
        +
        Description copied from class: SinkTask
        +
        Put the records in the sink. This should either write them to the downstream system or batch them for + later writing. If this method returns before the records are written to the downstream system, the task must + implement SinkTask.flush(Map) or SinkTask.preCommit(Map) to ensure that offsets are only committed for records + that have been written to the downstream system (hence avoiding data loss during failures). +

        + If this operation fails, the SinkTask may throw a RetriableException to + indicate that the framework should attempt to retry the same call again. Other exceptions will cause the task to + be stopped immediately. SinkTaskContext.timeout(long) can be used to set the maximum time before the + batch will be retried.

        +
        +
        Specified by:
        +
        put in class SinkTask
        +
        Parameters:
        +
        records - the collection of records to send
        +
        +
        +
      • +
      • +
        +

        stop

        +
        public void stop()
        +
        Description copied from class: SinkTask
        +
        Perform any cleanup to stop this task. In SinkTasks, this method is invoked only once outstanding calls to other + methods have completed (e.g., SinkTask.put(Collection) has returned) and a final SinkTask.flush(Map) and offset + commit has completed. Implementations of this method should only need to perform final cleanup operations, such + as closing network connections to the sink system.
        +
        +
        Specified by:
        +
        stop in interface Task
        +
        Specified by:
        +
        stop in class SinkTask
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
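The put()/flush()/stop() contract described above is easiest to see in a small SinkTask. The sketch below is hypothetical (not MockSinkTask itself): put() only buffers, so flush() must drain the buffer before offsets are committed.

```java
package example; // hypothetical package, for illustration only

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

/** Minimal sketch of the SinkTask contract: put() batches, flush() makes records durable. */
public class LoggingSinkTask extends SinkTask {

    private final List<SinkRecord> buffer = new ArrayList<>();
    private String prefix;

    @Override
    public String version() {
        return "0.0.1";
    }

    @Override
    public void start(Map<String, String> props) {
        // One-time setup; "log.prefix" is the hypothetical key from the connector sketch above.
        this.prefix = props.getOrDefault("log.prefix", "sink");
    }

    @Override
    public void put(Collection<SinkRecord> records) {
        // Batch for later writing; because put() returns before the records are
        // "written", flush() below must drain the buffer before offsets are committed.
        buffer.addAll(records);
    }

    @Override
    public void flush(Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
        // Write everything buffered so far; only then is it safe to commit the offsets.
        for (SinkRecord record : buffer) {
            System.out.println(prefix + ": " + record.topic() + "-" + record.kafkaPartition()
                    + "@" + record.kafkaOffset() + " -> " + record.value());
        }
        buffer.clear();
    }

    @Override
    public void stop() {
        // Called once outstanding put()/flush() calls have completed; release resources here.
        buffer.clear();
    }
}
```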
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/tools/MockSourceConnector.html b/static/41/javadoc/org/apache/kafka/connect/tools/MockSourceConnector.html new file mode 100644 index 000000000..95fc9cd44 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/tools/MockSourceConnector.html @@ -0,0 +1,373 @@ + + + + +MockSourceConnector (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MockSourceConnector

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Versioned
    +
    +
    +
    public class MockSourceConnector +extends SourceConnector
    +
    Mock source implementation which delegates to MockConnector.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        MockSourceConnector

        +
        public MockSourceConnector()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        initialize

        +
        public void initialize(ConnectorContext ctx)
        +
        Description copied from class: Connector
        +
        Initialize this connector, using the provided ConnectorContext to notify the runtime of + input configuration changes.
        +
        +
        Overrides:
        +
        initialize in class Connector
        +
        Parameters:
        +
        ctx - context object used to interact with the Kafka Connect runtime
        +
        +
        +
      • +
      • +
        +

        initialize

        +
        public void initialize(ConnectorContext ctx, + List<Map<String,String>> taskConfigs)
        +
        Description copied from class: Connector
        +

        + Initialize this connector, using the provided ConnectorContext to notify the runtime of + input configuration changes and using the provided set of Task configurations. + This version is only used to recover from failures. +

        +

        + The default implementation ignores the provided Task configurations. During recovery, Kafka Connect will request + an updated set of configurations and update the running Tasks appropriately. However, Connectors should + implement special handling of this case if it will avoid unnecessary changes to running Tasks. +

        +
        +
        Overrides:
        +
        initialize in class Connector
        +
        Parameters:
        +
        ctx - context object used to interact with the Kafka Connect runtime
        +
        taskConfigs - existing task configurations, which may be used when generating new task configs to avoid + churn in partition to task assignments
        +
        +
        +
      • +
      • +
        +

        reconfigure

        +
        public void reconfigure(Map<String,String> props)
        +
        Description copied from class: Connector
        +
        Reconfigure this Connector. Most implementations will not override this, using the default + implementation that calls Connector.stop() followed by Connector.start(Map). + Implementations only need to override this if they want to handle this process more + efficiently, e.g. without shutting down network connections to the external system.
        +
        +
        Overrides:
        +
        reconfigure in class Connector
        +
        Parameters:
        +
        props - new configuration settings
        +
        +
        +
      • +
      • +
        +

        validate

        +
        public Config validate(Map<String,String> connectorConfigs)
        +
        Description copied from class: Connector
        +
        Validate the connector configuration values against configuration definitions.
        +
        +
        Overrides:
        +
        validate in class Connector
        +
        Parameters:
        +
        connectorConfigs - the provided configuration values
        +
        Returns:
        +
a parsed and validated Config containing any relevant validation errors in the raw connectorConfigs that should prevent this configuration from being used.
        +
        +
        +
      • +
      • +
        +

        version

        +
        public String version()
        +
        Description copied from interface: Versioned
        +
        Get the version of this component.
        +
        +
        Returns:
        +
        the version, formatted as a String. The version may not be null or empty.
        +
        +
        +
      • +
      • +
        +

        start

        +
        public void start(Map<String,String> props)
        +
        Description copied from class: Connector
        +
        Start this Connector. This method will only be called on a clean Connector, i.e. it has + either just been instantiated and initialized or Connector.stop() has been invoked.
        +
        +
        Specified by:
        +
        start in class Connector
        +
        Parameters:
        +
        props - configuration settings
        +
        +
        +
      • +
      • +
        +

        taskClass

        +
        public Class<? extends Task> taskClass()
        +
        Description copied from class: Connector
        +
        Returns the Task implementation for this Connector.
        +
        +
        Specified by:
        +
        taskClass in class Connector
        +
        +
        +
      • +
      • +
        +

        taskConfigs

        +
        public List<Map<String,String>> taskConfigs(int maxTasks)
        +
        Description copied from class: Connector
        +
        Returns a set of configurations for Tasks based on the current configuration, + producing at most maxTasks configurations.
        +
        +
        Specified by:
        +
        taskConfigs in class Connector
        +
        Parameters:
        +
        maxTasks - maximum number of configurations to generate
        +
        Returns:
        +
        configurations for Tasks
        +
        +
        +
      • +
      • +
        +

        stop

        +
        public void stop()
        +
        Description copied from class: Connector
        +
        Stop this connector.
        +
        +
        Specified by:
        +
        stop in class Connector
        +
        +
        +
      • +
      • +
        +

        config

        +
        public ConfigDef config()
        +
        Description copied from class: Connector
        +
        Define the configuration for the connector.
        +
        +
        Specified by:
        +
        config in class Connector
        +
        Returns:
        +
        The ConfigDef for this connector; may not be null.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
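The validate() contract described above returns a Config whose per-key ConfigValue entries carry any error messages. A small, hypothetical usage sketch (the helper class and method names are invented for illustration):

```java
package example; // hypothetical package, for illustration only

import java.util.Map;

import org.apache.kafka.common.config.Config;
import org.apache.kafka.common.config.ConfigValue;
import org.apache.kafka.connect.connector.Connector;

/** Sketch: inspect the Config returned by Connector.validate() for errors. */
public class ValidationCheck {

    /** Returns true if the supplied settings pass the connector's own validation. */
    public static boolean isUsable(Connector connector, Map<String, String> connectorConfigs) {
        Config result = connector.validate(connectorConfigs);
        boolean ok = true;
        for (ConfigValue value : result.configValues()) {
            if (!value.errorMessages().isEmpty()) {
                ok = false;
                System.err.println("Invalid setting '" + value.name() + "': " + value.errorMessages());
            }
        }
        return ok;
    }
}
```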
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/tools/MockSourceTask.html b/static/41/javadoc/org/apache/kafka/connect/tools/MockSourceTask.html new file mode 100644 index 000000000..b58ec652e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/tools/MockSourceTask.html @@ -0,0 +1,266 @@ + + + + +MockSourceTask (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MockSourceTask

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.source.SourceTask +
    org.apache.kafka.connect.tools.MockSourceTask
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Task
    +
    +
    +
    public class MockSourceTask +extends SourceTask
    +
    Task implementation for MockSourceConnector.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        MockSourceTask

        +
        public MockSourceTask()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        version

        +
        public String version()
        +
        Description copied from interface: Task
        +
        Get the version of this task. Usually this should be the same as the corresponding Connector class's version.
        +
        +
        Returns:
        +
        the version, formatted as a String
        +
        +
        +
      • +
      • +
        +

        start

        +
        public void start(Map<String,String> config)
        +
        Description copied from class: SourceTask
        +
        Start the Task. This should handle any configuration parsing and one-time setup of the task.
        +
        +
        Specified by:
        +
        start in interface Task
        +
        Specified by:
        +
        start in class SourceTask
        +
        Parameters:
        +
        config - initial configuration
        +
        +
        +
      • +
      • +
        +

        poll

        +
        public List<SourceRecord> poll()
        +
        Description copied from class: SourceTask
        +
        Poll this source task for new records. If no data is currently available, this method + should block but return control to the caller regularly (by returning null) in + order for the task to transition to the PAUSED state if requested to do so. +

        + The task will be stopped on a separate thread, and when that happens + this method is expected to unblock, quickly finish up any remaining processing, and + return.

        +
        +
        Specified by:
        +
        poll in class SourceTask
        +
        Returns:
        +
        a list of source records
        +
        +
        +
      • +
      • +
        +

        stop

        +
        public void stop()
        +
        Description copied from class: SourceTask
        +
Signal this SourceTask to stop. In SourceTasks, this method only needs to signal to the task that it should stop trying to poll for new data and interrupt any outstanding poll() requests. It is not required that the task has fully stopped. Note that this method may be invoked from a different thread than SourceTask.poll() and SourceTask.commit().

        + For example, if a task uses a Selector to receive data over the network, this method + could set a flag that will force SourceTask.poll() to exit immediately and invoke + wakeup() to interrupt any ongoing requests.

        +
        +
        Specified by:
        +
        stop in interface Task
        +
        Specified by:
        +
        stop in class SourceTask
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
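The poll()/stop() contract described above (poll returns records or null to yield control, stop only signals) can be sketched as follows. This is a hypothetical task, not MockSourceTask; the `topic` config key and the one-second sleep are assumptions for the example.

```java
package example; // hypothetical package, for illustration only

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

/** Minimal sketch of the SourceTask contract: poll() yields, stop() only signals. */
public class CounterSourceTask extends SourceTask {

    private final AtomicBoolean stopping = new AtomicBoolean(false);
    private String topic;
    private long seqno;

    @Override
    public String version() {
        return "0.0.1";
    }

    @Override
    public void start(Map<String, String> props) {
        // "topic" is a hypothetical config key used only for this illustration.
        this.topic = props.getOrDefault("topic", "counter-topic");
    }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        if (stopping.get()) {
            return null;        // yield control so the framework can stop or pause the task
        }
        Thread.sleep(1000);     // simulate waiting for external data
        Map<String, ?> sourcePartition = Collections.singletonMap("source", topic);
        Map<String, ?> sourceOffset = Collections.singletonMap("seqno", seqno);
        SourceRecord record = new SourceRecord(sourcePartition, sourceOffset, topic,
                Schema.STRING_SCHEMA, "message-" + seqno++);
        return Collections.singletonList(record);
    }

    @Override
    public void stop() {
        // Only signal; poll() checks the flag and returns promptly.
        stopping.set(true);
    }
}
```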
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/tools/SchemaSourceConnector.html b/static/41/javadoc/org/apache/kafka/connect/tools/SchemaSourceConnector.html new file mode 100644 index 000000000..0c276a8f8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/tools/SchemaSourceConnector.html @@ -0,0 +1,278 @@ + + + + +SchemaSourceConnector (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SchemaSourceConnector

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Versioned
    +
    +
    +
    public class SchemaSourceConnector +extends SourceConnector
    +
    A simple source connector that is capable of producing static data with + Struct schemas.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SchemaSourceConnector

        +
        public SchemaSourceConnector()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        version

        +
        public String version()
        +
        Description copied from interface: Versioned
        +
        Get the version of this component.
        +
        +
        Returns:
        +
        the version, formatted as a String. The version may not be null or empty.
        +
        +
        +
      • +
      • +
        +

        start

        +
        public void start(Map<String,String> props)
        +
        Description copied from class: Connector
        +
        Start this Connector. This method will only be called on a clean Connector, i.e. it has + either just been instantiated and initialized or Connector.stop() has been invoked.
        +
        +
        Specified by:
        +
        start in class Connector
        +
        Parameters:
        +
        props - configuration settings
        +
        +
        +
      • +
      • +
        +

        taskClass

        +
        public Class<? extends Task> taskClass()
        +
        Description copied from class: Connector
        +
        Returns the Task implementation for this Connector.
        +
        +
        Specified by:
        +
        taskClass in class Connector
        +
        +
        +
      • +
      • +
        +

        taskConfigs

        +
        public List<Map<String,String>> taskConfigs(int maxTasks)
        +
        Description copied from class: Connector
        +
        Returns a set of configurations for Tasks based on the current configuration, + producing at most maxTasks configurations.
        +
        +
        Specified by:
        +
        taskConfigs in class Connector
        +
        Parameters:
        +
        maxTasks - maximum number of configurations to generate
        +
        Returns:
        +
        configurations for Tasks
        +
        +
        +
      • +
      • +
        +

        stop

        +
        public void stop()
        +
        Description copied from class: Connector
        +
        Stop this connector.
        +
        +
        Specified by:
        +
        stop in class Connector
        +
        +
        +
      • +
      • +
        +

        config

        +
        public ConfigDef config()
        +
        Description copied from class: Connector
        +
        Define the configuration for the connector.
        +
        +
        Specified by:
        +
        config in class Connector
        +
        Returns:
        +
        The ConfigDef for this connector; may not be null.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/tools/SchemaSourceTask.html b/static/41/javadoc/org/apache/kafka/connect/tools/SchemaSourceTask.html new file mode 100644 index 000000000..2df7bae11 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/tools/SchemaSourceTask.html @@ -0,0 +1,399 @@ + + + + +SchemaSourceTask (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SchemaSourceTask

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.source.SourceTask +
    org.apache.kafka.connect.tools.SchemaSourceTask
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Task
    +
    +
    +
    public class SchemaSourceTask +extends SourceTask
    +
    Task implementation for SchemaSourceConnector.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      + +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SchemaSourceTask

        +
        public SchemaSourceTask()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        version

        +
        public String version()
        +
        Description copied from interface: Task
        +
        Get the version of this task. Usually this should be the same as the corresponding Connector class's version.
        +
        +
        Returns:
        +
        the version, formatted as a String
        +
        +
        +
      • +
      • +
        +

        start

        +
        public void start(Map<String,String> props)
        +
        Description copied from class: SourceTask
        +
        Start the Task. This should handle any configuration parsing and one-time setup of the task.
        +
        +
        Specified by:
        +
        start in interface Task
        +
        Specified by:
        +
        start in class SourceTask
        +
        Parameters:
        +
        props - initial configuration
        +
        +
        +
      • +
      • +
        +

        poll

        +
        public List<SourceRecord> poll()
        +
        Description copied from class: SourceTask
        +
        Poll this source task for new records. If no data is currently available, this method + should block but return control to the caller regularly (by returning null) in + order for the task to transition to the PAUSED state if requested to do so. +

        + The task will be stopped on a separate thread, and when that happens + this method is expected to unblock, quickly finish up any remaining processing, and + return.

        +
        +
        Specified by:
        +
        poll in class SourceTask
        +
        Returns:
        +
        a list of source records
        +
        +
        +
      • +
      • +
        +

        stop

        +
        public void stop()
        +
        Description copied from class: SourceTask
        +
Signal this SourceTask to stop. In SourceTasks, this method only needs to signal to the task that it should stop trying to poll for new data and interrupt any outstanding poll() requests. It is not required that the task has fully stopped. Note that this method may be invoked from a different thread than SourceTask.poll() and SourceTask.commit().

        + For example, if a task uses a Selector to receive data over the network, this method + could set a flag that will force SourceTask.poll() to exit immediately and invoke + wakeup() to interrupt any ongoing requests.

        +
        +
        Specified by:
        +
        stop in interface Task
        +
        Specified by:
        +
        stop in class SourceTask
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/tools/VerifiableSinkConnector.html b/static/41/javadoc/org/apache/kafka/connect/tools/VerifiableSinkConnector.html new file mode 100644 index 000000000..a99a12fe5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/tools/VerifiableSinkConnector.html @@ -0,0 +1,294 @@ + + + + +VerifiableSinkConnector (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class VerifiableSinkConnector

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Versioned
    +
    +
    +
    public class VerifiableSinkConnector +extends SinkConnector
    +
    A connector primarily intended for system tests.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        VerifiableSinkConnector

        +
        public VerifiableSinkConnector()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        version

        +
        public String version()
        +
        Description copied from interface: Versioned
        +
        Get the version of this component.
        +
        +
        Returns:
        +
        the version, formatted as a String. The version may not be null or empty.
        +
        +
        +
      • +
      • +
        +

        start

        +
        public void start(Map<String,String> props)
        +
        Description copied from class: Connector
        +
        Start this Connector. This method will only be called on a clean Connector, i.e. it has + either just been instantiated and initialized or Connector.stop() has been invoked.
        +
        +
        Specified by:
        +
        start in class Connector
        +
        Parameters:
        +
        props - configuration settings
        +
        +
        +
      • +
      • +
        +

        taskClass

        +
        public Class<? extends Task> taskClass()
        +
        Description copied from class: Connector
        +
        Returns the Task implementation for this Connector.
        +
        +
        Specified by:
        +
        taskClass in class Connector
        +
        +
        +
      • +
      • +
        +

        taskConfigs

        +
        public List<Map<String,String>> taskConfigs(int maxTasks)
        +
        Description copied from class: Connector
        +
        Returns a set of configurations for Tasks based on the current configuration, + producing at most maxTasks configurations.
        +
        +
        Specified by:
        +
        taskConfigs in class Connector
        +
        Parameters:
        +
        maxTasks - maximum number of configurations to generate
        +
        Returns:
        +
        configurations for Tasks
        +
        +
        +
      • +
      • +
        +

        stop

        +
        public void stop()
        +
        Description copied from class: Connector
        +
        Stop this connector.
        +
        +
        Specified by:
        +
        stop in class Connector
        +
        +
        +
      • +
      • +
        +

        config

        +
        public ConfigDef config()
        +
        Description copied from class: Connector
        +
        Define the configuration for the connector.
        +
        +
        Specified by:
        +
        config in class Connector
        +
        Returns:
        +
        The ConfigDef for this connector; may not be null.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/tools/VerifiableSinkTask.html b/static/41/javadoc/org/apache/kafka/connect/tools/VerifiableSinkTask.html new file mode 100644 index 000000000..d6b9249d9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/tools/VerifiableSinkTask.html @@ -0,0 +1,331 @@ + + + + +VerifiableSinkTask (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class VerifiableSinkTask

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.sink.SinkTask +
    org.apache.kafka.connect.tools.VerifiableSinkTask
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Task
    +
    +
    +
    public class VerifiableSinkTask +extends SinkTask
    +
    Counterpart to VerifiableSourceTask that consumes records and logs information about each to stdout. This + allows validation of processing of messages by sink tasks on distributed workers even in the face of worker restarts + and failures. This task relies on the offset management provided by the Kafka Connect framework and therefore can detect + bugs in its implementation.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      + +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        VerifiableSinkTask

        +
        public VerifiableSinkTask()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        version

        +
        public String version()
        +
        Description copied from interface: Task
        +
        Get the version of this task. Usually this should be the same as the corresponding Connector class's version.
        +
        +
        Returns:
        +
        the version, formatted as a String
        +
        +
        +
      • +
      • +
        +

        start

        +
        public void start(Map<String,String> props)
        +
        Description copied from class: SinkTask
        +
        Start the Task. This should handle any configuration parsing and one-time setup of the task.
        +
        +
        Specified by:
        +
        start in interface Task
        +
        Specified by:
        +
        start in class SinkTask
        +
        Parameters:
        +
        props - initial configuration
        +
        +
        +
      • +
      • +
        +

        put

        +
        public void put(Collection<SinkRecord> records)
        +
        Description copied from class: SinkTask
        +
        Put the records in the sink. This should either write them to the downstream system or batch them for + later writing. If this method returns before the records are written to the downstream system, the task must + implement SinkTask.flush(Map) or SinkTask.preCommit(Map) to ensure that offsets are only committed for records + that have been written to the downstream system (hence avoiding data loss during failures). +

        + If this operation fails, the SinkTask may throw a RetriableException to + indicate that the framework should attempt to retry the same call again. Other exceptions will cause the task to + be stopped immediately. SinkTaskContext.timeout(long) can be used to set the maximum time before the + batch will be retried.

        +
        +
        Specified by:
        +
        put in class SinkTask
        +
        Parameters:
        +
        records - the collection of records to send
        +
        +
        +
      • +
      • +
        +

        flush

        +
        public void flush(Map<TopicPartition,OffsetAndMetadata> offsets)
        +
        Description copied from class: SinkTask
        +
        Flush all records that have been SinkTask.put(Collection) for the specified topic-partitions.
        +
        +
        Overrides:
        +
        flush in class SinkTask
        +
        Parameters:
        +
        offsets - the current offset state as of the last call to SinkTask.put(Collection), provided for + convenience but could also be determined by tracking all offsets included in the + SinkRecords passed to SinkTask.put(java.util.Collection<org.apache.kafka.connect.sink.SinkRecord>). Note that the topic, partition and offset + here correspond to the original Kafka topic partition and offset, before any + transformations have been applied. These can be tracked by the task + through the SinkRecord.originalTopic(), SinkRecord.originalKafkaPartition() + and SinkRecord.originalKafkaOffset() methods.
        +
        +
        +
      • +
      • +
        +

        stop

        +
        public void stop()
        +
        Description copied from class: SinkTask
        +
        Perform any cleanup to stop this task. In SinkTasks, this method is invoked only once outstanding calls to other + methods have completed (e.g., SinkTask.put(Collection) has returned) and a final SinkTask.flush(Map) and offset + commit has completed. Implementations of this method should only need to perform final cleanup operations, such + as closing network connections to the sink system.
        +
        +
        Specified by:
        +
        stop in interface Task
        +
        Specified by:
        +
        stop in class SinkTask
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
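The flush() documentation above notes that offsets should be tracked against the original topic, partition and offset of each record (via SinkRecord.originalTopic(), originalKafkaPartition() and originalKafkaOffset()), since transformations may have changed the visible coordinates. A hedged sketch of that idea, using preCommit() (also referenced in the put() documentation) to report only what was written:

```java
package example; // hypothetical package, for illustration only

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

/** Sketch: track committable offsets by the records' pre-transformation coordinates. */
public class OffsetTrackingSinkTask extends SinkTask {

    private final Map<TopicPartition, OffsetAndMetadata> written = new HashMap<>();

    @Override
    public String version() {
        return "0.0.1";
    }

    @Override
    public void start(Map<String, String> props) {
        // No setup needed for this sketch.
    }

    @Override
    public void put(Collection<SinkRecord> records) {
        for (SinkRecord record : records) {
            // Pretend the record was written synchronously, then remember the next
            // offset to commit using the record's original (pre-transformation) coordinates.
            TopicPartition tp =
                    new TopicPartition(record.originalTopic(), record.originalKafkaPartition());
            written.put(tp, new OffsetAndMetadata(record.originalKafkaOffset() + 1));
        }
    }

    @Override
    public Map<TopicPartition, OffsetAndMetadata> preCommit(Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
        // Commit only what this task has actually written.
        return new HashMap<>(written);
    }

    @Override
    public void stop() {
        written.clear();
    }
}
```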
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/tools/VerifiableSourceConnector.html b/static/41/javadoc/org/apache/kafka/connect/tools/VerifiableSourceConnector.html new file mode 100644 index 000000000..6c0fe1d3b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/tools/VerifiableSourceConnector.html @@ -0,0 +1,285 @@ + + + + +VerifiableSourceConnector (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class VerifiableSourceConnector

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Versioned
    +
    +
    +
    public class VerifiableSourceConnector +extends SourceConnector
    +
    A connector primarily intended for system tests.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        VerifiableSourceConnector

        +
        public VerifiableSourceConnector()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        version

        +
        public String version()
        +
        Description copied from interface: Versioned
        +
        Get the version of this component.
        +
        +
        Returns:
        +
        the version, formatted as a String. The version may not be null or empty.
        +
        +
        +
      • +
      • +
        +

        start

        +
        public void start(Map<String,String> props)
        +
        Description copied from class: Connector
        +
        Start this Connector. This method will only be called on a clean Connector, i.e. it has + either just been instantiated and initialized or Connector.stop() has been invoked.
        +
        +
        Specified by:
        +
        start in class Connector
        +
        Parameters:
        +
        props - configuration settings
        +
        +
        +
      • +
      • +
        +

        taskClass

        +
        public Class<? extends Task> taskClass()
        +
        Description copied from class: Connector
        +
        Returns the Task implementation for this Connector.
        +
        +
        Specified by:
        +
        taskClass in class Connector
        +
        +
        +
      • +
      • +
        +

        taskConfigs

        +
        public List<Map<String,String>> taskConfigs(int maxTasks)
        +
        Description copied from class: Connector
        +
        Returns a set of configurations for Tasks based on the current configuration, + producing at most maxTasks configurations.
        +
        +
        Specified by:
        +
        taskConfigs in class Connector
        +
        Parameters:
        +
        maxTasks - maximum number of configurations to generate
        +
        Returns:
        +
        configurations for Tasks
        +
        +
        +
      • +
      • +
        +

        stop

        +
        public void stop()
        +
        Description copied from class: Connector
        +
        Stop this connector.
        +
        +
        Specified by:
        +
        stop in class Connector
        +
        +
        +
      • +
      • +
        +

        config

        +
        public ConfigDef config()
        +
        Description copied from class: Connector
        +
        Define the configuration for the connector.
        +
        +
        Specified by:
        +
        config in class Connector
        +
        Returns:
        +
        The ConfigDef for this connector; may not be null.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/tools/VerifiableSourceTask.html b/static/41/javadoc/org/apache/kafka/connect/tools/VerifiableSourceTask.html new file mode 100644 index 000000000..0965e1105 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/tools/VerifiableSourceTask.html @@ -0,0 +1,404 @@ + + + + +VerifiableSourceTask (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class VerifiableSourceTask

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.source.SourceTask +
    org.apache.kafka.connect.tools.VerifiableSourceTask
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Task
    +
    +
    +
    public class VerifiableSourceTask +extends SourceTask
    +
    A connector primarily intended for system tests. The connector simply generates as many tasks as requested. The + tasks print metadata in the form of JSON to stdout for each message generated, making externally visible which + messages have been sent. Each message is also assigned a unique, increasing seqno that is passed to Kafka Connect; when + tasks are started on new nodes, this seqno is used to resume where the task previously left off, allowing for + testing of distributed Kafka Connect. +

    + If logging is left enabled, log output on stdout can be easily ignored by checking whether a given line is valid JSON.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      + +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        VerifiableSourceTask

        +
        public VerifiableSourceTask()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        version

        +
        public String version()
        +
        Description copied from interface: Task
        +
        Get the version of this task. Usually this should be the same as the corresponding Connector class's version.
        +
        +
        Returns:
        +
        the version, formatted as a String
        +
        +
        +
      • +
      • +
        +

        start

        +
        public void start(Map<String,String> props)
        +
        Description copied from class: SourceTask
        +
        Start the Task. This should handle any configuration parsing and one-time setup of the task.
        +
        +
        Specified by:
        +
        start in interface Task
        +
        Specified by:
        +
        start in class SourceTask
        +
        Parameters:
        +
        props - initial configuration
        +
        +
        +
      • +
      • +
        +

        poll

        +
        public List<SourceRecord> poll()
        +
        Description copied from class: SourceTask
        +
        Poll this source task for new records. If no data is currently available, this method + should block but return control to the caller regularly (by returning null) in + order for the task to transition to the PAUSED state if requested to do so. +

        + The task will be stopped on a separate thread, and when that happens + this method is expected to unblock, quickly finish up any remaining processing, and + return.

        +
        +
        Specified by:
        +
        poll in class SourceTask
        +
        Returns:
        +
        a list of source records
        +
        +
        +
      • +
      • +
        +

        commitRecord

        +
        public void commitRecord(SourceRecord record, + RecordMetadata metadata)
        +
        Description copied from class: SourceTask
        +

        + Commit an individual SourceRecord when the callback from the producer client is received. This method is + also called when a record is filtered by a transformation or when "errors.tolerance" is set to "all" + and thus will never be ACK'd by a broker. + In both cases metadata will be null. +

        + SourceTasks are not required to implement this functionality; Kafka Connect will record offsets + automatically. This hook is provided for systems that also need to store offsets internally + in their own system. +

        + The default implementation is a nop. It is not necessary to implement the method.

        +
        +
        Overrides:
        +
        commitRecord in class SourceTask
        +
        Parameters:
        +
        record - SourceRecord that was successfully sent via the producer, filtered by a transformation, or dropped on producer exception
        +
        metadata - RecordMetadata record metadata returned from the broker, or null if the record was filtered or if producer exceptions are ignored
        +
        +
        +
      • +
      • +
        +

        stop

        +
        public void stop()
        +
        Description copied from class: SourceTask
        +
Signal this SourceTask to stop. In SourceTasks, this method only needs to signal to the task that it should stop trying to poll for new data and interrupt any outstanding poll() requests. It is not required that the task has fully stopped. Note that this method may be invoked from a different thread than SourceTask.poll() and SourceTask.commit().

        + For example, if a task uses a Selector to receive data over the network, this method + could set a flag that will force SourceTask.poll() to exit immediately and invoke + wakeup() to interrupt any ongoing requests.

        +
        +
        Specified by:
        +
        stop in interface Task
        +
        Specified by:
        +
        stop in class SourceTask
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
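The commitRecord() hook documented above is mainly useful for tasks that keep offsets in their own system; metadata is null for records that were filtered or intentionally dropped. A minimal, hypothetical override (declared abstract so the other SourceTask methods can be omitted from the sketch):

```java
package example; // hypothetical package, for illustration only

import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

/** Sketch of commitRecord(): handle both acknowledged and filtered/dropped records. */
public abstract class AckLoggingSourceTask extends SourceTask {

    @Override
    public void commitRecord(SourceRecord record, RecordMetadata metadata) throws InterruptedException {
        if (metadata == null) {
            // Filtered by a transformation or dropped under errors.tolerance=all:
            // it is still safe to advance our own offset store.
            System.out.println("record skipped, source offset " + record.sourceOffset());
        } else {
            System.out.println("acked at " + metadata.topic() + "-" + metadata.partition()
                    + "@" + metadata.offset());
        }
        // A real task would persist record.sourceOffset() to its external offset store here.
    }
}
```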
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/tools/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/tools/package-summary.html new file mode 100644 index 000000000..bcaa34fd5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/tools/package-summary.html @@ -0,0 +1,128 @@ + + + + +org.apache.kafka.connect.tools (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.connect.tools

    +
    +
    +
    package org.apache.kafka.connect.tools
    +
    +
Provides source and sink connector implementations used for testing.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/tools/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/tools/package-tree.html new file mode 100644 index 000000000..9b4583489 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/tools/package-tree.html @@ -0,0 +1,101 @@ + + + + +org.apache.kafka.connect.tools Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.connect.tools

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/transforms/Transformation.html b/static/41/javadoc/org/apache/kafka/connect/transforms/Transformation.html new file mode 100644 index 000000000..5388fe58c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/transforms/Transformation.html @@ -0,0 +1,194 @@ + + + + +Transformation (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Transformation<R extends ConnectRecord<R>>

    +
    +
    +
    +
    Type Parameters:
    +
    R - The type of record (must be an implementation of ConnectRecord)
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Closeable, Configurable
    +
    +
    +
    public interface Transformation<R extends ConnectRecord<R>> +extends Configurable, Closeable
    +
    Single message transformation for Kafka Connect record types. +

    + Connectors can be configured with transformations to make lightweight message-at-a-time modifications. +

    Kafka Connect may discover implementations of this interface using the Java ServiceLoader mechanism. + To support this, implementations of this interface should also contain a service provider configuration file in + META-INF/services/org.apache.kafka.connect.transforms.Transformation. + +

    Implement Monitorable to enable the transformation to register metrics. + The following tags are automatically added to all metrics registered: connector set to connector name, + task set to the task id and transformation set to the transformation alias.

    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      apply(R record)
      +
      +
Apply the transformation to the record and return another record object (which may be the record itself) or null, corresponding to a map or filter operation respectively.
      +
      +
      void
      + +
      +
Signal that this transformation instance will no longer be used.
      +
      + + +
      +
      Configuration specification for this transformation.
      +
      +
      +
      +
      +
      +

      Methods inherited from interface org.apache.kafka.common.Configurable

      +configure
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        apply

        +
        R apply(R record)
        +
Apply the transformation to the record and return another record object (which may be the record itself) or null, corresponding to a map or filter operation respectively.

        + A transformation must not mutate objects reachable from the given record + (including, but not limited to, Headers, + Structs, Lists, and Maps). + If such objects need to be changed, a new ConnectRecord should be created and returned. +

        + The implementation must be thread-safe.

        +
        +
        Parameters:
        +
        record - the record to be transformed; may not be null
        +
        Returns:
        +
        the transformed record; may be null to indicate that the record should be dropped
        +
        +
        +
      • +
      • +
        +

        config

        +
        ConfigDef config()
        +
        Configuration specification for this transformation.
        +
        +
      • +
      • +
        +

        close

        +
        void close()
        +
Signal that this transformation instance will no longer be used.
        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        Specified by:
        +
        close in interface Closeable
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
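A short sketch of the Transformation contract described above: apply() builds a new record rather than mutating its input, configure() reads settings, and config() exposes them. This is a hypothetical transformation; the class name `TopicSuffix` and the `topic.suffix` key are invented for the example.

```java
package example; // hypothetical package, for illustration only

import java.util.Map;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.transforms.Transformation;

/** Sketch of a single message transformation: never mutate the input record. */
public class TopicSuffix<R extends ConnectRecord<R>> implements Transformation<R> {

    private static final ConfigDef CONFIG_DEF = new ConfigDef()
            .define("topic.suffix", ConfigDef.Type.STRING, ".copy", ConfigDef.Importance.MEDIUM,
                    "Suffix appended to the topic of every record");

    private String suffix;

    @Override
    public void configure(Map<String, ?> configs) {
        Object configured = configs.get("topic.suffix");
        this.suffix = configured == null ? ".copy" : configured.toString();
    }

    @Override
    public R apply(R record) {
        if (record == null) {
            return null; // returning null drops the record (filter semantics)
        }
        // Build a new record rather than mutating the one we were given.
        return record.newRecord(record.topic() + suffix, record.kafkaPartition(),
                record.keySchema(), record.key(),
                record.valueSchema(), record.value(),
                record.timestamp());
    }

    @Override
    public ConfigDef config() {
        return CONFIG_DEF;
    }

    @Override
    public void close() {
        // No resources to release in this sketch.
    }
}
```

To be discoverable via the ServiceLoader mechanism mentioned above, such a class would also be listed in META-INF/services/org.apache.kafka.connect.transforms.Transformation inside its plugin JAR.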
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/transforms/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/transforms/package-summary.html new file mode 100644 index 000000000..a6e98dc5b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/transforms/package-summary.html @@ -0,0 +1,100 @@ + + + + +org.apache.kafka.connect.transforms (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.connect.transforms

    +
    +
    +
    package org.apache.kafka.connect.transforms
    +
    +
    Provides a pluggable interface for altering data which is being moved by Connect.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/transforms/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/transforms/package-tree.html new file mode 100644 index 000000000..62179275d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/transforms/package-tree.html @@ -0,0 +1,80 @@ + + + + +org.apache.kafka.connect.transforms Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.connect.transforms

    +Package Hierarchies: + +
    +
    +

    Interface Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/transforms/predicates/Predicate.html b/static/41/javadoc/org/apache/kafka/connect/transforms/predicates/Predicate.html new file mode 100644 index 000000000..127a72edf --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/transforms/predicates/Predicate.html @@ -0,0 +1,187 @@ + + + + +Predicate (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Predicate<R extends ConnectRecord<R>>

    +
    +
    +
    +
    Type Parameters:
    +
    R - The type of record.
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Configurable
    +
    +
    +
    public interface Predicate<R extends ConnectRecord<R>> +extends Configurable, AutoCloseable
    +

    A predicate on records. + Predicates can be used to conditionally apply a Transformation + by configuring the transformation's predicate (and negate) configuration parameters. + In particular, the Filter transformation can be conditionally applied in order to filter + certain records from further processing. + +

    Kafka Connect may discover implementations of this interface using the Java ServiceLoader mechanism. + To support this, implementations of this interface should also contain a service provider configuration file in + META-INF/services/org.apache.kafka.connect.transforms.predicates.Predicate. + +

    Implement Monitorable to enable the predicate to register metrics. + The following tags are automatically added to all metrics registered: connector set to connector name, + task set to the task id and predicate set to the predicate alias.

    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
       
      + + +
      +
      Configuration specification for this predicate.
      +
      +
      boolean
      +
      test(R record)
      +
      +
      Returns whether the given record satisfies this predicate.
      +
      +
      +
      +
      +
      +

      Methods inherited from interface org.apache.kafka.common.Configurable

      +configure
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        config

        +
        ConfigDef config()
        +
        Configuration specification for this predicate.
        +
        +
        Returns:
        +
        the configuration definition for this predicate; never null
        +
        +
        +
      • +
      • +
        +

        test

        +
        boolean test(R record)
        +
        Returns whether the given record satisfies this predicate.
        +
        +
        Parameters:
        +
        record - the record to evaluate; may not be null
        +
        Returns:
        +
        true if the predicate matches, or false otherwise
        +
        +
        +
      • +
      • +
        +

        close

        +
        void close()
        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
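A matching sketch for the Predicate contract described above: test() decides whether a record satisfies the predicate, and the predicate can then be referenced from a transformation's predicate (and negate) configuration. The class name `TopicPrefixPredicate` and the `prefix` key are invented for this example.

```java
package example; // hypothetical package, for illustration only

import java.util.Map;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.transforms.predicates.Predicate;

/** Sketch of a predicate: matches records whose topic starts with a configured prefix. */
public class TopicPrefixPredicate<R extends ConnectRecord<R>> implements Predicate<R> {

    private static final ConfigDef CONFIG_DEF = new ConfigDef()
            .define("prefix", ConfigDef.Type.STRING, ConfigDef.Importance.MEDIUM,
                    "Records whose topic starts with this prefix satisfy the predicate");

    private String prefix;

    @Override
    public void configure(Map<String, ?> configs) {
        Object value = configs.get("prefix");
        this.prefix = value == null ? "" : value.toString();
    }

    @Override
    public ConfigDef config() {
        return CONFIG_DEF; // never null
    }

    @Override
    public boolean test(R record) {
        return record.topic() != null && record.topic().startsWith(prefix);
    }

    @Override
    public void close() {
        // Nothing to release.
    }
}
```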
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/transforms/predicates/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/transforms/predicates/package-summary.html new file mode 100644 index 000000000..80fe45fa9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/transforms/predicates/package-summary.html @@ -0,0 +1,100 @@ + + + + +org.apache.kafka.connect.transforms.predicates (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.connect.transforms.predicates

    +
    +
    +
    package org.apache.kafka.connect.transforms.predicates
    +
    +
    Provides a pluggable interface for describing when a Transformation should be applied to a record.
    +
    +
    +
      +
    • + +
    • +
    • +
      +
      Interfaces
      +
      +
      Class
      +
      Description
      +
      Predicate<R extends ConnectRecord<R>>
      +
      +
      A predicate on records.
      +
      +
      +
      +
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/transforms/predicates/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/transforms/predicates/package-tree.html new file mode 100644 index 000000000..c9f9a65ca --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/transforms/predicates/package-tree.html @@ -0,0 +1,76 @@ + + + + +org.apache.kafka.connect.transforms.predicates Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.connect.transforms.predicates

    +Package Hierarchies: + +
    +
    +

    Interface Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/util/ConnectorUtils.html b/static/41/javadoc/org/apache/kafka/connect/util/ConnectorUtils.html new file mode 100644 index 000000000..6fdebf672 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/util/ConnectorUtils.html @@ -0,0 +1,177 @@ + + + + +ConnectorUtils (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ConnectorUtils

    +
    +
    java.lang.Object +
    org.apache.kafka.connect.util.ConnectorUtils
    +
    +
    +
    +
    public class ConnectorUtils +extends Object
    +
    Utilities that connector implementations might find useful. Contains common building blocks + for writing connectors.
    +
    +
    +
      + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      + +
       
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      static <T> List<List<T>>
      +
      groupPartitions(List<T> elements, + int numGroups)
      +
      +
Given a list of elements and a target number of groups, generates a list of groups of elements to match the target number of groups, spreading them evenly among the groups.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ConnectorUtils

        +
        public ConnectorUtils()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        groupPartitions

        +
        public static <T> List<List<T>> groupPartitions(List<T> elements, + int numGroups)
        +
Given a list of elements and a target number of groups, generates a list of groups of elements to match the target number of groups, spreading them evenly among the groups. This generates groups with contiguous elements, which results in intuitive ordering if your elements are also ordered (e.g. alphabetical lists of table names if you sort table names alphabetically to generate the raw partitions), or can result in efficient partitioning if elements are sorted according to some criterion that affects performance (e.g. topic partitions with the same leader).
        +
        +
        Parameters:
        +
        elements - list of elements to partition
        +
        numGroups - the number of output groups to generate.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
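A small usage sketch for groupPartitions() as documented above: spreading a sorted list of elements (here, made-up "table names") evenly across a requested number of groups, as a connector's taskConfigs() typically does.

```java
package example; // hypothetical package, for illustration only

import java.util.Arrays;
import java.util.List;

import org.apache.kafka.connect.util.ConnectorUtils;

/** Usage sketch: split work into contiguous, evenly sized groups for tasks. */
public class GroupPartitionsExample {

    public static void main(String[] args) {
        List<String> tables = Arrays.asList("a_table", "b_table", "c_table", "d_table", "e_table");

        // Ask for 2 groups: contiguous, evenly sized chunks, e.g. [a,b,c] and [d,e].
        List<List<String>> groups = ConnectorUtils.groupPartitions(tables, 2);

        for (int i = 0; i < groups.size(); i++) {
            System.out.println("task " + i + " -> " + groups.get(i));
        }
    }
}
```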
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/util/package-summary.html b/static/41/javadoc/org/apache/kafka/connect/util/package-summary.html new file mode 100644 index 000000000..4b83f23d6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/util/package-summary.html @@ -0,0 +1,87 @@ + + + + +org.apache.kafka.connect.util (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.connect.util

    +
    +
    +
    package org.apache.kafka.connect.util
    +
    +
    Provides common utilities that can be used in component implementations.
    +
    +
    +
      +
    • +
      +
      Classes
      +
      +
      Class
      +
      Description
      + +
      +
      Utilities that connector implementations might find useful.
      +
      +
      +
      +
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/connect/util/package-tree.html b/static/41/javadoc/org/apache/kafka/connect/util/package-tree.html new file mode 100644 index 000000000..87dc79de9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/connect/util/package-tree.html @@ -0,0 +1,71 @@ + + + + +org.apache.kafka.connect.util Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.connect.util

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +
    +
diff --git a/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/ConsumerGroupPartitionAssignor.html b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/ConsumerGroupPartitionAssignor.html new file mode 100644 index 000000000..c7185af33 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/ConsumerGroupPartitionAssignor.html @@ -0,0 +1,98 @@

Interface ConsumerGroupPartitionAssignor

  All Superinterfaces:
    PartitionAssignor

  public interface ConsumerGroupPartitionAssignor extends PartitionAssignor

  Server-side partition assignor for consumer groups used by the GroupCoordinator.

  Method Summary

    Methods inherited from interface org.apache.kafka.coordinator.group.api.assignor.PartitionAssignor:
      assign, name
diff --git a/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/GroupAssignment.html b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/GroupAssignment.html new file mode 100644 index 000000000..f8843f2d8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/GroupAssignment.html @@ -0,0 +1,202 @@

Class GroupAssignment

  java.lang.Object
    org.apache.kafka.coordinator.group.api.assignor.GroupAssignment

  public class GroupAssignment extends Object

  The partition assignment for a consumer group.

  Method Details

    public Map<String,MemberAssignment> members()

      Returns:
        Member assignments keyed by member Ids.

    public boolean equals(Object o)

      Overrides:
        equals in class Object

    public int hashCode()

      Overrides:
        hashCode in class Object

    public String toString()

      Overrides:
        toString in class Object
diff --git a/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/GroupSpec.html b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/GroupSpec.html new file mode 100644 index 000000000..5cdcc4490 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/GroupSpec.html @@ -0,0 +1,228 @@

Interface GroupSpec

  public interface GroupSpec

  The group metadata specifications required to compute the target assignment.

  Method Details

    Collection<String> memberIds()

      Returns:
        All the member Ids of the consumer group.

    SubscriptionType subscriptionType()

      Returns:
        The group's subscription type.

    boolean isPartitionAssigned(Uuid topicId, int partitionId)

      Determine whether a topic id and partition have been assigned to a member. This method
      functions the same for all types of groups.

      Parameters:
        topicId - Uuid corresponding to the partition's topic.
        partitionId - Partition Id within topic.

      Returns:
        True, if the partition is currently assigned to a member. False, otherwise.

    boolean isPartitionAssignable(Uuid topicId, int partitionId)

      For share groups, a partition can only be assigned once its initialization is complete.
      For other group types, this initialization is not required and all partitions returned by
      the SubscribedTopicDescriber are always assignable.

      Parameters:
        topicId - Uuid corresponding to the partition's topic.
        partitionId - Partition Id within topic.

      Returns:
        True, if the partition is assignable.

    MemberSubscription memberSubscription(String memberId)

      Gets the member subscription specification for a member.

      Parameters:
        memberId - The member Id.

      Returns:
        The member's subscription metadata.

      Throws:
        IllegalArgumentException - If the member Id isn't found.

    MemberAssignment memberAssignment(String memberId)

      Gets the current assignment of the member.

      Parameters:
        memberId - The member Id.

      Returns:
        The member's assignment or an empty assignment if the member does not have one.
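To show how these accessors compose, here is a small sketch of helper methods an assignor might use. It relies only on the GroupSpec and MemberSubscription methods documented above; the helper class name is invented for the example.

```java
import java.util.Collection;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.coordinator.group.api.assignor.GroupSpec;
import org.apache.kafka.coordinator.group.api.assignor.MemberSubscription;

public final class GroupSpecInspector {

    // Counts how many members of the group are subscribed to the given topic,
    // using only the accessors documented on GroupSpec and MemberSubscription.
    public static int subscriberCount(GroupSpec groupSpec, Uuid topicId) {
        int count = 0;
        Collection<String> memberIds = groupSpec.memberIds();
        for (String memberId : memberIds) {
            MemberSubscription subscription = groupSpec.memberSubscription(memberId);
            if (subscription.subscribedTopicIds().contains(topicId)) {
                count++;
            }
        }
        return count;
    }

    // A partition is a candidate for a new owner only if it is assignable
    // and not already assigned to some member.
    public static boolean isFreeToAssign(GroupSpec groupSpec, Uuid topicId, int partitionId) {
        return groupSpec.isPartitionAssignable(topicId, partitionId)
                && !groupSpec.isPartitionAssigned(topicId, partitionId);
    }
}
```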
diff --git a/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/MemberAssignment.html b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/MemberAssignment.html new file mode 100644 index 000000000..49449e475 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/MemberAssignment.html @@ -0,0 +1,129 @@

Interface MemberAssignment

  public interface MemberAssignment

  The partition assignment for a consumer group member.

  Method Details

    Map<Uuid,Set<Integer>> partitions()

      Returns:
        The assigned partitions keyed by topic Ids.
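A brief sketch of consuming the partitions() map; the printer class is hypothetical and exists only to show the Map<Uuid, Set<Integer>> shape.

```java
import java.util.Map;
import java.util.Set;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.coordinator.group.api.assignor.MemberAssignment;

public final class MemberAssignmentPrinter {

    // Walks the topic-id -> partition-set map returned by partitions() and
    // prints one line per topic. Purely illustrative.
    public static void print(String memberId, MemberAssignment assignment) {
        for (Map.Entry<Uuid, Set<Integer>> entry : assignment.partitions().entrySet()) {
            System.out.println(memberId + " owns topic " + entry.getKey()
                    + " partitions " + entry.getValue());
        }
    }
}
```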
diff --git a/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/MemberSubscription.html b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/MemberSubscription.html new file mode 100644 index 000000000..7fc73a983 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/MemberSubscription.html @@ -0,0 +1,164 @@

Interface MemberSubscription

  public interface MemberSubscription

  Interface representing the subscription metadata for a group member.

  Method Details

    Optional<String> rackId()

      Gets the rack Id if present.

      Returns:
        An Optional containing the rack Id, or an empty Optional if not present.

    Optional<String> instanceId()

      Gets the instance Id if present.

      Returns:
        An Optional containing the instance Id, or an empty Optional if not present.

    Set<Uuid> subscribedTopicIds()

      Gets the set of subscribed topic Ids.

      Returns:
        The set of subscribed topic Ids.
diff --git a/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/PartitionAssignor.html b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/PartitionAssignor.html new file mode 100644 index 000000000..bb2f5b932 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/PartitionAssignor.html @@ -0,0 +1,156 @@

Interface PartitionAssignor

  All Known Subinterfaces:
    ConsumerGroupPartitionAssignor, ShareGroupPartitionAssignor

  public interface PartitionAssignor

  Server-side partition assignor used by the GroupCoordinator.

  Method Details

    String name()

      Unique name for this assignor.

    GroupAssignment assign(GroupSpec groupSpec, SubscribedTopicDescriber subscribedTopicDescriber) throws PartitionAssignorException

      Assigns partitions to group members based on the given assignment specification and topic
      metadata.

      Parameters:
        groupSpec - The assignment spec which includes member metadata.
        subscribedTopicDescriber - The topic and partition metadata describer.

      Returns:
        The new assignment for the group.

      Throws:
        PartitionAssignorException
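The following toy implementation sketches the shape of the contract: it gives every member all partitions of every topic it subscribes to, which is not a sensible strategy but exercises name(), assign(), GroupSpec, SubscribedTopicDescriber and PartitionAssignorException together. It assumes GroupAssignment exposes a constructor taking a Map<String, MemberAssignment> (its constructor listing is collapsed in the page above) and that MemberAssignment can be satisfied by implementing only the partitions() method shown earlier.

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.coordinator.group.api.assignor.GroupAssignment;
import org.apache.kafka.coordinator.group.api.assignor.GroupSpec;
import org.apache.kafka.coordinator.group.api.assignor.MemberAssignment;
import org.apache.kafka.coordinator.group.api.assignor.PartitionAssignor;
import org.apache.kafka.coordinator.group.api.assignor.PartitionAssignorException;
import org.apache.kafka.coordinator.group.api.assignor.SubscribedTopicDescriber;

// Toy assignor: every member gets every partition of every topic it subscribes to.
public class AllPartitionsToEachMemberAssignor implements PartitionAssignor {

    @Override
    public String name() {
        return "all-partitions-to-each-member";
    }

    @Override
    public GroupAssignment assign(GroupSpec groupSpec,
                                  SubscribedTopicDescriber subscribedTopicDescriber)
            throws PartitionAssignorException {
        Map<String, MemberAssignment> members = new HashMap<>();
        for (String memberId : groupSpec.memberIds()) {
            Map<Uuid, Set<Integer>> owned = new HashMap<>();
            for (Uuid topicId : groupSpec.memberSubscription(memberId).subscribedTopicIds()) {
                int numPartitions = subscribedTopicDescriber.numPartitions(topicId);
                if (numPartitions == -1) {
                    // -1 means the topic id is unknown to the describer.
                    throw new PartitionAssignorException("Unknown topic id: " + topicId);
                }
                Set<Integer> partitions = new HashSet<>();
                for (int p = 0; p < numPartitions; p++) {
                    partitions.add(p);
                }
                owned.put(topicId, partitions);
            }
            // Assumption: partitions() is the only method MemberAssignment requires.
            members.put(memberId, new MemberAssignment() {
                @Override
                public Map<Uuid, Set<Integer>> partitions() {
                    return owned;
                }
            });
        }
        // Assumption: GroupAssignment(Map<String, MemberAssignment>) constructor exists.
        return new GroupAssignment(members);
    }
}
```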
diff --git a/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/PartitionAssignorException.html b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/PartitionAssignorException.html new file mode 100644 index 000000000..5c4be6fe6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/PartitionAssignorException.html @@ -0,0 +1,167 @@

Class PartitionAssignorException

  All Implemented Interfaces:
    Serializable

  public class PartitionAssignorException extends ApiException

  Exception thrown by PartitionAssignor.assign(GroupSpec, SubscribedTopicDescriber). The exception
  is only used internally.

  Constructor Details

    public PartitionAssignorException(String message)

    public PartitionAssignorException(String message, Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/ShareGroupPartitionAssignor.html b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/ShareGroupPartitionAssignor.html new file mode 100644 index 000000000..95b32265f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/ShareGroupPartitionAssignor.html @@ -0,0 +1,99 @@

Interface ShareGroupPartitionAssignor

  All Superinterfaces:
    PartitionAssignor

  @Unstable
  public interface ShareGroupPartitionAssignor extends PartitionAssignor

  Server-side partition assignor for share groups used by the GroupCoordinator.

  Method Summary

    Methods inherited from interface org.apache.kafka.coordinator.group.api.assignor.PartitionAssignor:
      assign, name
diff --git a/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/SubscribedTopicDescriber.html b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/SubscribedTopicDescriber.html new file mode 100644 index 000000000..bc21dc63b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/SubscribedTopicDescriber.html @@ -0,0 +1,158 @@

Interface SubscribedTopicDescriber

  public interface SubscribedTopicDescriber

  The subscribed topic describer is used by the PartitionAssignor to obtain topic and partition
  metadata of the subscribed topics.

  Method Details

    int numPartitions(Uuid topicId)

      The number of partitions for the given topic Id.

      Parameters:
        topicId - Uuid corresponding to the topic.

      Returns:
        The number of partitions corresponding to the given topic Id, or -1 if the topic id does
        not exist.

    Set<String> racksForPartition(Uuid topicId, int partition)

      Returns all the available racks associated with the replicas of the given partition.

      Parameters:
        topicId - Uuid corresponding to the partition's topic.
        partition - Partition Id within topic.

      Returns:
        The set of racks corresponding to the replicas of the topic's partition. If the topic Id
        does not exist, an empty set is returned.
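A short sketch of how an assignor might combine numPartitions() and racksForPartition() for rack-aware placement; the helper class and rack string are hypothetical.

```java
import java.util.Set;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.coordinator.group.api.assignor.SubscribedTopicDescriber;

public final class RackAwareHelper {

    // Counts partitions of the topic that have at least one replica on the given rack.
    // Returns 0 if the topic id is unknown (numPartitions() then reports -1).
    public static int partitionsOnRack(SubscribedTopicDescriber describer, Uuid topicId, String rack) {
        int numPartitions = describer.numPartitions(topicId);
        if (numPartitions == -1) {
            return 0;
        }
        int matches = 0;
        for (int partition = 0; partition < numPartitions; partition++) {
            Set<String> racks = describer.racksForPartition(topicId, partition);
            if (racks.contains(rack)) {
                matches++;
            }
        }
        return matches;
    }
}
```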
diff --git a/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/SubscriptionType.html b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/SubscriptionType.html new file mode 100644 index 000000000..ae2e99e1b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/SubscriptionType.html @@ -0,0 +1,241 @@

Enum Class SubscriptionType

  java.lang.Object
    java.lang.Enum<SubscriptionType>
      org.apache.kafka.coordinator.group.api.assignor.SubscriptionType

  All Implemented Interfaces:
    Serializable, Comparable<SubscriptionType>, Constable

  public enum SubscriptionType extends Enum<SubscriptionType>

  The subscription type followed by a consumer group.

  Enum Constant Details

    public static final SubscriptionType HOMOGENEOUS

      A homogeneous subscription type means that all the members of the group use the same
      subscription.

    public static final SubscriptionType HETEROGENEOUS

      A heterogeneous subscription type means that not all the members of the group use the same
      subscription.

  Method Details

    public static SubscriptionType[] values()

      Returns an array containing the constants of this enum class, in the order they are declared.

      Returns:
        an array containing the constants of this enum class, in the order they are declared

    public static SubscriptionType valueOf(String name)

      Returns the enum constant of this class with the specified name. The string must match exactly
      an identifier used to declare an enum constant in this class. (Extraneous whitespace characters
      are not permitted.)

      Parameters:
        name - the name of the enum constant to be returned.

      Returns:
        the enum constant with the specified name

      Throws:
        IllegalArgumentException - if this enum class has no constant with the specified name
        NullPointerException - if the argument is null

    public String toString()

      Overrides:
        toString in class Enum<SubscriptionType>
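A small sketch of how an assignor might branch on the subscription type reported by GroupSpec.subscriptionType(); the class is invented for illustration.

```java
import org.apache.kafka.coordinator.group.api.assignor.GroupSpec;
import org.apache.kafka.coordinator.group.api.assignor.SubscriptionType;

public final class SubscriptionTypeExample {

    // An assignor can take a cheaper code path when every member shares the same
    // subscription (HOMOGENEOUS) and fall back to per-member handling otherwise.
    public static String describe(GroupSpec groupSpec) {
        SubscriptionType type = groupSpec.subscriptionType();
        return type == SubscriptionType.HOMOGENEOUS
                ? "all members share one subscription"
                : "members have differing subscriptions";
    }
}
```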
diff --git a/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/package-summary.html b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/package-summary.html new file mode 100644 index 000000000..c5ae28d38 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/package-summary.html @@ -0,0 +1,130 @@

Package org.apache.kafka.coordinator.group.api.assignor

  package org.apache.kafka.coordinator.group.api.assignor

  Provides the core functionality and metadata management for consumer group partition assignment.
diff --git a/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/package-tree.html b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/package-tree.html new file mode 100644 index 000000000..9cdbdc9d7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/coordinator/group/api/assignor/package-tree.html @@ -0,0 +1,121 @@

Hierarchy For Package org.apache.kafka.coordinator.group.api.assignor

  Class Hierarchy

  Interface Hierarchy

  Enum Class Hierarchy
diff --git a/static/41/javadoc/org/apache/kafka/server/authorizer/AclCreateResult.html b/static/41/javadoc/org/apache/kafka/server/authorizer/AclCreateResult.html new file mode 100644 index 000000000..d7f5db859 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/authorizer/AclCreateResult.html @@ -0,0 +1,190 @@

Class AclCreateResult

  java.lang.Object
    org.apache.kafka.server.authorizer.AclCreateResult

  public class AclCreateResult extends Object

  Constructor Details

    public AclCreateResult(ApiException exception)

  Method Details

    public Optional<ApiException> exception()

      Returns any exception during create. If exception is empty, the request has succeeded.
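A minimal sketch of inspecting one AclCreateResult, for example after a CompletionStage returned by Authorizer.createAcls completes; only exception() from the page above is used, and the handler class is hypothetical.

```java
import java.util.Optional;

import org.apache.kafka.common.errors.ApiException;
import org.apache.kafka.server.authorizer.AclCreateResult;

public final class AclCreateResultHandler {

    // An empty exception() means the binding was created; otherwise log the failure.
    public static void log(AclCreateResult result) {
        Optional<ApiException> failure = result.exception();
        if (failure.isPresent()) {
            System.err.println("ACL creation failed: " + failure.get().getMessage());
        } else {
            System.out.println("ACL created");
        }
    }
}
```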
diff --git a/static/41/javadoc/org/apache/kafka/server/authorizer/AclDeleteResult.AclBindingDeleteResult.html b/static/41/javadoc/org/apache/kafka/server/authorizer/AclDeleteResult.AclBindingDeleteResult.html new file mode 100644 index 000000000..2d3973108 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/authorizer/AclDeleteResult.AclBindingDeleteResult.html @@ -0,0 +1,190 @@

Class AclDeleteResult.AclBindingDeleteResult

  java.lang.Object
    org.apache.kafka.server.authorizer.AclDeleteResult.AclBindingDeleteResult

  Enclosing class:
    AclDeleteResult

  public static class AclDeleteResult.AclBindingDeleteResult extends Object

  Delete result for each ACL binding that matched a delete filter.

  Constructor Details

    public AclBindingDeleteResult(AclBinding aclBinding)

    public AclBindingDeleteResult(AclBinding aclBinding, ApiException exception)

  Method Details

    public AclBinding aclBinding()

      Returns ACL binding that matched the delete filter. If exception() is empty, the ACL binding
      was successfully deleted.

    public Optional<ApiException> exception()

      Returns any exception that resulted in failure to delete ACL binding. If exception is empty,
      the ACL binding was successfully deleted.
diff --git a/static/41/javadoc/org/apache/kafka/server/authorizer/AclDeleteResult.html b/static/41/javadoc/org/apache/kafka/server/authorizer/AclDeleteResult.html new file mode 100644 index 000000000..3d4dccf67 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/authorizer/AclDeleteResult.html @@ -0,0 +1,200 @@

Class AclDeleteResult

  java.lang.Object
    org.apache.kafka.server.authorizer.AclDeleteResult

  public class AclDeleteResult extends Object
diff --git a/static/41/javadoc/org/apache/kafka/server/authorizer/Action.html b/static/41/javadoc/org/apache/kafka/server/authorizer/Action.html new file mode 100644 index 000000000..38579e2c5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/authorizer/Action.html @@ -0,0 +1,275 @@

Class Action

  java.lang.Object
    org.apache.kafka.server.authorizer.Action

  public class Action extends Object

  Constructor Details

    public Action(AclOperation operation, ResourcePattern resourcePattern, int resourceReferenceCount, boolean logIfAllowed, boolean logIfDenied)

      Parameters:
        operation - non-null operation being performed
        resourcePattern - non-null resource pattern on which this action is being performed

  Method Details

    public ResourcePattern resourcePattern()

      Returns:
        a non-null resource pattern on which this action is being performed

    public AclOperation operation()

      Returns:
        a non-null operation being performed

    public boolean logIfAllowed()

      Indicates if audit logs tracking ALLOWED access should include this action if result is
      ALLOWED. The flag is true if access to a resource is granted while processing the request as a
      result of this authorization. The flag is false only for requests used to describe access where
      no operation on the resource is actually performed based on the authorization result.

    public boolean logIfDenied()

      Indicates if audit logs tracking DENIED access should include this action if result is DENIED.
      The flag is true if access to a resource was explicitly requested and request is denied as a
      result of this authorization request. The flag is false if request was filtering out authorized
      resources (e.g. to subscribe to regex pattern). The flag is also false if this is an optional
      authorization where an alternative resource authorization is applied if this fails
      (e.g. Cluster:Create which is subsequently overridden by Topic:Create).

    public int resourceReferenceCount()

      Number of times the resource being authorized is referenced within the request. For example, a
      single request may reference `n` topic partitions of the same topic. Brokers will authorize the
      topic once with `resourceReferenceCount=n`. Authorizers may include the count in audit logs.

    public boolean equals(Object o)

      Overrides:
        equals in class Object

    public int hashCode()

      Overrides:
        hashCode in class Object

    public String toString()

      Overrides:
        toString in class Object
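A minimal sketch of building an Action with the constructor above; the topic name "orders" is hypothetical, and AclOperation, ResourceType, PatternType and ResourcePattern come from the common Kafka ACL and resource classes.

```java
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;
import org.apache.kafka.server.authorizer.Action;

public final class ActionExample {

    // Builds an Action describing a READ on the literal topic "orders", referenced once,
    // with audit logging requested for both allowed and denied outcomes.
    public static Action readOrdersTopic() {
        ResourcePattern topic = new ResourcePattern(ResourceType.TOPIC, "orders", PatternType.LITERAL);
        return new Action(AclOperation.READ, topic, 1, true, true);
    }
}
```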
diff --git a/static/41/javadoc/org/apache/kafka/server/authorizer/AuthorizableRequestContext.html b/static/41/javadoc/org/apache/kafka/server/authorizer/AuthorizableRequestContext.html new file mode 100644 index 000000000..f0a9290f6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/authorizer/AuthorizableRequestContext.html @@ -0,0 +1,214 @@

Interface AuthorizableRequestContext

  public interface AuthorizableRequestContext

  Request context interface that provides data from request header as well as connection and
  authentication information to plugins.

  Method Details

    String listenerName()

      Returns name of listener on which request was received.

    SecurityProtocol securityProtocol()

      Returns the security protocol for the listener on which request was received.

    KafkaPrincipal principal()

      Returns authenticated principal for the connection on which request was received.

    InetAddress clientAddress()

      Returns client IP address from which request was sent.

    int requestType()

      16-bit API key of the request from the request header. See
      https://kafka.apache.org/protocol#protocol_api_keys for request types.

    int requestVersion()

      Returns the request version from the request header.

    String clientId()

      Returns the client id from the request header.

    int correlationId()

      Returns the correlation id from the request header.
diff --git a/static/41/javadoc/org/apache/kafka/server/authorizer/AuthorizationResult.html b/static/41/javadoc/org/apache/kafka/server/authorizer/AuthorizationResult.html new file mode 100644 index 000000000..5e1e9a226 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/authorizer/AuthorizationResult.html @@ -0,0 +1,217 @@

Enum Class AuthorizationResult

  java.lang.Object
    java.lang.Enum<AuthorizationResult>
      org.apache.kafka.server.authorizer.AuthorizationResult

  All Implemented Interfaces:
    Serializable, Comparable<AuthorizationResult>, Constable

  public enum AuthorizationResult extends Enum<AuthorizationResult>

  Method Details

    public static AuthorizationResult[] values()

      Returns an array containing the constants of this enum class, in the order they are declared.

      Returns:
        an array containing the constants of this enum class, in the order they are declared

    public static AuthorizationResult valueOf(String name)

      Returns the enum constant of this class with the specified name. The string must match exactly
      an identifier used to declare an enum constant in this class. (Extraneous whitespace characters
      are not permitted.)

      Parameters:
        name - the name of the enum constant to be returned.

      Returns:
        the enum constant with the specified name

      Throws:
        IllegalArgumentException - if this enum class has no constant with the specified name
        NullPointerException - if the argument is null
diff --git a/static/41/javadoc/org/apache/kafka/server/authorizer/Authorizer.html b/static/41/javadoc/org/apache/kafka/server/authorizer/Authorizer.html new file mode 100644 index 000000000..3f9cb1fc7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/authorizer/Authorizer.html @@ -0,0 +1,338 @@

Interface Authorizer

  All Superinterfaces:
    AutoCloseable, Closeable, Configurable

  public interface Authorizer extends Configurable, Closeable

  Pluggable authorizer interface for Kafka brokers.

  Startup sequence in brokers:

    1. Broker creates authorizer instance if configured in `authorizer.class.name`.
    2. Broker configures and starts authorizer instance. Authorizer implementation starts loading
       its metadata.
    3. Broker starts SocketServer to accept connections and process requests.
    4. For each listener, SocketServer waits for authorization metadata to be available in the
       authorizer before accepting connections. The future returned by start(AuthorizerServerInfo)
       for each listener must return only when the authorizer is ready to authorize requests on
       the listener.
    5. Broker accepts connections. For each connection, broker performs authentication and then
       accepts Kafka requests. For each request, broker invokes
       authorize(AuthorizableRequestContext, List) to authorize actions performed by the request.

  Authorizer implementation class may optionally implement Reconfigurable to enable dynamic
  reconfiguration without restarting the broker.

  Authorizer implementation class may also optionally implement Monitorable to enable the
  authorizer to register metrics. The following tags are automatically added to all metrics
  registered: config set to authorizer.class.name, class set to the Authorizer class name, and
  role set to either broker or controller.

  Threading model:

    - All authorizer operations including authorization and ACL updates must be thread-safe.
    - ACL update methods are asynchronous. Implementations with low update latency may return a
      completed future using CompletableFuture.completedFuture(Object). This ensures that the
      request will be handled synchronously by the caller without using a purgatory to wait for
      the result. If ACL updates require remote communication which may block, return a future
      that is completed asynchronously when the remote operation completes. This enables the
      caller to process other requests on the request threads without blocking.
    - Any threads or thread pools used for processing remote operations asynchronously can be
      started during start(AuthorizerServerInfo). These threads must be shutdown during
      Closeable.close().

  Method Details

    Map<Endpoint,? extends CompletionStage<Void>> start(AuthorizerServerInfo serverInfo)

      Starts loading authorization metadata and returns futures that can be used to wait until
      metadata for authorizing requests on each listener is available. Each listener will be
      started only after its metadata is available and the authorizer is ready to start
      authorizing requests on that listener.

      Parameters:
        serverInfo - Metadata for the broker including broker id and listener endpoints

      Returns:
        CompletionStage for each endpoint that completes when the authorizer is ready to start
        authorizing requests on that listener.

    List<AuthorizationResult> authorize(AuthorizableRequestContext requestContext, List<Action> actions)

      Authorizes the specified actions. Additional metadata for the actions is specified in
      `requestContext`.

      This is a synchronous API designed for use with locally cached ACLs. Since this method is
      invoked on the request thread while processing each request, implementations of this method
      should avoid time-consuming remote communication that may block request threads.

      Parameters:
        requestContext - Request context including request type, security protocol and listener name
        actions - Actions being authorized including resource and operation for each action

      Returns:
        List of authorization results for each action in the same order as the provided actions

    List<? extends CompletionStage<AclCreateResult>> createAcls(AuthorizableRequestContext requestContext, List<AclBinding> aclBindings)

      Creates new ACL bindings.

      This is an asynchronous API that enables the caller to avoid blocking during the update.
      Implementations of this API can return completed futures using
      CompletableFuture.completedFuture(Object) to process the update synchronously on the
      request thread.

      Parameters:
        requestContext - Request context if the ACL is being created by a broker to handle a
          client request to create ACLs.
        aclBindings - ACL bindings to create

      Returns:
        Create result for each ACL binding in the same order as in the input list. Each result is
        returned as a CompletionStage that completes when the result is available.

    List<? extends CompletionStage<AclDeleteResult>> deleteAcls(AuthorizableRequestContext requestContext, List<AclBindingFilter> aclBindingFilters)

      Deletes all ACL bindings that match the provided filters.

      This is an asynchronous API that enables the caller to avoid blocking during the update.
      Implementations of this API can return completed futures using
      CompletableFuture.completedFuture(Object) to process the update synchronously on the
      request thread.

      Refer to the authorizer implementation docs for details on concurrent update guarantees.

      Parameters:
        requestContext - Request context if the ACL is being deleted by a broker to handle a
          client request to delete ACLs.
        aclBindingFilters - Filters to match ACL bindings that are to be deleted

      Returns:
        Delete result for each filter in the same order as in the input list. Each result
        indicates which ACL bindings were actually deleted as well as any bindings that matched
        but could not be deleted. Each result is returned as a CompletionStage that completes
        when the result is available.

    acls

      Returns ACL bindings which match the provided filter.

      This is a synchronous API designed for use with locally cached ACLs. This method is invoked
      on the request thread while processing DescribeAcls requests and should avoid time-consuming
      remote communication that may block request threads.

      Returns:
        Iterator for ACL bindings, which may be populated lazily.

    default int aclCount()

      Get the current number of ACLs, for the purpose of metrics. Authorizers that don't implement
      this function will simply return -1.

    default AuthorizationResult authorizeByResourceType(AuthorizableRequestContext requestContext, AclOperation op, ResourceType resourceType)

      Check if the caller is authorized to perform the given ACL operation on at least one
      resource of the given type.

      Custom authorizer implementations should consider overriding this default implementation
      because:
        1. The default implementation iterates all AclBindings multiple times, without any caching
           by principal, host, operation, permission types, and resource types. More efficient
           implementations may be added in custom authorizers that directly access cached entries.
        2. The default implementation cannot integrate with any audit logging included in the
           authorizer implementation.
        3. The default implementation does not support any custom authorizer configs or other
           access rules apart from ACLs.

      Parameters:
        requestContext - Request context including request resourceType, security protocol and listener name
        op - The ACL operation to check
        resourceType - The resource type to check

      Returns:
        AuthorizationResult.ALLOWED if the caller is authorized to perform the given ACL operation
        on at least one resource of the given type, AuthorizationResult.DENIED otherwise.
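A hedged sketch of a caller-side use of authorize(): it builds a single WRITE action for a hypothetical topic and checks the one result returned; the Authorizer instance and AuthorizableRequestContext are assumed to be supplied by the surrounding broker code.

```java
import java.util.Collections;
import java.util.List;

import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;
import org.apache.kafka.server.authorizer.Action;
import org.apache.kafka.server.authorizer.AuthorizableRequestContext;
import org.apache.kafka.server.authorizer.AuthorizationResult;
import org.apache.kafka.server.authorizer.Authorizer;

public final class AuthorizeCall {

    // Asks the authorizer whether the caller may WRITE to the (hypothetical) topic "orders".
    // authorize() returns one result per action, in the same order as the input list.
    public static boolean mayWriteOrders(Authorizer authorizer, AuthorizableRequestContext context) {
        Action write = new Action(
                AclOperation.WRITE,
                new ResourcePattern(ResourceType.TOPIC, "orders", PatternType.LITERAL),
                1,      // the resource is referenced once in this request
                true,   // log if allowed
                true);  // log if denied
        List<AuthorizationResult> results =
                authorizer.authorize(context, Collections.singletonList(write));
        return results.get(0) == AuthorizationResult.ALLOWED;
    }
}
```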
diff --git a/static/41/javadoc/org/apache/kafka/server/authorizer/AuthorizerServerInfo.html b/static/41/javadoc/org/apache/kafka/server/authorizer/AuthorizerServerInfo.html new file mode 100644 index 000000000..be7134cf6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/authorizer/AuthorizerServerInfo.html @@ -0,0 +1,178 @@

Interface AuthorizerServerInfo

  public interface AuthorizerServerInfo

  Runtime broker configuration metadata provided to authorizers during start up.

  Method Details

    ClusterResource clusterResource()

      Returns cluster metadata for the broker running this authorizer including cluster id.

    int brokerId()

      Returns broker id. This may be a generated broker id if `broker.id` was not configured.

    Collection<Endpoint> endpoints()

      Returns endpoints for all listeners including the advertised host and port to which the
      listener is bound.

    Endpoint interBrokerEndpoint()

      Returns the inter-broker endpoint. This is one of the endpoints returned by endpoints().

    Collection<String> earlyStartListeners()

      Returns the configured early start listeners.
diff --git a/static/41/javadoc/org/apache/kafka/server/authorizer/package-summary.html b/static/41/javadoc/org/apache/kafka/server/authorizer/package-summary.html new file mode 100644 index 000000000..32e876425 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/authorizer/package-summary.html @@ -0,0 +1,114 @@

Package org.apache.kafka.server.authorizer

  package org.apache.kafka.server.authorizer

  Provides pluggable interface for performing authorization on a Kafka server.
diff --git a/static/41/javadoc/org/apache/kafka/server/authorizer/package-tree.html b/static/41/javadoc/org/apache/kafka/server/authorizer/package-tree.html new file mode 100644 index 000000000..e3a9b3b41 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/authorizer/package-tree.html @@ -0,0 +1,109 @@

Hierarchy For Package org.apache.kafka.server.authorizer

  Class Hierarchy

  Interface Hierarchy

  Enum Class Hierarchy
diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/LogSegmentData.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/LogSegmentData.html new file mode 100644 index 000000000..fa4ace725 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/LogSegmentData.html @@ -0,0 +1,291 @@

Class LogSegmentData

  java.lang.Object
    org.apache.kafka.server.log.remote.storage.LogSegmentData

  public class LogSegmentData extends Object

  This represents all the required data and indexes for a specific log segment that needs to be
  stored in the remote storage. This is passed with
  RemoteStorageManager.copyLogSegmentData(RemoteLogSegmentMetadata, LogSegmentData) while copying
  a specific log segment to the remote storage.

  Constructor Details

    public LogSegmentData(Path logSegment, Path offsetIndex, Path timeIndex, Optional<Path> transactionIndex, Path producerSnapshotIndex, ByteBuffer leaderEpochIndex)

      Creates a LogSegmentData instance with data and indexes.

      Parameters:
        logSegment - actual log segment file
        offsetIndex - offset index file
        timeIndex - time index file
        transactionIndex - transaction index file, which can be null
        producerSnapshotIndex - producer snapshot until this segment
        leaderEpochIndex - leader-epoch-index until this segment

  Method Details

    public Path logSegment()

      Returns:
        Log segment file of this segment.

    public Path offsetIndex()

      Returns:
        Offset index file.

    public Path timeIndex()

      Returns:
        Time index file of this segment.

    public Optional<Path> transactionIndex()

      Returns:
        Transaction index file of this segment if it exists.

    public Path producerSnapshotIndex()

      Returns:
        Producer snapshot file until this segment.

    public ByteBuffer leaderEpochIndex()

      Returns:
        Leader epoch index until this segment.

    public boolean equals(Object o)

      Overrides:
        equals in class Object

    public int hashCode()

      Overrides:
        hashCode in class Object

    public String toString()

      Overrides:
        toString in class Object
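A minimal sketch of assembling a LogSegmentData from the files of one local segment before handing it to the remote storage manager; the directory layout and file names are hypothetical, and the leader epoch index is assumed to be built elsewhere.

```java
import java.nio.ByteBuffer;
import java.nio.file.Path;
import java.util.Optional;

import org.apache.kafka.server.log.remote.storage.LogSegmentData;

public final class LogSegmentDataExample {

    // Bundles the files of one local segment so they can be handed to
    // RemoteStorageManager.copyLogSegmentData(...). The transaction index is optional
    // and passed as Optional.empty() when the segment has none.
    public static LogSegmentData forSegment(Path logDir, ByteBuffer leaderEpochIndex) {
        Path segment = logDir.resolve("00000000000000000000.log");
        Path offsetIndex = logDir.resolve("00000000000000000000.index");
        Path timeIndex = logDir.resolve("00000000000000000000.timeindex");
        Path producerSnapshot = logDir.resolve("00000000000000000000.snapshot");
        return new LogSegmentData(segment, offsetIndex, timeIndex,
                Optional.empty(), producerSnapshot, leaderEpochIndex);
    }
}
```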
diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogMetadata.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogMetadata.html new file mode 100644 index 000000000..d165cfe0c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogMetadata.html @@ -0,0 +1,167 @@

Class RemoteLogMetadata

  java.lang.Object
    org.apache.kafka.server.log.remote.storage.RemoteLogMetadata

  Direct Known Subclasses:
    RemoteLogSegmentMetadata, RemoteLogSegmentMetadataUpdate, RemotePartitionDeleteMetadata

  public abstract class RemoteLogMetadata extends Object

  Base class for remote log metadata objects like RemoteLogSegmentMetadata,
  RemoteLogSegmentMetadataUpdate, and RemotePartitionDeleteMetadata.

  Method Details

    public long eventTimestampMs()

      Returns:
        Epoch time in milliseconds at which this event occurred.

    public int brokerId()

      Returns:
        Broker id from which this event is generated.

    public abstract TopicIdPartition topicIdPartition()

      Returns:
        TopicIdPartition for which this event is generated.
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogMetadataManager.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogMetadataManager.html new file mode 100644 index 000000000..fad110de1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogMetadataManager.html @@ -0,0 +1,482 @@ + + + + +RemoteLogMetadataManager (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface RemoteLogMetadataManager

    +
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Closeable, Configurable
    +
    +
    +
    public interface RemoteLogMetadataManager +extends Configurable, Closeable
    +
    This interface provides storing and fetching remote log segment metadata with strongly consistent semantics. +

    + This class can be plugged in to Kafka cluster by adding the implementation class as + remote.log.metadata.manager.class.name property value. There is an inbuilt implementation backed by + topic storage in the local cluster. This is used as the default implementation if + remote.log.metadata.manager.class.name is not configured. +

    +

    + remote.log.metadata.manager.class.path property is about the class path of the RemoteLogMetadataManager + implementation. If specified, the RemoteLogMetadataManager implementation and its dependent libraries will be loaded + by a dedicated classloader which searches this class path before the Kafka broker class path. The syntax of this + parameter is same with the standard Java class path string. +

    +

    + remote.log.metadata.manager.listener.name property is about listener name of the local broker to which + it should get connected if needed by RemoteLogMetadataManager implementation. +

    + "cluster.id", "broker.id" and all other properties prefixed with the config: "remote.log.metadata.manager.impl.prefix" + (default value is "rlmm.config.") are passed when Configurable.configure(Map) is invoked on this instance. +

    + + Implement Monitorable to enable the manager to register metrics. + The following tags are automatically added to all metrics registered: config set to + remote.log.metadata.manager.class.name, and class set to the RemoteLogMetadataManager class name.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        addRemoteLogSegmentMetadata

        +
        CompletableFuture<Void> addRemoteLogSegmentMetadata(RemoteLogSegmentMetadata remoteLogSegmentMetadata) + throws RemoteStorageException
        +
        This method is used to add RemoteLogSegmentMetadata asynchronously with the containing RemoteLogSegmentId into RemoteLogMetadataManager. +

        + RemoteLogSegmentMetadata is identified by RemoteLogSegmentId and it should have the initial state which is RemoteLogSegmentState.COPY_SEGMENT_STARTED. +

        + updateRemoteLogSegmentMetadata(RemoteLogSegmentMetadataUpdate) should be used to update an existing RemoteLogSegmentMetadata.

        +
        +
        Parameters:
        +
        remoteLogSegmentMetadata - metadata about the remote log segment.
        +
        Returns:
        +
        a CompletableFuture which will complete once this operation is finished.
        +
        Throws:
        +
        RemoteStorageException - if there are any storage related errors occurred.
        +
        IllegalArgumentException - if the given metadata instance does not have the state as RemoteLogSegmentState.COPY_SEGMENT_STARTED
        +
        +
        +
        updateRemoteLogSegmentMetadata

        CompletableFuture<Void> updateRemoteLogSegmentMetadata(RemoteLogSegmentMetadataUpdate remoteLogSegmentMetadataUpdate)
                                                        throws RemoteStorageException

        Updates the RemoteLogSegmentMetadata asynchronously. Currently, it allows updating the segment with a new
        state based on the life cycle of the segment. A segment can go through the state transitions below.

         +---------------------+            +----------------------+
         |COPY_SEGMENT_STARTED |----------->|COPY_SEGMENT_FINISHED |
         +-------------------+-+            +--+-------------------+
                             |                 |
                             |                 |
                             v                 v
                          +--+-----------------+-+
                          |DELETE_SEGMENT_STARTED|
                          +-----------+----------+
                                      |
                                      |
                                      v
                          +-----------+-----------+
                          |DELETE_SEGMENT_FINISHED|
                          +-----------------------+

        RemoteLogSegmentState.COPY_SEGMENT_STARTED - indicates that copying the segment to remote storage has
        started but is not yet finished.
        RemoteLogSegmentState.COPY_SEGMENT_FINISHED - indicates that copying the segment to remote storage has
        finished.

        The leader broker copies the log segments to the remote storage, puts the remote log segment metadata with
        the state "COPY_SEGMENT_STARTED", and updates the state to "COPY_SEGMENT_FINISHED" once the copy is
        successful.

        RemoteLogSegmentState.DELETE_SEGMENT_STARTED - indicates that deletion of the segment has started but is
        not yet finished.
        RemoteLogSegmentState.DELETE_SEGMENT_FINISHED - indicates that the segment has been deleted successfully.

        Leader partitions publish both of the above delete-segment events when remote log retention is reached for
        the respective segments. Remote partition removers also publish these events when a segment is deleted as
        part of remote partition deletion.

        Parameters:
        remoteLogSegmentMetadataUpdate - update of the remote log segment metadata.
        Returns:
        a CompletableFuture which will complete once this operation is finished.
        Throws:
        RemoteStorageException - if any storage-related errors occur.
        RemoteResourceNotFoundException - when there are no resources associated with the given remoteLogSegmentMetadataUpdate.
        IllegalArgumentException - if the given metadata instance has the state RemoteLogSegmentState.COPY_SEGMENT_STARTED.
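        A hedged sketch of the typical follow-up call after a successful copy, marking the segment as
        COPY_SEGMENT_FINISHED; rlmm, segmentId and brokerId are assumed to exist:

```java
// Sketch only: update the segment state once the copy to remote storage succeeds.
import java.util.Optional;
import org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState;

public class UpdateSegmentStateExample {
    static void markCopyFinished(RemoteLogMetadataManager rlmm,
                                 RemoteLogSegmentId segmentId,
                                 int brokerId) throws Exception {
        RemoteLogSegmentMetadataUpdate update = new RemoteLogSegmentMetadataUpdate(
                segmentId,
                System.currentTimeMillis(),                  // eventTimestampMs
                Optional.empty(),                            // no custom metadata
                RemoteLogSegmentState.COPY_SEGMENT_FINISHED,
                brokerId);
        rlmm.updateRemoteLogSegmentMetadata(update).join();
    }
}
```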
        remoteLogSegmentMetadata

        Optional<RemoteLogSegmentMetadata> remoteLogSegmentMetadata(TopicIdPartition topicIdPartition,
                                                                    int epochForOffset,
                                                                    long offset)
                                                             throws RemoteStorageException

        Returns the RemoteLogSegmentMetadata if it exists for the given topic partition containing the offset with
        the given leader epoch for the offset; otherwise returns Optional.empty().

        Parameters:
        topicIdPartition - topic partition
        epochForOffset - leader epoch for the given offset
        offset - offset
        Returns:
        the requested remote log segment metadata if it exists.
        Throws:
        RemoteStorageException - if any storage-related errors occur.
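        A hedged lookup sketch; the TopicIdPartition instance is assumed to be available from the caller:

```java
// Sketch only: find the remote segment that contains a given offset for a leader epoch.
import java.util.Optional;
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata;
import org.apache.kafka.server.log.remote.storage.RemoteStorageException;

public class LookupSegmentExample {
    static void printSegmentFor(RemoteLogMetadataManager rlmm,
                                TopicIdPartition partition,
                                int leaderEpoch,
                                long offset) throws RemoteStorageException {
        Optional<RemoteLogSegmentMetadata> metadata =
                rlmm.remoteLogSegmentMetadata(partition, leaderEpoch, offset);
        metadata.ifPresentOrElse(
                m -> System.out.println("Found segment " + m.remoteLogSegmentId()
                        + " covering offsets " + m.startOffset() + ".." + m.endOffset()),
                () -> System.out.println("No remote segment contains offset " + offset));
    }
}
```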
        highestOffsetForEpoch

        Optional<Long> highestOffsetForEpoch(TopicIdPartition topicIdPartition,
                                             int leaderEpoch)
                                      throws RemoteStorageException

        Returns the highest log offset of the topic partition for the given leader epoch in remote storage. This is
        used by the remote log management subsystem to know up to which offset the segments have been copied to
        remote storage for a given leader epoch.

        Parameters:
        topicIdPartition - topic partition
        leaderEpoch - leader epoch
        Returns:
        the requested highest log offset, if it exists.
        Throws:
        RemoteStorageException - if any storage-related errors occur.
        putRemotePartitionDeleteMetadata

        CompletableFuture<Void> putRemotePartitionDeleteMetadata(RemotePartitionDeleteMetadata remotePartitionDeleteMetadata)
                                                          throws RemoteStorageException

        Updates the metadata about a remote partition delete event asynchronously. Currently, it allows updating
        the state (RemotePartitionDeleteState) of a topic partition in remote metadata storage. The controller
        invokes this method with a RemotePartitionDeleteMetadata whose state is
        RemotePartitionDeleteState.DELETE_PARTITION_MARKED, so that remote partition removers can act on this event
        and clean up the respective remote log segments of the partition.

        In the case of the default RLMM implementation, the remote partition remover processes
        RemotePartitionDeleteState.DELETE_PARTITION_MARKED events.

        Parameters:
        remotePartitionDeleteMetadata - update on the delete state of a partition.
        Returns:
        a CompletableFuture which will complete once this operation is finished.
        Throws:
        RemoteStorageException - if any storage-related errors occur.
        RemoteResourceNotFoundException - when there are no resources associated with the given remotePartitionDeleteMetadata.
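        A hedged sketch of the controller-side call that marks a partition's remote data for deletion; partition
        and brokerId are assumed inputs:

```java
// Sketch only: publish a DELETE_PARTITION_MARKED event so remote partition removers
// can start cleaning up the partition's remote log segments.
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager;
import org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteMetadata;
import org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteState;

public class MarkPartitionForDeleteExample {
    static void markForDeletion(RemoteLogMetadataManager rlmm,
                                TopicIdPartition partition,
                                int brokerId) throws Exception {
        RemotePartitionDeleteMetadata deleteMetadata = new RemotePartitionDeleteMetadata(
                partition,
                RemotePartitionDeleteState.DELETE_PARTITION_MARKED,
                System.currentTimeMillis(),
                brokerId);
        rlmm.putRemotePartitionDeleteMetadata(deleteMetadata).join();
    }
}
```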
        listRemoteLogSegments

        Iterator<RemoteLogSegmentMetadata> listRemoteLogSegments(TopicIdPartition topicIdPartition)
                                                          throws RemoteStorageException

        Returns all the remote log segment metadata of the given topicIdPartition.

        Remote partition removers use this method to fetch all the segments for a given topic partition, so that
        they can delete them.

        Returns:
        Iterator of all the remote log segment metadata for the given topic partition.
        Throws:
        RemoteStorageException
        listRemoteLogSegments

        Iterator<RemoteLogSegmentMetadata> listRemoteLogSegments(TopicIdPartition topicIdPartition,
                                                                 int leaderEpoch)
                                                          throws RemoteStorageException

        Returns an iterator of remote log segment metadata which contains the given leader epoch, sorted by
        RemoteLogSegmentMetadata.startOffset() in ascending order. This is used by the remote log retention
        management subsystem to fetch the segment metadata for a given leader epoch.

        Parameters:
        topicIdPartition - topic partition
        leaderEpoch - leader epoch
        Returns:
        Iterator of remote segments, sorted by start offset in ascending order.
        Throws:
        RemoteStorageException
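        A hedged sketch that walks the per-epoch iterator, for example to sum the remote footprint of an epoch:

```java
// Sketch only: iterate remote segment metadata for one leader epoch in start-offset order.
import java.util.Iterator;
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata;
import org.apache.kafka.server.log.remote.storage.RemoteStorageException;

public class ListSegmentsExample {
    static long totalBytes(RemoteLogMetadataManager rlmm,
                           TopicIdPartition partition,
                           int leaderEpoch) throws RemoteStorageException {
        long total = 0;
        Iterator<RemoteLogSegmentMetadata> segments =
                rlmm.listRemoteLogSegments(partition, leaderEpoch);
        while (segments.hasNext()) {
            total += segments.next().segmentSizeInBytes();
        }
        return total;
    }
}
```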
        onPartitionLeadershipChanges

        void onPartitionLeadershipChanges(Set<TopicIdPartition> leaderPartitions,
                                          Set<TopicIdPartition> followerPartitions)

        This method is invoked only when there are changes in the leadership of the topic partitions that this
        broker is responsible for.

        Parameters:
        leaderPartitions - partitions that have become leaders on this broker.
        followerPartitions - partitions that have become followers on this broker.
        onStopPartitions

        void onStopPartitions(Set<TopicIdPartition> partitions)

        This method is invoked only when the topic partitions are stopped on this broker. This can happen when a
        partition is migrated to another broker or when a partition is deleted.

        Parameters:
        partitions - topic partitions that have been stopped.
        remoteLogSize

        long remoteLogSize(TopicIdPartition topicIdPartition,
                           int leaderEpoch)
                    throws RemoteStorageException

        Returns the total size of the log for the given leader epoch in remote storage.

        Parameters:
        topicIdPartition - topic partition for which the size needs to be calculated.
        leaderEpoch - the size will only include segments belonging to this epoch.
        Returns:
        Total size of the log stored in remote storage, in bytes.
        Throws:
        RemoteStorageException
        nextSegmentWithTxnIndex

        default Optional<RemoteLogSegmentMetadata> nextSegmentWithTxnIndex(TopicIdPartition topicIdPartition,
                                                                           int epoch,
                                                                           long offset)
                                                                    throws RemoteStorageException

        Returns the next segment metadata that contains the aborted transaction entries for the given topic
        partition, epoch and offset.

        - The default implementation returns the segment metadata that matches the given epoch and offset,
          irrespective of the presence of the transaction index.
        - A custom implementation can optimize this by returning the next segment metadata that contains the
          transaction index in the given epoch. If there are no segments with a transaction index in the given
          epoch, it returns empty.

        Parameters:
        topicIdPartition - topic partition to search for.
        epoch - leader epoch for the given offset.
        offset - offset
        Returns:
        The next segment metadata. The transaction index may or may not exist in the returned segment metadata,
        depending on the RLMM plugin implementation. The caller of this method handles both cases.
        Throws:
        RemoteStorageException - if any storage-related errors occur.
        isReady

        default boolean isReady(TopicIdPartition topicIdPartition)

        Denotes whether the partition metadata is ready to serve.

        Parameters:
        topicIdPartition - topic partition
        Returns:
        True if the partition is ready to serve remote storage operations.
diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentId.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentId.html
new file mode 100644
index 000000000..7f55e22f9
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentId.html
@@ -0,0 +1,237 @@

    RemoteLogSegmentId (kafka 4.1.0 API)

    Class RemoteLogSegmentId

    java.lang.Object
        org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId

    public class RemoteLogSegmentId
    extends Object

    This class represents a universally unique identifier associated with a topic partition's log segment. It is
    regenerated for every attempt of copying a given log segment in
    RemoteStorageManager.copyLogSegmentData(RemoteLogSegmentMetadata, LogSegmentData).
    Once the segment is stored in remote storage, this id is used to access the segment later from remote log
    metadata storage.

      Constructor Details

        RemoteLogSegmentId

        public RemoteLogSegmentId(TopicIdPartition topicIdPartition,
                                  Uuid id)

      Method Details

        generateNew

        public static RemoteLogSegmentId generateNew(TopicIdPartition topicIdPartition)

        Creates a new RemoteLogSegmentId for the provided TopicIdPartition with a random Uuid.

        Parameters:
        topicIdPartition - TopicIdPartition of this remote log segment.
        Returns:
        generated RemoteLogSegmentId.
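        A hedged sketch showing that each copy attempt gets a fresh id for the same topic partition:

```java
// Sketch only: generate a new, universally unique segment id for a copy attempt.
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId;

public class SegmentIdExample {
    static RemoteLogSegmentId newIdFor(TopicIdPartition partition) {
        RemoteLogSegmentId id = RemoteLogSegmentId.generateNew(partition);
        System.out.println("Generated " + id + " for " + id.topicIdPartition());
        return id;
    }
}
```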
        topicIdPartition

        public TopicIdPartition topicIdPartition()

        Returns:
        TopicIdPartition of this remote log segment.

        id

        public Uuid id()

        Returns:
        Universally unique id of this remote log segment.

        toString

        public String toString()

        Overrides:
        toString in class Object

        equals

        public boolean equals(Object o)

        Overrides:
        equals in class Object

        hashCode

        public int hashCode()

        Overrides:
        hashCode in class Object
diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentMetadata.CustomMetadata.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentMetadata.CustomMetadata.html
new file mode 100644
index 000000000..239310075
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentMetadata.CustomMetadata.html
@@ -0,0 +1,214 @@

    RemoteLogSegmentMetadata.CustomMetadata (kafka 4.1.0 API)

    Class RemoteLogSegmentMetadata.CustomMetadata

    java.lang.Object
        org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata.CustomMetadata

    Enclosing class:
    RemoteLogSegmentMetadata

    public static class RemoteLogSegmentMetadata.CustomMetadata
    extends Object

    Custom metadata from a RemoteStorageManager plugin.

    The content of this metadata is RSM-dependent and is opaque to the broker, i.e. it is not interpreted, only
    stored along with the rest of the remote log segment metadata.

    Examples of such metadata are:
    1. The storage path on the remote storage, in case it is nondeterministic or version-dependent.
    2. The actual size of all the files related to the segment on the remote storage.

    The maximum size the broker accepts and stores is controlled by the
    remote.log.metadata.custom.metadata.max.bytes setting.

      Constructor Details

        CustomMetadata

        public CustomMetadata(byte[] value)

      Method Details

        value

        public byte[] value()

        equals

        public boolean equals(Object o)

        Overrides:
        equals in class Object

        hashCode

        public int hashCode()

        Overrides:
        hashCode in class Object

        toString

        public String toString()

        Overrides:
        toString in class Object
diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentMetadata.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentMetadata.html
new file mode 100644
index 000000000..98a0eb33e
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentMetadata.html
@@ -0,0 +1,541 @@

    RemoteLogSegmentMetadata (kafka 4.1.0 API)

    Class RemoteLogSegmentMetadata

    java.lang.Object
        org.apache.kafka.server.log.remote.storage.RemoteLogMetadata
            org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata

    public class RemoteLogSegmentMetadata
    extends RemoteLogMetadata

    Describes the metadata about a topic partition's remote log segment in the remote storage. It is uniquely
    represented by a RemoteLogSegmentId.

    A new instance is always created with the state RemoteLogSegmentState.COPY_SEGMENT_STARTED. This can be
    updated by applying a RemoteLogSegmentMetadataUpdate for the respective RemoteLogSegmentId of the
    RemoteLogSegmentMetadata.

      Constructor Details

        RemoteLogSegmentMetadata

        public RemoteLogSegmentMetadata(RemoteLogSegmentId remoteLogSegmentId,
                                        long startOffset,
                                        long endOffset,
                                        long maxTimestampMs,
                                        int brokerId,
                                        long eventTimestampMs,
                                        int segmentSizeInBytes,
                                        Optional<RemoteLogSegmentMetadata.CustomMetadata> customMetadata,
                                        RemoteLogSegmentState state,
                                        Map<Integer,Long> segmentLeaderEpochs)

        Creates an instance with the given metadata of the remote log segment.

        segmentLeaderEpochs can not be empty. If all the records in this segment belong to the same leader epoch,
        then it should have an entry with that epoch mapping to the start offset of this segment.

        Parameters:
        remoteLogSegmentId - Universally unique remote log segment id.
        startOffset - Start offset of this segment (inclusive).
        endOffset - End offset of this segment (inclusive).
        maxTimestampMs - Maximum timestamp in milliseconds in this segment.
        brokerId - Broker id from which this event is generated.
        eventTimestampMs - Epoch time in milliseconds at which the remote log segment is copied to the remote tier storage.
        segmentSizeInBytes - Size of this segment in bytes.
        customMetadata - Custom metadata.
        state - State of the respective segment of remoteLogSegmentId.
        segmentLeaderEpochs - Leader epochs occurred within this segment.

        RemoteLogSegmentMetadata

        public RemoteLogSegmentMetadata(RemoteLogSegmentId remoteLogSegmentId,
                                        long startOffset,
                                        long endOffset,
                                        long maxTimestampMs,
                                        int brokerId,
                                        long eventTimestampMs,
                                        int segmentSizeInBytes,
                                        Optional<RemoteLogSegmentMetadata.CustomMetadata> customMetadata,
                                        RemoteLogSegmentState state,
                                        Map<Integer,Long> segmentLeaderEpochs,
                                        boolean txnIdxEmpty)

        Same as the constructor above, with one additional parameter:

        txnIdxEmpty - True if the transaction index is empty, false otherwise.

        RemoteLogSegmentMetadata

        public RemoteLogSegmentMetadata(RemoteLogSegmentId remoteLogSegmentId,
                                        long startOffset,
                                        long endOffset,
                                        long maxTimestampMs,
                                        int brokerId,
                                        long eventTimestampMs,
                                        int segmentSizeInBytes,
                                        Map<Integer,Long> segmentLeaderEpochs)

        Creates an instance with the given metadata of the remote log segment and its state as
        RemoteLogSegmentState.COPY_SEGMENT_STARTED. The parameters have the same meaning as in the first
        constructor above.

        RemoteLogSegmentMetadata

        public RemoteLogSegmentMetadata(RemoteLogSegmentId remoteLogSegmentId,
                                        long startOffset,
                                        long endOffset,
                                        long maxTimestampMs,
                                        int brokerId,
                                        long eventTimestampMs,
                                        int segmentSizeInBytes,
                                        Map<Integer,Long> segmentLeaderEpochs,
                                        boolean txnIdxEmpty)

        Creates an instance with the given metadata of the remote log segment and its state as
        RemoteLogSegmentState.COPY_SEGMENT_STARTED, additionally recording whether the transaction index is empty
        (txnIdxEmpty). The remaining parameters have the same meaning as in the first constructor above.
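        A hedged construction sketch using the shorter constructor (state defaults to COPY_SEGMENT_STARTED); all
        numeric values are illustrative placeholders:

```java
// Sketch only: metadata for a segment covering offsets 0..999, written entirely
// under leader epoch 0. Timestamps and sizes are placeholders.
import java.util.Map;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata;

public class BuildSegmentMetadataExample {
    static RemoteLogSegmentMetadata build(RemoteLogSegmentId segmentId, int brokerId) {
        Map<Integer, Long> leaderEpochs = Map.of(0, 0L);   // epoch 0 starts at offset 0
        return new RemoteLogSegmentMetadata(
                segmentId,
                0L,                          // startOffset (inclusive)
                999L,                        // endOffset (inclusive)
                System.currentTimeMillis(),  // maxTimestampMs
                brokerId,
                System.currentTimeMillis(),  // eventTimestampMs
                1024 * 1024,                 // segmentSizeInBytes
                leaderEpochs);
    }
}
```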

      Method Details

        remoteLogSegmentId

        public RemoteLogSegmentId remoteLogSegmentId()

        Returns:
        unique id of this segment.

        startOffset

        public long startOffset()

        Returns:
        Start offset of this segment (inclusive).

        endOffset

        public long endOffset()

        Returns:
        End offset of this segment (inclusive).

        segmentSizeInBytes

        public int segmentSizeInBytes()

        Returns:
        Total size of this segment in bytes.

        maxTimestampMs

        public long maxTimestampMs()

        Returns:
        Maximum timestamp in milliseconds of a record within this segment.

        segmentLeaderEpochs

        public NavigableMap<Integer,Long> segmentLeaderEpochs()

        Returns:
        Map of leader epoch vs offset for the records available in this segment.

        customMetadata

        public Optional<RemoteLogSegmentMetadata.CustomMetadata> customMetadata()

        Returns:
        Custom metadata.

        state

        public RemoteLogSegmentState state()

        Returns:
        State of this remote log segment.

        isTxnIdxEmpty

        public boolean isTxnIdxEmpty()

        If true, indicates that the transaction index is empty.

        Returns:
        True if the transaction index is empty, false otherwise.

        createWithUpdates

        public RemoteLogSegmentMetadata createWithUpdates(RemoteLogSegmentMetadataUpdate rlsmUpdate)

        Creates a new RemoteLogSegmentMetadata by applying the given rlsmUpdate to this instance. This method does
        not modify this instance.

        Parameters:
        rlsmUpdate - update to be applied.
        Returns:
        a new instance created by applying the given update to this instance.
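        A hedged sketch of the copy-on-update behavior of createWithUpdates(); brokerId is an assumed input:

```java
// Sketch only: createWithUpdates() returns a new instance and leaves the original unchanged.
import java.util.Optional;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState;

public class ApplyUpdateExample {
    static RemoteLogSegmentMetadata applyCopyFinished(RemoteLogSegmentMetadata original, int brokerId) {
        RemoteLogSegmentMetadataUpdate update = new RemoteLogSegmentMetadataUpdate(
                original.remoteLogSegmentId(),
                System.currentTimeMillis(),
                Optional.empty(),
                RemoteLogSegmentState.COPY_SEGMENT_FINISHED,
                brokerId);
        RemoteLogSegmentMetadata updated = original.createWithUpdates(update);
        // original.state() is unchanged; updated.state() reports COPY_SEGMENT_FINISHED.
        return updated;
    }
}
```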
        topicIdPartition

        public TopicIdPartition topicIdPartition()

        Specified by:
        topicIdPartition in class RemoteLogMetadata
        Returns:
        TopicIdPartition for which this event is generated.

        equals

        public boolean equals(Object o)

        Overrides:
        equals in class Object

        hashCode

        public int hashCode()

        Overrides:
        hashCode in class Object

        toString

        public String toString()

        Overrides:
        toString in class Object
diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentMetadataUpdate.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentMetadataUpdate.html
new file mode 100644
index 000000000..246e5fea6
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentMetadataUpdate.html
@@ -0,0 +1,265 @@

    RemoteLogSegmentMetadataUpdate (kafka 4.1.0 API)

    Class RemoteLogSegmentMetadataUpdate

    java.lang.Object
        org.apache.kafka.server.log.remote.storage.RemoteLogMetadata
            org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate

    public class RemoteLogSegmentMetadataUpdate
    extends RemoteLogMetadata

    Describes a metadata update for a log segment in the remote storage. This is currently used to update the
    state of a remote log segment via
    RemoteLogMetadataManager.updateRemoteLogSegmentMetadata(RemoteLogSegmentMetadataUpdate).
    It also includes the timestamp of this event.

      Constructor Details

        RemoteLogSegmentMetadataUpdate

        public RemoteLogSegmentMetadataUpdate(RemoteLogSegmentId remoteLogSegmentId,
                                              long eventTimestampMs,
                                              Optional<RemoteLogSegmentMetadata.CustomMetadata> customMetadata,
                                              RemoteLogSegmentState state,
                                              int brokerId)

        Parameters:
        remoteLogSegmentId - Universally unique remote log segment id.
        eventTimestampMs - Epoch time in milliseconds at which the remote log segment is copied to the remote tier storage.
        customMetadata - Custom metadata.
        state - State of the remote log segment.
        brokerId - Broker id from which this event is generated.
diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentState.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentState.html
new file mode 100644
index 000000000..9b3c78b50
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteLogSegmentState.html
@@ -0,0 +1,297 @@

    RemoteLogSegmentState (kafka 4.1.0 API)

    Enum Class RemoteLogSegmentState

    java.lang.Object
        java.lang.Enum<RemoteLogSegmentState>
            org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState

    All Implemented Interfaces:
    Serializable, Comparable<RemoteLogSegmentState>, Constable

    public enum RemoteLogSegmentState
    extends Enum<RemoteLogSegmentState>

    This enum indicates the state of a remote log segment, based on the action executed on the segment by the
    remote log service implementation.

    It goes through the state transitions below. A self transition is treated as valid, which allows updating with
    the same state in case of retries and failover.

     +---------------------+            +----------------------+
     |COPY_SEGMENT_STARTED |----------->|COPY_SEGMENT_FINISHED |
     +-------------------+-+            +--+-------------------+
                         |                 |
                         |                 |
                         v                 v
                      +--+-----------------+-+
                      |DELETE_SEGMENT_STARTED|
                      +-----------+----------+
                                  |
                                  |
                                  v
                      +-----------+-----------+
                      |DELETE_SEGMENT_FINISHED|
                      +-----------------------+

      Enum Constant Details

        COPY_SEGMENT_STARTED

        public static final RemoteLogSegmentState COPY_SEGMENT_STARTED

        This state indicates that copying the segment to remote storage has started but is not yet finished.

        COPY_SEGMENT_FINISHED

        public static final RemoteLogSegmentState COPY_SEGMENT_FINISHED

        This state indicates that copying the segment to remote storage has finished.

        DELETE_SEGMENT_STARTED

        public static final RemoteLogSegmentState DELETE_SEGMENT_STARTED

        This state indicates that deletion of the segment has started but is not yet finished.

        DELETE_SEGMENT_FINISHED

        public static final RemoteLogSegmentState DELETE_SEGMENT_FINISHED

        This state indicates that the segment has been deleted successfully.

      Method Details

        values

        public static RemoteLogSegmentState[] values()

        Returns an array containing the constants of this enum class, in the order they are declared.

        Returns:
        an array containing the constants of this enum class, in the order they are declared

        valueOf

        public static RemoteLogSegmentState valueOf(String name)

        Returns the enum constant of this class with the specified name. The string must match exactly an
        identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not
        permitted.)

        Parameters:
        name - the name of the enum constant to be returned.
        Returns:
        the enum constant with the specified name
        Throws:
        IllegalArgumentException - if this enum class has no constant with the specified name
        NullPointerException - if the argument is null

        id

        public byte id()

        forId

        public static RemoteLogSegmentState forId(byte id)

        isValidTransition

        public static boolean isValidTransition(RemoteLogSegmentState srcState,
                                                RemoteLogSegmentState targetState)
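        A hedged sketch of checking transitions against the diagram above before applying an update:

```java
// Sketch only: validate segment state transitions. Expected results follow the
// transition diagram above (and self transitions are also treated as valid).
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState;

public class SegmentStateTransitionExample {
    public static void main(String[] args) {
        System.out.println(RemoteLogSegmentState.isValidTransition(
                RemoteLogSegmentState.COPY_SEGMENT_STARTED,
                RemoteLogSegmentState.COPY_SEGMENT_FINISHED));   // expected: true
        System.out.println(RemoteLogSegmentState.isValidTransition(
                RemoteLogSegmentState.DELETE_SEGMENT_FINISHED,
                RemoteLogSegmentState.COPY_SEGMENT_STARTED));    // expected: false
    }
}
```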
diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemotePartitionDeleteMetadata.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemotePartitionDeleteMetadata.html
new file mode 100644
index 000000000..0b1dbd731
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemotePartitionDeleteMetadata.html
@@ -0,0 +1,238 @@

    RemotePartitionDeleteMetadata (kafka 4.1.0 API)

    Class RemotePartitionDeleteMetadata

    java.lang.Object
        org.apache.kafka.server.log.remote.storage.RemoteLogMetadata
            org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteMetadata

    public class RemotePartitionDeleteMetadata
    extends RemoteLogMetadata

    This class represents the metadata about a remote partition. It can be created/updated with
    RemoteLogMetadataManager.putRemotePartitionDeleteMetadata(RemotePartitionDeleteMetadata).
    Possible state transitions are mentioned at RemotePartitionDeleteState.

      Constructor Details

        RemotePartitionDeleteMetadata

        public RemotePartitionDeleteMetadata(TopicIdPartition topicIdPartition,
                                             RemotePartitionDeleteState state,
                                             long eventTimestampMs,
                                             int brokerId)

        Creates an instance of this class with the given metadata.

        Parameters:
        topicIdPartition - topic partition for which this event is meant.
        state - State of the remote topic partition.
        eventTimestampMs - Epoch time in milliseconds at which this event occurred.
        brokerId - Id of the broker in which this event is raised.
diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemotePartitionDeleteState.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemotePartitionDeleteState.html
new file mode 100644
index 000000000..5c73bace6
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemotePartitionDeleteState.html
@@ -0,0 +1,286 @@

    RemotePartitionDeleteState (kafka 4.1.0 API)

    Enum Class RemotePartitionDeleteState

    java.lang.Object
        java.lang.Enum<RemotePartitionDeleteState>
            org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteState

    All Implemented Interfaces:
    Serializable, Comparable<RemotePartitionDeleteState>, Constable

    public enum RemotePartitionDeleteState
    extends Enum<RemotePartitionDeleteState>

    This enum indicates the deletion state of a remote topic partition, based on the action executed on the
    partition by the remote log service implementation. The state transitions are shown below. A self transition
    is treated as valid, which allows updating with the same state in case of retries and failover.

     +-------------------------+
     |DELETE_PARTITION_MARKED  |
     +-----------+-------------+
                 |
                 |
     +-----------v--------------+
     |DELETE_PARTITION_STARTED  |
     +-----------+--------------+
                 |
                 |
     +-----------v--------------+
     |DELETE_PARTITION_FINISHED |
     +--------------------------+

      Enum Constant Details

        DELETE_PARTITION_MARKED

        public static final RemotePartitionDeleteState DELETE_PARTITION_MARKED

        This state is used when a topic/partition is marked for deletion by the controller. That means all of its
        remote log segments are eligible for deletion, so that remote partition removers can start deleting them.

        DELETE_PARTITION_STARTED

        public static final RemotePartitionDeleteState DELETE_PARTITION_STARTED

        This state indicates that the partition deletion has started but is not yet finished.

        DELETE_PARTITION_FINISHED

        public static final RemotePartitionDeleteState DELETE_PARTITION_FINISHED

        This state indicates that the partition has been deleted successfully.

      Method Details

        values

        public static RemotePartitionDeleteState[] values()

        Returns an array containing the constants of this enum class, in the order they are declared.

        Returns:
        an array containing the constants of this enum class, in the order they are declared

        valueOf

        public static RemotePartitionDeleteState valueOf(String name)

        Returns the enum constant of this class with the specified name. The string must match exactly an
        identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not
        permitted.)

        Parameters:
        name - the name of the enum constant to be returned.
        Returns:
        the enum constant with the specified name
        Throws:
        IllegalArgumentException - if this enum class has no constant with the specified name
        NullPointerException - if the argument is null

        id

        public byte id()

        forId

        public static RemotePartitionDeleteState forId(byte id)

        isValidTransition

        public static boolean isValidTransition(RemotePartitionDeleteState srcState,
                                                RemotePartitionDeleteState targetState)
diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteResourceNotFoundException.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteResourceNotFoundException.html
new file mode 100644
index 000000000..2ece0f8ac
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteResourceNotFoundException.html
@@ -0,0 +1,170 @@

    RemoteResourceNotFoundException (kafka 4.1.0 API)

    Class RemoteResourceNotFoundException

    java.lang.Object
        java.lang.Throwable
            java.lang.Exception
                org.apache.kafka.server.log.remote.storage.RemoteStorageException
                    org.apache.kafka.server.log.remote.storage.RemoteResourceNotFoundException

    All Implemented Interfaces:
    Serializable

    public class RemoteResourceNotFoundException
    extends RemoteStorageException

    Exception thrown when a resource is not found on the remote storage.

    A resource can be a log segment, any of its indexes, or anything else that was stored in remote storage for a
    particular log segment.

      Constructor Details

        RemoteResourceNotFoundException

        public RemoteResourceNotFoundException(String message)

        RemoteResourceNotFoundException

        public RemoteResourceNotFoundException(Throwable cause)

        RemoteResourceNotFoundException

        public RemoteResourceNotFoundException(String message,
                                               Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteStorageException.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteStorageException.html
new file mode 100644
index 000000000..b1e2f2456
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteStorageException.html
@@ -0,0 +1,170 @@

    RemoteStorageException (kafka 4.1.0 API)

    Class RemoteStorageException

    java.lang.Object
        java.lang.Throwable
            java.lang.Exception
                org.apache.kafka.server.log.remote.storage.RemoteStorageException

    All Implemented Interfaces:
    Serializable

    Direct Known Subclasses:
    RemoteResourceNotFoundException

    public class RemoteStorageException
    extends Exception

    Exception thrown when there is a remote storage error. This can be used as the base exception by implementors
    of RemoteStorageManager or RemoteLogMetadataManager to create extended exceptions.

      Constructor Details

        RemoteStorageException

        public RemoteStorageException(String message)

        RemoteStorageException

        public RemoteStorageException(String message,
                                      Throwable cause)

        RemoteStorageException

        public RemoteStorageException(Throwable cause)
diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteStorageManager.IndexType.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteStorageManager.IndexType.html
new file mode 100644
index 000000000..f66101c85
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteStorageManager.IndexType.html
@@ -0,0 +1,261 @@

    RemoteStorageManager.IndexType (kafka 4.1.0 API)

    Enum Class RemoteStorageManager.IndexType

    java.lang.Object
        java.lang.Enum<RemoteStorageManager.IndexType>
            org.apache.kafka.server.log.remote.storage.RemoteStorageManager.IndexType

    All Implemented Interfaces:
    Serializable, Comparable<RemoteStorageManager.IndexType>, Constable

    Enclosing interface:
    RemoteStorageManager

    public static enum RemoteStorageManager.IndexType
    extends Enum<RemoteStorageManager.IndexType>

    Type of the index file.

      Method Details

        values

        public static RemoteStorageManager.IndexType[] values()

        Returns an array containing the constants of this enum class, in the order they are declared.

        Returns:
        an array containing the constants of this enum class, in the order they are declared

        valueOf

        public static RemoteStorageManager.IndexType valueOf(String name)

        Returns the enum constant of this class with the specified name. The string must match exactly an
        identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not
        permitted.)

        Parameters:
        name - the name of the enum constant to be returned.
        Returns:
        the enum constant with the specified name
        Throws:
        IllegalArgumentException - if this enum class has no constant with the specified name
        NullPointerException - if the argument is null
diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteStorageManager.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteStorageManager.html
new file mode 100644
index 000000000..9d34b3d4c
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteStorageManager.html
@@ -0,0 +1,303 @@

    RemoteStorageManager (kafka 4.1.0 API)

    Interface RemoteStorageManager

    All Superinterfaces:
    AutoCloseable, Closeable, Configurable

    public interface RemoteStorageManager
    extends Configurable, Closeable

    This interface provides the lifecycle of remote log segments, which includes copy, fetch, and delete from
    remote storage.

    Each upload or copy of a segment is initiated with a RemoteLogSegmentMetadata containing a RemoteLogSegmentId,
    which is universally unique even for the same topic partition and offsets.

    RemoteLogSegmentMetadata is stored in RemoteLogMetadataManager before and after copy/delete operations on
    RemoteStorageManager, with the respective RemoteLogSegmentState. RemoteLogMetadataManager is responsible for
    storing and fetching metadata about the remote log segments in a strongly consistent manner. This allows
    RemoteStorageManager to have eventual consistency on metadata (although the data is stored with strongly
    consistent semantics).

    All properties prefixed with "remote.log.storage.manager.impl.prefix" (default value "rsm.config.") are passed
    when Configurable.configure(Map) is invoked on this instance.

    Implement Monitorable to enable the manager to register metrics.
    The following tags are automatically added to all registered metrics: config set to
    remote.log.storage.manager.class.name, and class set to the RemoteStorageManager class name.

      Method Details

        copyLogSegmentData

        Optional<RemoteLogSegmentMetadata.CustomMetadata> copyLogSegmentData(RemoteLogSegmentMetadata remoteLogSegmentMetadata,
                                                                             LogSegmentData logSegmentData)
                                                                      throws RemoteStorageException

        Copies the given LogSegmentData provided for the given remoteLogSegmentMetadata. This includes the log
        segment and its auxiliary indexes, such as the offset index, time index, transaction index, leader epoch
        index, and producer snapshot index.

        The invoker of this API should always send a unique id as part of RemoteLogSegmentMetadata.remoteLogSegmentId(),
        even when it retries to invoke this method for the same log segment data.

        This operation is expected to be idempotent. If a copy operation is retried and there is existing content
        already written, it should be overwritten, and no RemoteStorageException should be thrown.

        Parameters:
        remoteLogSegmentMetadata - metadata about the remote log segment.
        logSegmentData - data to be copied to tiered storage.
        Returns:
        custom metadata to be added to the segment metadata after copying.
        Throws:
        RemoteStorageException - if there are any errors in storing the data of the segment.
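        For illustration, a hedged skeleton of how a plugin might implement this method and return CustomMetadata
        recording where the segment was stored; copyToRemote(...) is a hypothetical helper, and
        LogSegmentData.logSegment() is assumed to expose the local segment file as a java.nio.file.Path:

```java
// Sketch only, not a real RemoteStorageManager implementation. copyToRemote(...) is a
// hypothetical upload helper; error handling is reduced to the required exception type.
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.Optional;
import org.apache.kafka.server.log.remote.storage.LogSegmentData;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata;
import org.apache.kafka.server.log.remote.storage.RemoteStorageException;
import org.apache.kafka.server.log.remote.storage.RemoteStorageManager;

public abstract class SketchRemoteStorageManager implements RemoteStorageManager {
    @Override
    public Optional<RemoteLogSegmentMetadata.CustomMetadata> copyLogSegmentData(
            RemoteLogSegmentMetadata metadata, LogSegmentData segmentData) throws RemoteStorageException {
        try {
            // Assumption: logSegment() returns the local segment file to upload.
            String remotePath = copyToRemote(metadata.remoteLogSegmentId(), segmentData.logSegment());
            // Record the remote location opaquely; the broker only stores these bytes.
            return Optional.of(new RemoteLogSegmentMetadata.CustomMetadata(
                    remotePath.getBytes(StandardCharsets.UTF_8)));
        } catch (Exception e) {
            throw new RemoteStorageException("Failed to copy segment " + metadata.remoteLogSegmentId(), e);
        }
    }

    // Hypothetical upload helper; a real plugin would talk to its object store here.
    protected abstract String copyToRemote(RemoteLogSegmentId id, Path segmentFile) throws Exception;
}
```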
        fetchLogSegment

        InputStream fetchLogSegment(RemoteLogSegmentMetadata remoteLogSegmentMetadata,
                                    int startPosition)
                             throws RemoteStorageException

        Returns the remote log segment data file/object as an InputStream for the given RemoteLogSegmentMetadata,
        starting from the given startPosition. The stream will end at the end of the remote log segment data
        file/object.

        Parameters:
        remoteLogSegmentMetadata - metadata about the remote log segment.
        startPosition - start position of the log segment to be read, inclusive.
        Returns:
        input stream of the requested log segment data.
        Throws:
        RemoteStorageException - if there are any errors while fetching the desired segment.
        RemoteResourceNotFoundException - if the requested log segment is not found in the remote storage.
        fetchLogSegment

        InputStream fetchLogSegment(RemoteLogSegmentMetadata remoteLogSegmentMetadata,
                                    int startPosition,
                                    int endPosition)
                             throws RemoteStorageException

        Returns the remote log segment data file/object as an InputStream for the given RemoteLogSegmentMetadata,
        starting from the given startPosition. The stream will end at the smaller of endPosition and the end of
        the remote log segment data file/object.

        Parameters:
        remoteLogSegmentMetadata - metadata about the remote log segment.
        startPosition - start position of the log segment to be read, inclusive.
        endPosition - end position of the log segment to be read, inclusive.
        Returns:
        input stream of the requested log segment data.
        Throws:
        RemoteStorageException - if there are any errors while fetching the desired segment.
        RemoteResourceNotFoundException - if the requested log segment is not found in the remote storage.
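        A hedged caller-side sketch reading a byte range of a remote segment; both positions are inclusive, per
        the contract above:

```java
// Sketch only: read an inclusive byte range of a remote segment through the plugin API.
import java.io.InputStream;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata;
import org.apache.kafka.server.log.remote.storage.RemoteStorageManager;

public class FetchSegmentRangeExample {
    static byte[] readRange(RemoteStorageManager rsm,
                            RemoteLogSegmentMetadata metadata,
                            int startPosition,
                            int endPosition) throws Exception {
        // The stream ends at the smaller of endPosition and the end of the segment object.
        try (InputStream in = rsm.fetchLogSegment(metadata, startPosition, endPosition)) {
            return in.readAllBytes();
        }
    }
}
```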
        fetchIndex

        InputStream fetchIndex(RemoteLogSegmentMetadata remoteLogSegmentMetadata,
                               RemoteStorageManager.IndexType indexType)
                        throws RemoteStorageException

        Returns the index for the respective log segment of RemoteLogSegmentMetadata.

        Note: The transaction index may not exist because there are no transactional records. In this case, the
        implementation should throw a RemoteResourceNotFoundException instead of returning null.

        Parameters:
        remoteLogSegmentMetadata - metadata about the remote log segment.
        indexType - type of the index to be fetched for the segment.
        Returns:
        input stream of the requested index.
        Throws:
        RemoteStorageException - if there are any errors while fetching the index.
        RemoteResourceNotFoundException - if the requested index is not found in the remote storage.
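        A hedged sketch of handling a legitimately missing index (for example, a transaction index for a segment
        with no transactional records):

```java
// Sketch only: a missing index surfaces as RemoteResourceNotFoundException, not as null.
import java.io.InputStream;
import java.util.Optional;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata;
import org.apache.kafka.server.log.remote.storage.RemoteResourceNotFoundException;
import org.apache.kafka.server.log.remote.storage.RemoteStorageException;
import org.apache.kafka.server.log.remote.storage.RemoteStorageManager;

public class FetchIndexExample {
    static Optional<InputStream> fetchIndexIfPresent(RemoteStorageManager rsm,
                                                     RemoteLogSegmentMetadata metadata,
                                                     RemoteStorageManager.IndexType type)
            throws RemoteStorageException {
        try {
            return Optional.of(rsm.fetchIndex(metadata, type));
        } catch (RemoteResourceNotFoundException e) {
            // The requested index was never stored for this segment.
            return Optional.empty();
        }
    }
}
```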
        deleteLogSegmentData

        void deleteLogSegmentData(RemoteLogSegmentMetadata remoteLogSegmentMetadata)
                           throws RemoteStorageException

        Deletes the resources associated with the given remoteLogSegmentMetadata. Deletion is considered
        successful if this call returns without any errors. It throws RemoteStorageException if there are any
        errors in deleting the files.

        This operation is expected to be idempotent. If the resources are not found, it is not expected to throw
        RemoteResourceNotFoundException, as they may already have been removed by a previous attempt.

        Parameters:
        remoteLogSegmentMetadata - metadata about the remote log segment to be deleted.
        Throws:
        RemoteStorageException - if any storage-related errors occur.
diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteStorageMetrics.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteStorageMetrics.html
new file mode 100644
index 000000000..4419c660c
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/RemoteStorageMetrics.html
@@ -0,0 +1,387 @@

    RemoteStorageMetrics (kafka 4.1.0 API)

    Class RemoteStorageMetrics

    java.lang.Object
        org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics

    public class RemoteStorageMetrics
    extends Object

    This class contains the metrics related to the tiered storage feature. It provides a centralized place to
    store them, so that all of them can be verified easily.

      Field Details

        public static final Set<String> REMOTE_STORAGE_THREAD_POOL_METRICS
        public static final com.yammer.metrics.core.MetricName REMOTE_COPY_BYTES_PER_SEC_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_FETCH_BYTES_PER_SEC_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_FETCH_REQUESTS_PER_SEC_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_COPY_REQUESTS_PER_SEC_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_DELETE_REQUESTS_PER_SEC_METRIC
        public static final com.yammer.metrics.core.MetricName BUILD_REMOTE_LOG_AUX_STATE_REQUESTS_PER_SEC_METRIC
        public static final com.yammer.metrics.core.MetricName FAILED_REMOTE_FETCH_PER_SEC_METRIC
        public static final com.yammer.metrics.core.MetricName FAILED_REMOTE_COPY_PER_SEC_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_LOG_METADATA_COUNT_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_LOG_SIZE_BYTES_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_LOG_SIZE_COMPUTATION_TIME_METRIC
        public static final com.yammer.metrics.core.MetricName FAILED_REMOTE_DELETE_PER_SEC_METRIC
        public static final com.yammer.metrics.core.MetricName FAILED_BUILD_REMOTE_LOG_AUX_STATE_PER_SEC_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_COPY_LAG_BYTES_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_COPY_LAG_SEGMENTS_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_DELETE_LAG_BYTES_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_DELETE_LAG_SEGMENTS_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_LOG_MANAGER_TASKS_AVG_IDLE_PERCENT_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_LOG_READER_TASK_QUEUE_SIZE_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_LOG_READER_AVG_IDLE_PERCENT_METRIC
        public static final com.yammer.metrics.core.MetricName REMOTE_LOG_READER_FETCH_RATE_AND_TIME_METRIC
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        RemoteStorageMetrics

        +
        public RemoteStorageMetrics()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        allMetrics

        +
        public static Set<com.yammer.metrics.core.MetricName> allMetrics()
        +
        +
      • +
      • +
        +

        brokerTopicStatsMetrics

        +
        public static Set<com.yammer.metrics.core.MetricName> brokerTopicStatsMetrics()
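For example, a test or operator tool could enumerate every tiered-storage metric this class defines. The sketch below assumes only the two static accessors documented here plus the Yammer MetricName type they return; the getMBeanName accessor comes from that type.

```java
import com.yammer.metrics.core.MetricName;
import org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics;

// Print every tiered-storage metric defined by RemoteStorageMetrics, e.g. to
// cross-check that a monitoring setup scrapes all of them.
public class ListRemoteStorageMetrics {
    public static void main(String[] args) {
        for (MetricName metric : RemoteStorageMetrics.allMetrics()) {
            System.out.println(metric.getMBeanName());
        }
        System.out.println("Broker topic stats subset: "
            + RemoteStorageMetrics.brokerTopicStatsMetrics().size() + " metrics");
    }
}
```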
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/package-summary.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/package-summary.html new file mode 100644 index 000000000..5d648586c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/package-summary.html @@ -0,0 +1,153 @@ + + + + +org.apache.kafka.server.log.remote.storage (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.server.log.remote.storage

    +
    +
    +
    package org.apache.kafka.server.log.remote.storage
    +
    +
    Provides a pluggable API for defining remote storage and retrieval of Kafka log segments.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/log/remote/storage/package-tree.html b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/package-tree.html new file mode 100644 index 000000000..c9210b345 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/log/remote/storage/package-tree.html @@ -0,0 +1,131 @@ + + + + +org.apache.kafka.server.log.remote.storage Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.server.log.remote.storage

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +

    Enum Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/policy/AlterConfigPolicy.RequestMetadata.html b/static/41/javadoc/org/apache/kafka/server/policy/AlterConfigPolicy.RequestMetadata.html new file mode 100644 index 000000000..88c7cc4ba --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/policy/AlterConfigPolicy.RequestMetadata.html @@ -0,0 +1,221 @@ + + + + +AlterConfigPolicy.RequestMetadata (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class AlterConfigPolicy.RequestMetadata

    +
    +
    java.lang.Object +
    org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata
    +
    +
    +
    +
    Enclosing interface:
    +
    AlterConfigPolicy
    +
    +
    +
    public static class AlterConfigPolicy.RequestMetadata +extends Object
    +
    Class containing the create request parameters.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        RequestMetadata

        +
        public RequestMetadata(ConfigResource resource, + Map<String,String> configs)
        +
        Create an instance of this class with the provided parameters. + + This constructor is public to make testing of AlterConfigPolicy implementations easier.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configs

        +
        public Map<String,String> configs()
        +
        Return the configs in the request.
        +
        +
      • +
      • +
        +

        resource

        +
        public ConfigResource resource()
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/policy/AlterConfigPolicy.html b/static/41/javadoc/org/apache/kafka/server/policy/AlterConfigPolicy.html new file mode 100644 index 000000000..cb23bcc7c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/policy/AlterConfigPolicy.html @@ -0,0 +1,176 @@ + + + + +AlterConfigPolicy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface AlterConfigPolicy

    +
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Configurable
    +
    +
    +
    public interface AlterConfigPolicy +extends Configurable, AutoCloseable
    +

    An interface for enforcing a policy on alter configs requests. + +

    Common use cases are requiring that the replication factor, min.insync.replicas and/or retention settings for a + topic remain within an allowable range. + +

    If alter.config.policy.class.name is defined, Kafka will create an instance of the specified class + using the default constructor and will then pass the broker configs to its configure() method. During + broker shutdown, the close() method will be invoked so that resources can be released (if necessary).
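As a minimal sketch of this contract, the policy below refuses to lower min.insync.replicas under a fixed floor. The class name and floor value are illustrative; a real policy would usually read its threshold from the broker configs passed to configure().

```java
import java.util.Map;
import org.apache.kafka.common.errors.PolicyViolationException;
import org.apache.kafka.server.policy.AlterConfigPolicy;

// Illustrative policy: reject alter-config requests that set
// min.insync.replicas below a fixed floor.
public class MinIsrAlterConfigPolicy implements AlterConfigPolicy {

    private static final int MIN_ISR_FLOOR = 2; // arbitrary floor for the sketch

    @Override
    public void configure(Map<String, ?> configs) {
        // Broker configs are passed here; nothing is read in this sketch.
    }

    @Override
    public void validate(AlterConfigPolicy.RequestMetadata requestMetadata) throws PolicyViolationException {
        String minIsr = requestMetadata.configs().get("min.insync.replicas");
        if (minIsr != null && Integer.parseInt(minIsr) < MIN_ISR_FLOOR) {
            throw new PolicyViolationException(
                "min.insync.replicas must be at least " + MIN_ISR_FLOOR + " for " + requestMetadata.resource());
        }
    }

    @Override
    public void close() {
        // No resources to release.
    }
}
```

A broker would pick this up when alter.config.policy.class.name points at the class and it is on the broker's classpath, as described above.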

    +
    +
    +
      + +
    • +
      +

      Nested Class Summary

      +
      Nested Classes
      +
      +
      Modifier and Type
      +
      Interface
      +
      Description
      +
      static class 
      + +
      +
      Class containing the create request parameters.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      Validate the request parameters and throw a PolicyViolationException with a suitable error + message if the alter configs request parameters for the provided resource do not satisfy this policy.
      +
      +
      +
      +
      +
      +

      Methods inherited from interface java.lang.AutoCloseable

      +close
      +
      +

      Methods inherited from interface org.apache.kafka.common.Configurable

      +configure
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        validate

        +
        void validate(AlterConfigPolicy.RequestMetadata requestMetadata) + throws PolicyViolationException
        +
        Validate the request parameters and throw a PolicyViolationException with a suitable error + message if the alter configs request parameters for the provided resource do not satisfy this policy. + + Clients will receive the POLICY_VIOLATION error code along with the exception's message. Note that validation + failure only affects the relevant resource, other resources in the request will still be processed.
        +
        +
        Parameters:
        +
        requestMetadata - the alter configs request parameters for the provided resource (topic is the only resource + type whose configs can be updated currently).
        +
        Throws:
        +
        PolicyViolationException - if the request parameters do not satisfy this policy.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/policy/CreateTopicPolicy.RequestMetadata.html b/static/41/javadoc/org/apache/kafka/server/policy/CreateTopicPolicy.RequestMetadata.html new file mode 100644 index 000000000..9ac3e462c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/policy/CreateTopicPolicy.RequestMetadata.html @@ -0,0 +1,279 @@ + + + + +CreateTopicPolicy.RequestMetadata (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class CreateTopicPolicy.RequestMetadata

    +
    +
    java.lang.Object +
    org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata
    +
    +
    +
    +
    Enclosing interface:
    +
    CreateTopicPolicy
    +
    +
    +
    public static class CreateTopicPolicy.RequestMetadata +extends Object
    +
    Class containing the create request parameters.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        RequestMetadata

        +
        public RequestMetadata(String topic, + Integer numPartitions, + Short replicationFactor, + Map<Integer,List<Integer>> replicasAssignments, + Map<String,String> configs)
        +
        Create an instance of this class with the provided parameters. + + This constructor is public to make testing of CreateTopicPolicy implementations easier.
        +
        +
        Parameters:
        +
        topic - the name of the topic to create.
        +
        numPartitions - the number of partitions to create or null if replicasAssignments is set.
        +
replicationFactor - the replication factor for the topic or null if replicasAssignments is set.
+
replicasAssignments - replica assignments or null if numPartitions and replicationFactor are set. The + assignment is a map from partition id to replica (broker) ids.
        +
        configs - topic configs for the topic to be created, not including broker defaults. Broker configs are + passed via the configure() method of the policy implementation.
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        topic

        +
        public String topic()
        +
        Return the name of the topic to create.
        +
        +
      • +
      • +
        +

        numPartitions

        +
        public Integer numPartitions()
        +
        Return the number of partitions to create or null if replicaAssignments is not null.
        +
        +
      • +
      • +
        +

        replicationFactor

        +
        public Short replicationFactor()
        +
        Return the number of replicas to create or null if replicaAssignments is not null.
        +
        +
      • +
      • +
        +

        replicasAssignments

        +
        public Map<Integer,List<Integer>> replicasAssignments()
        +
        Return a map from partition id to replica (broker) ids or null if numPartitions and replicationFactor are + set instead.
        +
        +
      • +
      • +
        +

        configs

        +
        public Map<String,String> configs()
        +
        Return topic configs in the request, not including broker defaults. Broker configs are passed via + the configure() method of the policy implementation.
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/policy/CreateTopicPolicy.html b/static/41/javadoc/org/apache/kafka/server/policy/CreateTopicPolicy.html new file mode 100644 index 000000000..3e8de8e80 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/policy/CreateTopicPolicy.html @@ -0,0 +1,175 @@ + + + + +CreateTopicPolicy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface CreateTopicPolicy

    +
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Configurable
    +
    +
    +
    public interface CreateTopicPolicy +extends Configurable, AutoCloseable
    +

    An interface for enforcing a policy on create topics requests. + +

    Common use cases are requiring that the replication factor, min.insync.replicas and/or retention settings for a + topic are within an allowable range. + +

    If create.topic.policy.class.name is defined, Kafka will create an instance of the specified class + using the default constructor and will then pass the broker configs to its configure() method. During + broker shutdown, the close() method will be invoked so that resources can be released (if necessary).
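A minimal sketch of such a policy is shown below; it requires a replication factor of at least 3 for every new topic. The threshold and class name are illustrative, and the fallback to the explicit replica assignments reflects the documented rule that replicationFactor is null when assignments are given.

```java
import java.util.Map;
import org.apache.kafka.common.errors.PolicyViolationException;
import org.apache.kafka.server.policy.CreateTopicPolicy;

// Illustrative policy: every new topic must be replicated at least 3 times.
public class MinReplicationCreateTopicPolicy implements CreateTopicPolicy {

    private static final int MIN_REPLICATION = 3; // arbitrary threshold for the sketch

    @Override
    public void configure(Map<String, ?> configs) { }

    @Override
    public void validate(CreateTopicPolicy.RequestMetadata requestMetadata) throws PolicyViolationException {
        Short replicationFactor = requestMetadata.replicationFactor();
        // replicationFactor is null when explicit replica assignments are given;
        // in that case use the size of one assignment as the effective factor.
        int effectiveRf = replicationFactor != null
            ? replicationFactor
            : requestMetadata.replicasAssignments().values().iterator().next().size();
        if (effectiveRf < MIN_REPLICATION) {
            throw new PolicyViolationException(
                "Topic " + requestMetadata.topic() + " must have a replication factor of at least " + MIN_REPLICATION);
        }
    }

    @Override
    public void close() { }
}
```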

    +
    +
    +
      + +
    • +
      +

      Nested Class Summary

      +
      Nested Classes
      +
      +
      Modifier and Type
      +
      Interface
      +
      Description
      +
      static class 
      + +
      +
      Class containing the create request parameters.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      Validate the request parameters and throw a PolicyViolationException with a suitable error + message if the create topics request parameters for the provided topic do not satisfy this policy.
      +
      +
      +
      +
      +
      +

      Methods inherited from interface java.lang.AutoCloseable

      +close
      +
      +

      Methods inherited from interface org.apache.kafka.common.Configurable

      +configure
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        validate

        +
        void validate(CreateTopicPolicy.RequestMetadata requestMetadata) + throws PolicyViolationException
        +
        Validate the request parameters and throw a PolicyViolationException with a suitable error + message if the create topics request parameters for the provided topic do not satisfy this policy. + + Clients will receive the POLICY_VIOLATION error code along with the exception's message. Note that validation + failure only affects the relevant topic, other topics in the request will still be processed.
        +
        +
        Parameters:
        +
        requestMetadata - the create topics request parameters for the provided topic.
        +
        Throws:
        +
        PolicyViolationException - if the request parameters do not satisfy this policy.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/policy/package-summary.html b/static/41/javadoc/org/apache/kafka/server/policy/package-summary.html new file mode 100644 index 000000000..a783e9fde --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/policy/package-summary.html @@ -0,0 +1,105 @@ + + + + +org.apache.kafka.server.policy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.server.policy

    +
    +
    +
    package org.apache.kafka.server.policy
    +
    +
    Provides pluggable interfaces for expressing policies on topics and configs.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/policy/package-tree.html b/static/41/javadoc/org/apache/kafka/server/policy/package-tree.html new file mode 100644 index 000000000..86023a876 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/policy/package-tree.html @@ -0,0 +1,89 @@ + + + + +org.apache.kafka.server.policy Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.server.policy

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaCallback.html b/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaCallback.html new file mode 100644 index 000000000..39211460f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaCallback.html @@ -0,0 +1,288 @@ + + + + +ClientQuotaCallback (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ClientQuotaCallback

    +
    +
    +
    +
    All Superinterfaces:
    +
    Configurable
    +
    +
    +
    public interface ClientQuotaCallback +extends Configurable
    +
    Quota callback interface for brokers and controllers that enables customization of client quota computation. + Implement Monitorable to enable the callback to register metrics. + The following tags are automatically added to all metrics registered: +
      +
    • config set to client.quota.callback.class
    • +
    • class set to the ClientQuotaCallback class name
    • +
    • role set to broker/controller, which indicates the role of the server
    • +
    +
    +
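As a rough sketch of how the methods of this interface fit together, the callback below groups quotas by user principal only and applies one fixed produce/fetch byte rate to every user. The tag name, limit, and class name are illustrative and not part of the documented API.

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.server.quota.ClientQuotaCallback;
import org.apache.kafka.server.quota.ClientQuotaEntity;
import org.apache.kafka.server.quota.ClientQuotaType;

// Illustrative callback: one fixed byte-rate quota per user principal.
public class PerUserQuotaCallback implements ClientQuotaCallback {

    private static final double BYTES_PER_SECOND = 1_048_576.0; // 1 MiB/s, arbitrary

    @Override
    public void configure(Map<String, ?> configs) { }

    @Override
    public Map<String, String> quotaMetricTags(ClientQuotaType quotaType, KafkaPrincipal principal, String clientId) {
        // All clients of the same user share one quota; the client id is ignored.
        Map<String, String> tags = new HashMap<>();
        tags.put("user", principal.getName());
        return tags;
    }

    @Override
    public Double quotaLimit(ClientQuotaType quotaType, Map<String, String> metricTags) {
        switch (quotaType) {
            case PRODUCE:
            case FETCH:
                return BYTES_PER_SECOND;
            default:
                return null; // no custom limit for other quota types in this sketch
        }
    }

    @Override
    public void updateQuota(ClientQuotaType quotaType, ClientQuotaEntity quotaEntity, double newValue) { }

    @Override
    public void removeQuota(ClientQuotaType quotaType, ClientQuotaEntity quotaEntity) { }

    @Override
    public boolean quotaResetRequired(ClientQuotaType quotaType) {
        return false; // limits never change in this sketch
    }

    @Override
    public boolean updateClusterMetadata(Cluster cluster) {
        return false; // quotas do not depend on partition placement here
    }

    @Override
    public void close() { }
}
```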
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      Closes this instance.
      +
      + +
      quotaLimit(ClientQuotaType quotaType, + Map<String,String> metricTags)
      +
      +
      Returns the quota limit associated with the provided metric tags.
      +
      + +
      quotaMetricTags(ClientQuotaType quotaType, + KafkaPrincipal principal, + String clientId)
      +
      +
      Quota callback invoked to determine the quota metric tags to be applied for a request.
      +
      +
      boolean
      + +
      +
      Returns true if any of the existing quota configs may have been updated since the last call + to this method for the provided quota type.
      +
      +
      void
      +
      removeQuota(ClientQuotaType quotaType, + ClientQuotaEntity quotaEntity)
      +
      +
      Quota configuration removal callback that is invoked when quota configuration for an entity is + removed in the quorum.
      +
      +
      boolean
      + +
      +
      This callback is invoked whenever there are changes in the cluster metadata, such as + brokers being added or removed, topics being created or deleted, or partition leadership updates.
      +
      +
      void
      +
      updateQuota(ClientQuotaType quotaType, + ClientQuotaEntity quotaEntity, + double newValue)
      +
      +
      Quota configuration update callback that is invoked when quota configuration for an entity is + updated in the quorum.
      +
      +
      +
      +
      +
      +

      Methods inherited from interface org.apache.kafka.common.Configurable

      +configure
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        quotaMetricTags

        +
        Map<String,String> quotaMetricTags(ClientQuotaType quotaType, + KafkaPrincipal principal, + String clientId)
        +
        Quota callback invoked to determine the quota metric tags to be applied for a request. + Quota limits are associated with quota metrics and all clients which use the same + metric tags share the quota limit.
        +
        +
        Parameters:
        +
        quotaType - Type of quota requested
        +
        principal - The user principal of the connection for which quota is requested
        +
        clientId - The client id associated with the request
        +
        Returns:
        +
        quota metric tags that indicate which other clients share this quota
        +
        +
        +
      • +
      • +
        +

        quotaLimit

        +
        Double quotaLimit(ClientQuotaType quotaType, + Map<String,String> metricTags)
        +
Returns the quota limit associated with the provided metric tags. These tags were returned from + a previous call to quotaMetricTags(ClientQuotaType, KafkaPrincipal, String). This method is + invoked by quota managers to obtain the current quota limit applied to a metric when the first request + using these tags is processed. It is also invoked after a quota update or cluster metadata change. + If the tags are no longer in use after the update (e.g. this is a {user, client-id} quota metric + and the quota now in use is a {user} quota), null is returned.
        +
        +
        Parameters:
        +
        quotaType - Type of quota requested
        +
        metricTags - Metric tags for a quota metric of type `quotaType`
        +
        Returns:
        +
        the quota limit for the provided metric tags or null if the metric tags are no longer in use
        +
        +
        +
      • +
      • +
        +

        updateQuota

        +
        void updateQuota(ClientQuotaType quotaType, + ClientQuotaEntity quotaEntity, + double newValue)
        +
        Quota configuration update callback that is invoked when quota configuration for an entity is + updated in the quorum. This is useful to track configured quotas if built-in quota configuration + tools are used for quota management.
        +
        +
        Parameters:
        +
        quotaType - Type of quota being updated
        +
        quotaEntity - The quota entity for which quota is being updated
        +
        newValue - The new quota value
        +
        +
        +
      • +
      • +
        +

        removeQuota

        +
        void removeQuota(ClientQuotaType quotaType, + ClientQuotaEntity quotaEntity)
        +
        Quota configuration removal callback that is invoked when quota configuration for an entity is + removed in the quorum. This is useful to track configured quotas if built-in quota configuration + tools are used for quota management.
        +
        +
        Parameters:
        +
        quotaType - Type of quota being updated
        +
        quotaEntity - The quota entity for which quota is being updated
        +
        +
        +
      • +
      • +
        +

        quotaResetRequired

        +
        boolean quotaResetRequired(ClientQuotaType quotaType)
        +
        Returns true if any of the existing quota configs may have been updated since the last call + to this method for the provided quota type. Quota updates as a result of calls to + updateClusterMetadata(Cluster), updateQuota(ClientQuotaType, ClientQuotaEntity, double) + and removeQuota(ClientQuotaType, ClientQuotaEntity) are automatically processed. + So callbacks that rely only on built-in quota configuration tools always return false. Quota callbacks + with external quota configuration or custom reconfigurable quota configs that affect quota limits must + return true if existing metric configs may need to be updated. This method is invoked on every request + and hence is expected to be handled by callbacks as a simple flag that is updated when quotas change.
        +
        +
        Parameters:
        +
        quotaType - Type of quota
        +
        +
        +
      • +
      • +
        +

        updateClusterMetadata

        +
        boolean updateClusterMetadata(Cluster cluster)
        +
        This callback is invoked whenever there are changes in the cluster metadata, such as + brokers being added or removed, topics being created or deleted, or partition leadership updates. + This is useful if quota computation takes partitions into account. + Topics that are being deleted will not be included in `cluster`.
        +
        +
        Parameters:
        +
        cluster - Cluster metadata including partitions and their leaders if known
        +
        Returns:
        +
        true if quotas have changed and metric configs may need to be updated
        +
        +
        +
      • +
      • +
        +

        close

        +
        void close()
        +
        Closes this instance.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaEntity.ConfigEntity.html b/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaEntity.ConfigEntity.html new file mode 100644 index 000000000..3f8f3656d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaEntity.ConfigEntity.html @@ -0,0 +1,147 @@ + + + + +ClientQuotaEntity.ConfigEntity (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ClientQuotaEntity.ConfigEntity

    +
    +
    +
    +
    Enclosing interface:
    +
    ClientQuotaEntity
    +
    +
    +
    public static interface ClientQuotaEntity.ConfigEntity
    +
    Interface representing a quota configuration entity. Quota may be + configured at levels that include one or more configuration entities. + For example, {user, client-id} quota is represented using two + instances of ConfigEntity with entity types USER and CLIENT_ID.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        name

        +
        String name()
        +
        Returns the name of this entity. For default quotas, an empty string is returned.
        +
        +
      • +
      • +
        +

        entityType

        + +
        Returns the type of this entity.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaEntity.ConfigEntityType.html b/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaEntity.ConfigEntityType.html new file mode 100644 index 000000000..cd6b92a86 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaEntity.ConfigEntityType.html @@ -0,0 +1,238 @@ + + + + +ClientQuotaEntity.ConfigEntityType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class ClientQuotaEntity.ConfigEntityType

    +
    +
    java.lang.Object +
    java.lang.Enum<ClientQuotaEntity.ConfigEntityType> +
    org.apache.kafka.server.quota.ClientQuotaEntity.ConfigEntityType
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<ClientQuotaEntity.ConfigEntityType>, Constable
    +
    +
    +
    Enclosing interface:
    +
    ClientQuotaEntity
    +
    +
    +
    public static enum ClientQuotaEntity.ConfigEntityType +extends Enum<ClientQuotaEntity.ConfigEntityType>
    + +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static ClientQuotaEntity.ConfigEntityType[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static ClientQuotaEntity.ConfigEntityType valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaEntity.html b/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaEntity.html new file mode 100644 index 000000000..8d0dca65f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaEntity.html @@ -0,0 +1,154 @@ + + + + +ClientQuotaEntity (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ClientQuotaEntity

    +
    +
    +
    +
    public interface ClientQuotaEntity
    +
    The metadata for an entity for which quota is configured. Quotas may be defined at + different levels and `configEntities` gives the list of config entities that define + the level of this quota entity.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configEntities

        + +
        Returns the list of configuration entities that this quota entity is comprised of. + For {user} or {clientId} quota, this is a single entity and for {user, clientId} + quota, this is a list of two entities.
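For illustration, a callback could derive a readable label from these config entities, using only the entityType() and name() accessors documented above. The helper class name is hypothetical.

```java
import org.apache.kafka.server.quota.ClientQuotaEntity;

// Sketch: build a label such as "USER=alice,CLIENT_ID=app-1" for a quota entity.
public class QuotaEntityLabels {
    public static String label(ClientQuotaEntity entity) {
        StringBuilder sb = new StringBuilder();
        for (ClientQuotaEntity.ConfigEntity configEntity : entity.configEntities()) {
            if (sb.length() > 0) {
                sb.append(',');
            }
            sb.append(configEntity.entityType()).append('=').append(configEntity.name());
        }
        return sb.toString();
    }
}
```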
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaType.html b/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaType.html new file mode 100644 index 000000000..0a6931f9a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/quota/ClientQuotaType.html @@ -0,0 +1,234 @@ + + + + +ClientQuotaType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class ClientQuotaType

    +
    +
    java.lang.Object +
    java.lang.Enum<ClientQuotaType> +
    org.apache.kafka.server.quota.ClientQuotaType
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<ClientQuotaType>, Constable
    +
    +
    +
    public enum ClientQuotaType +extends Enum<ClientQuotaType>
    +
    Types of quotas that may be configured on brokers for client requests.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static ClientQuotaType[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static ClientQuotaType valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/quota/package-summary.html b/static/41/javadoc/org/apache/kafka/server/quota/package-summary.html new file mode 100644 index 000000000..bc575496f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/quota/package-summary.html @@ -0,0 +1,109 @@ + + + + +org.apache.kafka.server.quota (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.server.quota

    +
    +
    +
    package org.apache.kafka.server.quota
    +
    +
Provides a pluggable interface for enforcing client quotas from a Kafka server.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/quota/package-tree.html b/static/41/javadoc/org/apache/kafka/server/quota/package-tree.html new file mode 100644 index 000000000..c5e834265 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/quota/package-tree.html @@ -0,0 +1,88 @@ + + + + +org.apache.kafka.server.quota Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.server.quota

    +Package Hierarchies: + +
    +
    +

    Interface Hierarchy

    + +
    +
    +

    Enum Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/telemetry/ClientTelemetry.html b/static/41/javadoc/org/apache/kafka/server/telemetry/ClientTelemetry.html new file mode 100644 index 000000000..358d5ecf6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/telemetry/ClientTelemetry.html @@ -0,0 +1,135 @@ + + + + +ClientTelemetry (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ClientTelemetry

    +
    +
    +
    +
    public interface ClientTelemetry
    +
    A MetricsReporter may implement this interface to indicate support for collecting client + telemetry on the server side.
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/telemetry/ClientTelemetryPayload.html b/static/41/javadoc/org/apache/kafka/server/telemetry/ClientTelemetryPayload.html new file mode 100644 index 000000000..93d268173 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/telemetry/ClientTelemetryPayload.html @@ -0,0 +1,181 @@ + + + + +ClientTelemetryPayload (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ClientTelemetryPayload

    +
    +
    +
    +
    public interface ClientTelemetryPayload
    +
    A client telemetry payload as sent by the client to the telemetry receiver. The payload is + received by the broker's ClientTelemetryReceiver implementation.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
      Method returns the client's instance id.
      +
      + + +
      +
      Method returns the content-type format of the metrics data which is being sent by the client.
      +
      + + +
      +
      Method returns the serialized metrics data as received by the client.
      +
      +
      boolean
      + +
      +
      Indicates whether the client is terminating and thus making its last metrics push.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        clientInstanceId

        +
        Uuid clientInstanceId()
        +
        Method returns the client's instance id.
        +
        +
        Returns:
        +
        Client's instance id.
        +
        +
        +
      • +
      • +
        +

        isTerminating

        +
        boolean isTerminating()
        +
        Indicates whether the client is terminating and thus making its last metrics push.
        +
        +
        Returns:
        +
        true if client is terminating, else false
        +
        +
        +
      • +
      • +
        +

        contentType

        +
        String contentType()
        +
        Method returns the content-type format of the metrics data which is being sent by the client.
        +
        +
        Returns:
        +
        Metrics data content-type/serialization format.
        +
        +
        +
      • +
      • +
        +

        data

        +
        ByteBuffer data()
        +
        Method returns the serialized metrics data as received by the client.
        +
        +
        Returns:
        +
        Serialized metrics data.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/telemetry/ClientTelemetryReceiver.html b/static/41/javadoc/org/apache/kafka/server/telemetry/ClientTelemetryReceiver.html new file mode 100644 index 000000000..582f9b892 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/telemetry/ClientTelemetryReceiver.html @@ -0,0 +1,141 @@ + + + + +ClientTelemetryReceiver (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ClientTelemetryReceiver

    +
    +
    +
    +
    public interface ClientTelemetryReceiver
    +
ClientTelemetryReceiver defines the behaviour of the telemetry receiver on the broker side, + which receives client telemetry metrics.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        exportMetrics

        +
        void exportMetrics(AuthorizableRequestContext context, + ClientTelemetryPayload payload)
        +
        Called by the broker when a client reports telemetry metrics. The associated request context + can be used by the metrics plugin to retrieve additional client information such as client ids + or endpoints. +

        + This method may be called from the request handling thread, and as such should avoid blocking.

        +
        +
        Parameters:
        +
        context - the client request context for the corresponding PushTelemetryRequest + api call.
        +
        payload - the encoded telemetry payload as sent by the client.
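A minimal, non-blocking receiver sketch follows, assuming only the methods documented here and on ClientTelemetryPayload; a real plugin would hand the payload to an exporter rather than print it.

```java
import org.apache.kafka.server.authorizer.AuthorizableRequestContext;
import org.apache.kafka.server.telemetry.ClientTelemetryPayload;
import org.apache.kafka.server.telemetry.ClientTelemetryReceiver;

// Sketch: record a few facts about each pushed payload without performing any
// blocking I/O on the request handler thread.
public class LoggingTelemetryReceiver implements ClientTelemetryReceiver {

    @Override
    public void exportMetrics(AuthorizableRequestContext context, ClientTelemetryPayload payload) {
        String line = "client-instance=" + payload.clientInstanceId()
            + " principal=" + context.principal()
            + " content-type=" + payload.contentType()
            + " bytes=" + payload.data().remaining()
            + " terminating=" + payload.isTerminating();
        System.out.println(line);
    }
}
```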
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/telemetry/package-summary.html b/static/41/javadoc/org/apache/kafka/server/telemetry/package-summary.html new file mode 100644 index 000000000..e0a82b8c7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/telemetry/package-summary.html @@ -0,0 +1,97 @@ + + + + +org.apache.kafka.server.telemetry (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.server.telemetry

    +
    +
    +
    package org.apache.kafka.server.telemetry
    +
    +
Provides a pluggable interface for capturing client telemetry metrics.
    +
    +
    +
      +
    • +
      +
      Interfaces
      +
      +
      Class
      +
      Description
      + +
      +
      A MetricsReporter may implement this interface to indicate support for collecting client + telemetry on the server side.
      +
      + +
      +
      A client telemetry payload as sent by the client to the telemetry receiver.
      +
      + +
      +
      ClientTelemetryReceiver defines the behaviour for telemetry receiver on the broker side + which receives client telemetry metrics.
      +
      +
      +
      +
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/server/telemetry/package-tree.html b/static/41/javadoc/org/apache/kafka/server/telemetry/package-tree.html new file mode 100644 index 000000000..7a87275e1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/server/telemetry/package-tree.html @@ -0,0 +1,69 @@ + + + + +org.apache.kafka.server.telemetry Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.server.telemetry

    +Package Hierarchies: + +
    +
    +

    Interface Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/AutoOffsetReset.html b/static/41/javadoc/org/apache/kafka/streams/AutoOffsetReset.html new file mode 100644 index 000000000..9ea0e1220 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/AutoOffsetReset.html @@ -0,0 +1,219 @@ + + + + +AutoOffsetReset (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class AutoOffsetReset

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.AutoOffsetReset
    +
    +
    +
    +
    public class AutoOffsetReset +extends Object
    +
    Sets the auto.offset.reset configuration when + adding a source processor + or when creating KStream or KTable via StreamsBuilder.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        none

        +
        public static AutoOffsetReset none()
        +
        Creates an AutoOffsetReset instance representing "none".
        +
        +
        Returns:
        +
        An AutoOffsetReset instance for no reset.
        +
        +
        +
      • +
      • +
        +

        earliest

        +
        public static AutoOffsetReset earliest()
        +
        Creates an AutoOffsetReset instance representing "earliest".
        +
        +
        Returns:
        +
        An AutoOffsetReset instance for the "earliest" offset.
        +
        +
        +
      • +
      • +
        +

        latest

        +
        public static AutoOffsetReset latest()
        +
        Creates an AutoOffsetReset instance representing "latest".
        +
        +
        Returns:
        +
        An AutoOffsetReset instance for the "latest" offset.
        +
        +
        +
      • +
      • +
        +

        byDuration

        +
        public static AutoOffsetReset byDuration(Duration duration)
        +
        Creates an AutoOffsetReset instance for the specified reset duration.
        +
        +
        Parameters:
        +
        duration - The duration to use for the offset reset; must be non-negative.
        +
        Returns:
        +
        An AutoOffsetReset instance with the specified duration.
        +
        Throws:
        +
        IllegalArgumentException - If the duration is negative.
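The documented factory methods can be exercised as below; how the resulting instance is attached to a source (for example via the StreamsBuilder overloads mentioned in the class description) is not shown here.

```java
import java.time.Duration;
import org.apache.kafka.streams.AutoOffsetReset;

// Quick illustration of the four documented factory methods.
public class AutoOffsetResetExamples {
    public static void main(String[] args) {
        AutoOffsetReset fromStart = AutoOffsetReset.earliest();
        AutoOffsetReset fromEnd = AutoOffsetReset.latest();
        AutoOffsetReset fail = AutoOffsetReset.none();
        // Reset to the offset closest to "now minus one day".
        AutoOffsetReset lastDay = AutoOffsetReset.byDuration(Duration.ofDays(1));
        System.out.println(fromStart + " " + fromEnd + " " + fail + " " + lastDay);
    }
}
```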
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/ClientInstanceIds.html b/static/41/javadoc/org/apache/kafka/streams/ClientInstanceIds.html new file mode 100644 index 000000000..f5efb3355 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/ClientInstanceIds.html @@ -0,0 +1,167 @@ + + + + +ClientInstanceIds (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ClientInstanceIds

    +
    +
    +
    +
    public interface ClientInstanceIds
    +
    Encapsulates the client instance id used for metrics collection by + producers, consumers, and the admin client used by Kafka Streams.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        adminInstanceId

        +
        Uuid adminInstanceId()
        +
        Returns the client instance id of the admin client.
        +
        +
        Returns:
        +
        the client instance id of the admin client
        +
        Throws:
        +
        IllegalStateException - If telemetry is disabled on the admin client.
        +
        +
        +
      • +
      • +
        +

        consumerInstanceIds

        +
        Map<String,Uuid> consumerInstanceIds()
        +
        Returns the client instance id of the consumers.
        +
        +
        Returns:
        +
        a map from thread key to client instance id
        +
        +
        +
      • +
      • +
        +

        producerInstanceIds

        +
        Map<String,Uuid> producerInstanceIds()
        +
        Returns the client instance id of the producers.
        +
        +
        Returns:
        +
        a map from thread key to client instance id
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/GroupProtocol.html b/static/41/javadoc/org/apache/kafka/streams/GroupProtocol.html new file mode 100644 index 000000000..8afea6c04 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/GroupProtocol.html @@ -0,0 +1,267 @@ + + + + +GroupProtocol (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class GroupProtocol

    +
    +
    java.lang.Object +
    java.lang.Enum<GroupProtocol> +
    org.apache.kafka.streams.GroupProtocol
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<GroupProtocol>, Constable
    +
    +
    +
    public enum GroupProtocol +extends Enum<GroupProtocol>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      +
        +
      • +
        +

        CLASSIC

        +
        public static final GroupProtocol CLASSIC
        +
        Classic group protocol.
        +
        +
      • +
      • +
        +

        STREAMS

        +
        public static final GroupProtocol STREAMS
        +
Streams group protocol.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        name

        +
        public final String name
        +
        String representation of the group protocol.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static GroupProtocol[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static GroupProtocol valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        of

        +
        public static GroupProtocol of(String name)
        +
        Case-insensitive group protocol lookup by string name.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/KafkaClientSupplier.html b/static/41/javadoc/org/apache/kafka/streams/KafkaClientSupplier.html new file mode 100644 index 000000000..b72985a17 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/KafkaClientSupplier.html @@ -0,0 +1,218 @@ + + + + +KafkaClientSupplier (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface KafkaClientSupplier

    +
    +
    +
    +
    public interface KafkaClientSupplier
    +
    KafkaClientSupplier can be used to provide custom Kafka clients to a KafkaStreams instance.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/KafkaStreams.CloseOptions.html b/static/41/javadoc/org/apache/kafka/streams/KafkaStreams.CloseOptions.html new file mode 100644 index 000000000..df3240929 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/KafkaStreams.CloseOptions.html @@ -0,0 +1,172 @@ + + + + +KafkaStreams.CloseOptions (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class KafkaStreams.CloseOptions

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.KafkaStreams.CloseOptions
    +
    +
    +
    +
    Enclosing class:
    +
    KafkaStreams
    +
    +
    +
    public static class KafkaStreams.CloseOptions +extends Object
    +
Class that handles the options passed when a KafkaStreams instance is scaled down.
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/KafkaStreams.State.html b/static/41/javadoc/org/apache/kafka/streams/KafkaStreams.State.html new file mode 100644 index 000000000..1ed18ae1f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/KafkaStreams.State.html @@ -0,0 +1,367 @@ + + + + +KafkaStreams.State (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class KafkaStreams.State

    +
    +
    java.lang.Object +
    java.lang.Enum<KafkaStreams.State> +
    org.apache.kafka.streams.KafkaStreams.State
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<KafkaStreams.State>, Constable
    +
    +
    +
    Enclosing class:
    +
    KafkaStreams
    +
    +
    +
    public static enum KafkaStreams.State +extends Enum<KafkaStreams.State>
    +
Kafka Streams states are the possible states that a Kafka Streams instance can be in. + An instance must be in only one state at a time. + The expected state transitions between the defined states are: + +
    +                 +--------------+
    +         +<----- | Created (0)  |
    +         |       +-----+--------+
    +         |             |
    +         |             v
    +         |       +----+--+------+
    +         |       | Re-          |
    +         +<----- | Balancing (1)| -------->+
    +         |       +-----+-+------+          |
    +         |             | ^                 |
    +         |             v |                 |
    +         |       +--------------+          v
    +         |       | Running (2)  | -------->+
    +         |       +------+-------+          |
    +         |              |                  |
    +         |              v                  |
    +         |       +------+-------+     +----+-------+
    +         +-----> | Pending      |     | Pending    |
    +                 | Shutdown (3) |     | Error (5)  |
    +                 +------+-------+     +-----+------+
    +                        |                   |
    +                        v                   v
    +                 +------+-------+     +-----+--------+
    +                 | Not          |     | Error (6)    |
    +                 | Running (4)  |     +--------------+
    +                 +--------------+
    +
    +
    + 
    + Note the following: +
      +
• + RUNNING state will transit to REBALANCING if any of its threads is in PARTITIONS_REVOKED or PARTITIONS_ASSIGNED state +
    • +
• + REBALANCING state will transit to RUNNING if all of its threads are in RUNNING state + (Note: a thread transits to the RUNNING state if all active tasks have been restored and are ready for processing. + Standby tasks are not considered.) +
    • +
    • + Any state except NOT_RUNNING, PENDING_ERROR or ERROR can go to PENDING_SHUTDOWN (whenever close is called) +
    • +
    • + Of special importance: If the global stream thread dies, or all stream threads die (or both) then + the instance will be in the ERROR state. The user will not need to close it. +
    • +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static KafkaStreams.State[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static KafkaStreams.State valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        hasNotStarted

        +
        public boolean hasNotStarted()
        +
        +
      • +
      • +
        +

        isRunningOrRebalancing

        +
        public boolean isRunningOrRebalancing()
        +
        +
      • +
      • +
        +

        isShuttingDown

        +
        public boolean isShuttingDown()
        +
        +
      • +
      • +
        +

        hasCompletedShutdown

        +
        public boolean hasCompletedShutdown()
        +
        +
      • +
      • +
        +

        hasStartedOrFinishedShuttingDown

        +
        public boolean hasStartedOrFinishedShuttingDown()
        +
        +
      • +
      • +
        +

        isValidTransition

        +
        public boolean isValidTransition(KafkaStreams.State newState)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/KafkaStreams.StateListener.html b/static/41/javadoc/org/apache/kafka/streams/KafkaStreams.StateListener.html new file mode 100644 index 000000000..a0ef6ff1c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/KafkaStreams.StateListener.html @@ -0,0 +1,139 @@ + + + + +KafkaStreams.StateListener (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface KafkaStreams.StateListener

    +
    +
    +
    +
    Enclosing class:
    +
    KafkaStreams
    +
    +
    +
    public static interface KafkaStreams.StateListener
    +
    Listen to KafkaStreams.State change events.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      Called when state changes.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        onChange

        +
        void onChange(KafkaStreams.State newState, + KafkaStreams.State oldState)
        +
        Called when state changes.
        +
        +
        Parameters:
        +
        newState - new state
        +
        oldState - previous state
        +
        +
        +
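        As an illustration only (assuming a KafkaStreams instance named streams that has not been started yet), the
        single abstract method can be implemented with a lambda and registered via setStateListener:

            // Log every state transition of the Kafka Streams client.
            streams.setStateListener((newState, oldState) ->
                System.out.println("Streams state changed from " + oldState + " to " + newState));
            streams.start();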
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/KafkaStreams.html b/static/41/javadoc/org/apache/kafka/streams/KafkaStreams.html new file mode 100644 index 000000000..2a0f66090 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/KafkaStreams.html @@ -0,0 +1,989 @@ + + + + +KafkaStreams (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class KafkaStreams

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.KafkaStreams
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    AutoCloseable
    +
    +
    +
    public class KafkaStreams extends Object implements AutoCloseable
    +
    A Kafka client that allows for performing continuous computation on input coming from one or more input topics and
    sends output to zero, one, or more output topics.

    The computational logic can be specified either by using the Topology to define a DAG topology of
    Processors or by using the StreamsBuilder which provides the high-level DSL to define transformations.

    One KafkaStreams instance can contain one or more threads specified in the configs for the processing work.

    A KafkaStreams instance can co-ordinate with any other instances with the same
    application ID (whether in the same process, on other processes on this
    machine, or on remote machines) as a single (possibly distributed) stream processing application.
    These instances will divide up the work based on the assignment of the input topic partitions so that all partitions
    are being consumed.
    If instances are added or fail, all (remaining) instances will rebalance the partition assignment among themselves
    to balance processing load and ensure that all input topic partitions are processed.

    Internally a KafkaStreams instance contains a normal KafkaProducer and KafkaConsumer instance
    that is used for reading input and writing output.

    A simple example might look like this:

    
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-stream-processing-application");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

    StreamsBuilder builder = new StreamsBuilder();
    builder.<String, String>stream("my-input-topic").mapValues(value -> String.valueOf(value.length())).to("my-output-topic");

    KafkaStreams streams = new KafkaStreams(builder.build(), props);
    streams.start();
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        KafkaStreams

        +
        public KafkaStreams(Topology topology, + Properties props)
        +
        Create a KafkaStreams instance. +

        + Note: even if you never call start() on a KafkaStreams instance, + you still must close() it to avoid resource leaks.

        +
        +
        Parameters:
        +
        topology - the topology specifying the computational logic
        +
        props - properties for StreamsConfig
        +
        Throws:
        +
        StreamsException - if any fatal error occurs
        +
        +
        +
      • +
      • +
        +

        KafkaStreams

        +
        public KafkaStreams(Topology topology, + Properties props, + KafkaClientSupplier clientSupplier)
        +
        Create a KafkaStreams instance. +

        + Note: even if you never call start() on a KafkaStreams instance, + you still must close() it to avoid resource leaks.

        +
        +
        Parameters:
        +
        topology - the topology specifying the computational logic
        +
        props - properties for StreamsConfig
        +
        clientSupplier - the Kafka clients supplier which provides underlying producer and consumer clients + for the new KafkaStreams instance
        +
        Throws:
        +
        StreamsException - if any fatal error occurs
        +
        +
        +
      • +
      • +
        +

        KafkaStreams

        +
        public KafkaStreams(Topology topology, + Properties props, + org.apache.kafka.common.utils.Time time)
        +
        Create a KafkaStreams instance. +

        + Note: even if you never call start() on a KafkaStreams instance, + you still must close() it to avoid resource leaks.

        +
        +
        Parameters:
        +
        topology - the topology specifying the computational logic
        +
        props - properties for StreamsConfig
        +
        time - Time implementation; cannot be null
        +
        Throws:
        +
        StreamsException - if any fatal error occurs
        +
        +
        +
      • +
      • +
        +

        KafkaStreams

        +
        public KafkaStreams(Topology topology, + Properties props, + KafkaClientSupplier clientSupplier, + org.apache.kafka.common.utils.Time time)
        +
        Create a KafkaStreams instance. +

        + Note: even if you never call start() on a KafkaStreams instance, + you still must close() it to avoid resource leaks.

        +
        +
        Parameters:
        +
        topology - the topology specifying the computational logic
        +
        props - properties for StreamsConfig
        +
        clientSupplier - the Kafka clients supplier which provides underlying producer and consumer clients + for the new KafkaStreams instance
        +
        time - Time implementation; cannot be null
        +
        Throws:
        +
        StreamsException - if any fatal error occurs
        +
        +
        +
      • +
      • +
        +

        KafkaStreams

        +
        public KafkaStreams(Topology topology, + StreamsConfig applicationConfigs)
        +
        Create a KafkaStreams instance. +

        + Note: even if you never call start() on a KafkaStreams instance, + you still must close() it to avoid resource leaks.

        +
        +
        Parameters:
        +
        topology - the topology specifying the computational logic
        +
        applicationConfigs - configs for Kafka Streams
        +
        Throws:
        +
        StreamsException - if any fatal error occurs
        +
        +
        +
      • +
      • +
        +

        KafkaStreams

        +
        public KafkaStreams(Topology topology, + StreamsConfig applicationConfigs, + KafkaClientSupplier clientSupplier)
        +
        Create a KafkaStreams instance. +

        + Note: even if you never call start() on a KafkaStreams instance, + you still must close() it to avoid resource leaks.

        +
        +
        Parameters:
        +
        topology - the topology specifying the computational logic
        +
        applicationConfigs - configs for Kafka Streams
        +
        clientSupplier - the Kafka clients supplier which provides underlying producer and consumer clients + for the new KafkaStreams instance
        +
        Throws:
        +
        StreamsException - if any fatal error occurs
        +
        +
        +
      • +
      • +
        +

        KafkaStreams

        +
        public KafkaStreams(Topology topology, + StreamsConfig applicationConfigs, + org.apache.kafka.common.utils.Time time)
        +
        Create a KafkaStreams instance. +

        + Note: even if you never call start() on a KafkaStreams instance, + you still must close() it to avoid resource leaks.

        +
        +
        Parameters:
        +
        topology - the topology specifying the computational logic
        +
        applicationConfigs - configs for Kafka Streams
        +
        time - Time implementation; cannot be null
        +
        Throws:
        +
        StreamsException - if any fatal error occurs
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        state

        +
        public KafkaStreams.State state()
        +
        Return the current KafkaStreams.State of this KafkaStreams instance.
        +
        +
        Returns:
        +
        the current state of this Kafka Streams instance
        +
        +
        +
      • +
      • +
        +

        setStateListener

        +
        public void setStateListener(KafkaStreams.StateListener listener)
        +
        An app can set a single KafkaStreams.StateListener so that the app is notified when state changes.
        +
        +
        Parameters:
        +
        listener - a new state listener
        +
        Throws:
        +
        IllegalStateException - if this KafkaStreams instance has already been started.
        +
        +
        +
      • +
      • +
        +

        setUncaughtExceptionHandler

        +
        public void setUncaughtExceptionHandler(StreamsUncaughtExceptionHandler userStreamsUncaughtExceptionHandler)
        +
        Set the handler invoked when an internal stream thread throws an unexpected exception.
        These might be exceptions indicating rare bugs in Kafka Streams, or they might be exceptions thrown by your code,
        for example a NullPointerException thrown from your processor logic.
        The handler will execute on the thread that produced the exception.
        In order to get the thread that threw the exception, use Thread.currentThread().

        Note, this handler must be thread safe, since it will be shared among all threads, and invoked from any
        thread that encounters such an exception.

        +
        +
        Parameters:
        +
        userStreamsUncaughtExceptionHandler - the uncaught exception handler of type StreamsUncaughtExceptionHandler for all internal threads
        +
        Throws:
        +
        IllegalStateException - if this KafkaStreams instance has already been started.
        +
        NullPointerException - if userStreamsUncaughtExceptionHandler is null.
        +
        +
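        A hedged sketch (not from the Javadoc) of a handler that replaces a failed thread instead of shutting the
        client down; REPLACE_THREAD, SHUTDOWN_CLIENT, and SHUTDOWN_APPLICATION are the available response options:

            streams.setUncaughtExceptionHandler(exception -> {
                // Log and keep the application alive by spawning a replacement stream thread.
                System.err.println("Uncaught exception in stream thread: " + exception);
                return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.REPLACE_THREAD;
            });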
        +
      • +
      • +
        +

        setGlobalStateRestoreListener

        +
        public void setGlobalStateRestoreListener(StateRestoreListener globalStateRestoreListener)
        +
        Set the listener which is triggered whenever a StateStore is being restored in order to resume + processing.
        +
        +
        Parameters:
        +
        globalStateRestoreListener - The listener triggered when StateStore is being restored.
        +
        Throws:
        +
        IllegalStateException - if this KafkaStreams instance has already been started.
        +
        +
        +
      • +
      • +
        +

        setStandbyUpdateListener

        +
        public void setStandbyUpdateListener(StandbyUpdateListener standbyListener)
        +
        Set the listener which is triggered whenever a standby task is updated
        +
        +
        Parameters:
        +
        standbyListener - The listener triggered when a standby task is updated.
        +
        Throws:
        +
        IllegalStateException - if this KafkaStreams instance has already been started.
        +
        +
        +
      • +
      • +
        +

        metrics

        +
        public Map<MetricName,? extends Metric> metrics()
        +
        Get read-only handle on global metrics registry, including streams client's own metrics plus + its embedded producer, consumer and admin clients' metrics.
        +
        +
        Returns:
        +
        Map of all metrics.
        +
        +
        +
      • +
      • +
        +

        addStreamThread

        +
        public Optional<String> addStreamThread()
        +
        Adds and starts a stream thread in addition to the stream threads that are already running in this
        Kafka Streams client.

        Since the number of stream threads increases, the sizes of the caches in the new stream thread
        and the existing stream threads are adapted so that the sum of the cache sizes over all stream
        threads does not exceed the total cache size specified in configuration
        StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG.

        Stream threads can only be added if this Kafka Streams client is in state RUNNING or REBALANCING.

        +
        +
        Returns:
        +
        name of the added stream thread or empty if a new stream thread could not be added
        +
        +
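        For illustration (assuming a running instance named streams), the returned Optional signals whether a thread
        could actually be added:

            // Scale processing up at runtime; an empty result means the client was not in RUNNING or REBALANCING state.
            Optional<String> added = streams.addStreamThread();
            added.ifPresentOrElse(
                name -> System.out.println("Started new stream thread: " + name),
                () -> System.out.println("Could not add a stream thread"));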
        +
      • +
      • +
        +

        removeStreamThread

        +
        public Optional<String> removeStreamThread()
        +
        Removes one stream thread out of the running stream threads from this Kafka Streams client. +

        + The removed stream thread is gracefully shut down. This method does not specify which stream + thread is shut down. +

        + Since the number of stream threads decreases, the sizes of the caches in the remaining stream + threads are adapted so that the sum of the cache sizes over all stream threads equals the total + cache size specified in configuration StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG.

        +
        +
        Returns:
        +
        name of the removed stream thread or empty if a stream thread could not be removed because + no stream threads are alive
        +
        +
        +
      • +
      • +
        +

        removeStreamThread

        +
        public Optional<String> removeStreamThread(Duration timeout)
        +
        Removes one stream thread out of the running stream threads from this Kafka Streams client. +

        + The removed stream thread is gracefully shut down. This method does not specify which stream + thread is shut down. +

        + Since the number of stream threads decreases, the sizes of the caches in the remaining stream + threads are adapted so that the sum of the cache sizes over all stream threads equals the total + cache size specified in configuration StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG.

        +
        +
        Parameters:
        +
        timeout - The length of time to wait for the thread to shut down
        +
        Returns:
        +
        name of the removed stream thread or empty if a stream thread could not be removed because + no stream threads are alive
        +
        Throws:
        +
        TimeoutException - if the thread does not stop in time
        +
        +
        +
      • +
      • +
        +

        start

        +
        public void start() + throws IllegalStateException, +StreamsException
        +
        Start the KafkaStreams instance by starting all its threads.
        This function is expected to be called only once during the life cycle of the client.

        Because threads are started in the background, this method does not block.
        However, if you have global stores in your topology, this method blocks until all global stores are restored.
        As a consequence, any fatal exception that happens during processing is by default only logged.
        If you want to be notified about dying threads, you can register an uncaught exception handler
        before starting the KafkaStreams instance.

        Note, for brokers with version 0.9.x or lower, the broker version cannot be checked.
        There will be no error and the client will hang and retry to verify the broker version until it times out.

        +
        +
        Throws:
        +
        IllegalStateException - if process was already started
        +
        StreamsException - if the Kafka brokers have version 0.10.0.x or + if exactly-once is enabled for pre 0.11.0.x brokers
        +
        +
        +
      • +
      • +
        +

        close

        +
        public void close()
        +
        Shutdown this KafkaStreams instance by signaling all the threads to stop, and then wait for them to join. + This will block until all threads have stopped.
        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        +
        +
      • +
      • +
        +

        close

        +
        public boolean close(Duration timeout) + throws IllegalArgumentException
        +
        Shutdown this KafkaStreams by signaling all the threads to stop, and then wait up to the timeout for the
        threads to join.
        A timeout of Duration.ZERO (or any other zero duration) makes the close operation asynchronous.
        Negative-duration timeouts are rejected.
        +
        +
        Parameters:
        +
        timeout - how long to wait for the threads to shut down
        +
        Returns:
        +
        true if all threads were successfully stopped; false if the timeout was reached before all threads stopped.
        Note that this method must not be called in the KafkaStreams.StateListener.onChange(KafkaStreams.State, KafkaStreams.State) callback of KafkaStreams.StateListener.
        +
        Throws:
        +
        IllegalArgumentException - if timeout can't be represented as long milliseconds
        +
        +
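        A small usage sketch (the streams variable is assumed): bound the shutdown and check whether it completed in time.

            boolean stopped = streams.close(Duration.ofSeconds(30));   // java.time.Duration
            if (!stopped) {
                System.err.println("Kafka Streams did not shut down within 30 seconds");
            }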
        +
      • +
      • +
        +

        close

        +
        public boolean close(KafkaStreams.CloseOptions options) + throws IllegalArgumentException
        +
        Shutdown this KafkaStreams by signaling all the threads to stop, and then wait up to the timeout for the + threads to join.
        +
        +
        Parameters:
        +
        options - contains timeout to specify how long to wait for the threads to shut down, and a flag leaveGroup to + trigger consumer leave call
        +
        Returns:
        +
        true if all threads were successfully stopped; false if the timeout was reached before all threads stopped.
        Note that this method must not be called in the KafkaStreams.StateListener.onChange(KafkaStreams.State, KafkaStreams.State) callback of KafkaStreams.StateListener.
        +
        Throws:
        +
        IllegalArgumentException - if timeout can't be represented as long milliseconds
        +
        +
        +
      • +
      • +
        +

        cleanUp

        +
        public void cleanUp()
        +
        Do a cleanup of the local StateStore directory (StreamsConfig.STATE_DIR_CONFIG) by deleting all
        data with regard to the application ID.

        May only be called either before this KafkaStreams instance is started or after the instance is closed.

        Calling this method triggers a restore of local StateStores on the next application start.

        +
        +
        Throws:
        +
        IllegalStateException - if this KafkaStreams instance has been started and hasn't fully shut down
        +
        StreamsException - if cleanup failed
        +
        +
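        An illustrative sketch (topology and props are placeholders) of the documented "wipe state, then start" pattern:

            KafkaStreams streams = new KafkaStreams(topology, props);
            streams.cleanUp();   // deletes local state under state.dir for this application.id
            streams.start();     // state stores are rebuilt from their changelog topics on startup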
        +
      • +
      • +
        +

        metadataForAllStreamsClients

        +
        public Collection<StreamsMetadata> metadataForAllStreamsClients()
        +
        Find all currently running KafkaStreams instances (potentially remotely) that use the same + application ID as this instance (i.e., all instances that belong to + the same Kafka Streams application) and return StreamsMetadata for each discovered instance. +

        + Note: this is a point in time view, and it may change due to partition reassignment.

        +
        +
        Returns:
        +
        StreamsMetadata for each KafkaStreams instances of this application
        +
        +
        +
      • +
      • +
        +

        streamsMetadataForStore

        +
        public Collection<StreamsMetadata> streamsMetadataForStore(String storeName)
        +
        Find all currently running KafkaStreams instances (potentially remotely) that +
          +
        • use the same application ID as this instance (i.e., all + instances that belong to the same Kafka Streams application)
        • +
        • and that contain a StateStore with the given storeName
        • +
        + and return StreamsMetadata for each discovered instance. +

        + Note: this is a point in time view, and it may change due to partition reassignment.

        +
        +
        Parameters:
        +
        storeName - the storeName to find metadata for
        +
        Returns:
        +
        StreamsMetadata for each KafkaStreams instances with the provided storeName of + this application
        +
        +
        +
      • +
      • +
        +

        queryMetadataForKey

        +
        public <K> KeyQueryMetadata queryMetadataForKey(String storeName, + K key, + Serializer<K> keySerializer)
        +
        Finds the metadata containing the active hosts and standby hosts where the key being queried would reside.
        +
        +
        Type Parameters:
        +
        K - key type
        Returns KeyQueryMetadata containing all metadata about hosting the given key for the given store,
        or null if no matching metadata could be found.
        +
        Parameters:
        +
        storeName - the storeName to find metadata for
        +
        key - the key to find metadata for
        +
        keySerializer - serializer for the key
        +
        +
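        For illustration only (store name and key are made up), locating the instance that hosts a key:

            KeyQueryMetadata metadata =
                streams.queryMetadataForKey("counts-store", "alice", Serdes.String().serializer());
            if (!KeyQueryMetadata.NOT_AVAILABLE.equals(metadata)) {
                HostInfo active = metadata.activeHost();   // org.apache.kafka.streams.state.HostInfo
                System.out.println("Key is hosted on " + active.host() + ":" + active.port());
            }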
        +
      • +
      • +
        +

        queryMetadataForKey

        +
        public <K> KeyQueryMetadata queryMetadataForKey(String storeName, + K key, + StreamPartitioner<? super K,?> partitioner)
        +
        Finds the metadata containing the active hosts and standby hosts where the key being queried would reside.
        +
        +
        Type Parameters:
        +
        K - key type
        Returns KeyQueryMetadata containing all metadata about hosting the given key for the given store, using
        the supplied partitioner, or null if no matching metadata could be found.
        +
        Parameters:
        +
        storeName - the storeName to find metadata for
        +
        key - the key to find metadata for
        +
        partitioner - the partitioner to be used to locate the host for the key
        +
        +
        +
      • +
      • +
        +

        store

        +
        public <T> T store(StoreQueryParameters<T> storeQueryParameters)
        +
        Get a facade wrapping the local StateStore instances with the provided StoreQueryParameters. + The returned object can be used to query the StateStore instances.
        +
        +
        Parameters:
        +
        storeQueryParameters - the parameters used to fetch a queryable store
        +
        Returns:
        +
        A facade wrapping the local StateStore instances
        +
        Throws:
        +
        StreamsNotStartedException - If Streams has not yet been started. Just call start() + and then retry this call.
        +
        UnknownStateStoreException - If the specified store name does not exist in the topology.
        +
        InvalidStateStorePartitionException - If the specified partition does not exist.
        +
        InvalidStateStoreException - If the Streams instance isn't in a queryable state. + If the store's type does not match the QueryableStoreType, + the Streams instance is not in a queryable state with respect + to the parameters, or if the store is not available locally, then + an InvalidStateStoreException is thrown upon store access.
        +
        +
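        A hedged sketch of the classic interactive-query path ("counts-store" is a hypothetical store name):

            ReadOnlyKeyValueStore<String, Long> store = streams.store(
                StoreQueryParameters.fromNameAndType("counts-store", QueryableStoreTypes.<String, Long>keyValueStore()));
            Long count = store.get("alice");   // the key must be hosted locally by this instance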
        +
      • +
      • +
        +

        pause

        +
        public void pause()
        +
        This method pauses processing for the KafkaStreams instance.

        Paused topologies will only skip over (a) processing, (b) punctuation, and (c) standby tasks.
        Notably, paused topologies will still poll Kafka consumers, and commit offsets.
        This method sets transient state that is not maintained or managed among instances.
        Note that pause() can be called before start() in order to start a KafkaStreams instance
        in a manner where the processing is paused as described, but the consumers are started up.

        +
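        A brief illustration (streams assumed) of starting an instance in the paused mode described above:

            streams.pause();    // allowed before start(): consumers come up but no records are processed
            streams.start();
            // ... later, once the application is ready to process ...
            if (streams.isPaused()) {
                streams.resume();
            }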
        +
      • +
      • +
        +

        isPaused

        +
        public boolean isPaused()
        +
        +
        Returns:
        +
        true when the KafkaStreams instance has its processing paused.
        +
        +
        +
      • +
      • +
        +

        resume

        +
        public void resume()
        +
        This method resumes processing for the KafkaStreams instance.
        +
        +
      • +
      • +
        +

        clientInstanceIds

        +
        public ClientInstanceIds clientInstanceIds(Duration timeout)
        +
        Returns the internal clients' assigned client instance ids.
        +
        +
        Returns:
        +
        The internal clients' assigned instance ids used for metrics collection.
        +
        Throws:
        +
        IllegalArgumentException - If timeout is negative.
        +
        IllegalStateException - If KafkaStreams is not running.
        +
        TimeoutException - Indicates that a request timed out.
        +
        StreamsException - For any other error that might occur.
        +
        +
        +
      • +
      • +
        +

        metadataForLocalThreads

        +
        public Set<ThreadMetadata> metadataForLocalThreads()
        +
        Returns runtime information about the local threads of this KafkaStreams instance.
        +
        +
        Returns:
        +
        the set of ThreadMetadata.
        +
        +
        +
      • +
      • +
        +

        allLocalStorePartitionLags

        +
        public Map<String,Map<Integer,LagInfo>> allLocalStorePartitionLags()
        +
        Returns LagInfo, for all store partitions (active or standby) local to this Streams instance. Note that the + values returned are just estimates and meant to be used for making soft decisions on whether the data in the store + partition is fresh enough for querying. + +

        Note: Each invocation of this method issues a call to the Kafka brokers. Thus, it's advisable to limit the frequency + of invocation to once every few seconds.

        +
        +
        Returns:
        +
        map of store names to another map of partition to LagInfos
        +
        Throws:
        +
        StreamsException - if the admin client request throws exception
        +
        +
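        For illustration, printing the estimated lag of every local store partition:

            Map<String, Map<Integer, LagInfo>> lags = streams.allLocalStorePartitionLags();
            lags.forEach((storeName, partitions) ->
                partitions.forEach((partition, lag) ->
                    System.out.printf("%s[%d]: %d records behind%n", storeName, partition, lag.offsetLag())));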
        +
      • +
      • +
        +

        query

        +
        @Evolving +public <R> StateQueryResult<R> query(StateQueryRequest<R> request)
        +
        Run an interactive query against a state store.

        This method allows callers outside the Streams runtime to access the internal state of
        stateful processors. See IQ docs for more information.

        NOTICE: This functionality is InterfaceStability.Evolving and subject to change in minor versions.
        Once it is stabilized, this notice and the evolving annotation will be removed.

        +
        +
        Type Parameters:
        +
        R - The result type specified by the query.
        +
        Throws:
        +
        StreamsNotStartedException - If Streams has not yet been started. Just call start() and then retry this call.
        +
        StreamsStoppedException - If Streams is in a terminal state like PENDING_SHUTDOWN, + NOT_RUNNING, PENDING_ERROR, or ERROR. The caller should + discover a new instance to query.
        +
        UnknownStateStoreException - If the specified store name does not exist in the + topology.
        +
        +
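        A hedged sketch of a point lookup with the evolving IQv2 API (store name and key are illustrative; KeyQuery,
        StateQueryRequest, and StateQueryResult live in org.apache.kafka.streams.query):

            StateQueryRequest<Long> request =
                StateQueryRequest.inStore("counts-store").withQuery(KeyQuery.<String, Long>withKey("alice"));
            StateQueryResult<Long> result = streams.query(request);
            Long count = result.getOnlyPartitionResult().getResult();   // null handling omitted for brevity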
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/KeyQueryMetadata.html b/static/41/javadoc/org/apache/kafka/streams/KeyQueryMetadata.html new file mode 100644 index 000000000..d798db339 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/KeyQueryMetadata.html @@ -0,0 +1,278 @@ + + + + +KeyQueryMetadata (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class KeyQueryMetadata

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.KeyQueryMetadata
    +
    +
    +
    +
    public class KeyQueryMetadata +extends Object
    +
    Represents all the metadata related to a key, where a particular key resides in a KafkaStreams application.
    It contains the active HostInfo and a set of standby HostInfos, denoting the instances where the key resides.
    It also contains the partition number where the key belongs, which could be useful when used in conjunction with other APIs,
    e.g., relating with lags for that store partition.
    NOTE: This is a point in time view. It may change as rebalances happen.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        NOT_AVAILABLE

        +
        public static final KeyQueryMetadata NOT_AVAILABLE
        +
        Sentinel to indicate that the KeyQueryMetadata is currently unavailable. This can occur during rebalance + operations.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        KeyQueryMetadata

        +
        public KeyQueryMetadata(HostInfo activeHost, + Set<HostInfo> standbyHosts, + int partition)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        activeHost

        +
        public HostInfo activeHost()
        +
        Get the active Kafka Streams instance for given key.
        +
        +
        Returns:
        +
        active instance's HostInfo
        +
        +
        +
      • +
      • +
        +

        standbyHosts

        +
        public Set<HostInfo> standbyHosts()
        +
        Get the Kafka Streams instances that host the key as standbys.
        +
        +
        Returns:
        +
        set of standby HostInfo or an empty set, if no standbys are configured
        +
        +
        +
      • +
      • +
        +

        partition

        +
        public int partition()
        +
        Get the store partition corresponding to the key.
        +
        +
        Returns:
        +
        store partition number
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object obj)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/KeyValue.html b/static/41/javadoc/org/apache/kafka/streams/KeyValue.html new file mode 100644 index 000000000..fea1a5d9d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/KeyValue.html @@ -0,0 +1,275 @@ + + + + +KeyValue (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class KeyValue<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.KeyValue<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - Key type
    +
    V - Value type
    +
    +
    +
    public class KeyValue<K,V> +extends Object
    +
    A key-value pair defined for a single Kafka Streams record. + If the record comes directly from a Kafka topic then its key/value are defined as the message key/value.
    +
    +
    +
      + +
    • +
      +

      Field Summary

      +
      Fields
      +
      +
      Modifier and Type
      +
      Field
      +
      Description
      +
      final K
      + +
      +
      The key of the key-value pair.
      +
      +
      final V
      + +
      +
      The value of the key-value pair.
      +
      +
      +
      +
    • + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      +
      KeyValue(K key, + V value)
      +
      +
      Create a new key-value pair.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      + +
       
      +
      int
      + +
       
      +
      static <K, +V> KeyValue<K,V>
      +
      pair(K key, + V value)
      +
      +
      Create a new key-value pair.
      +
      + + +
       
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        key

        +
        public final K key
        +
        The key of the key-value pair.
        +
        +
      • +
      • +
        +

        value

        +
        public final V value
        +
        The value of the key-value pair.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        KeyValue

        +
        public KeyValue(K key, + V value)
        +
        Create a new key-value pair.
        +
        +
        Parameters:
        +
        key - the key
        +
        value - the value
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        pair

        +
        public static <K, +V> KeyValue<K,V> pair(K key, + V value)
        +
        Create a new key-value pair.
        +
        +
        Type Parameters:
        +
        K - the type of the key
        +
        V - the type of the value
        +
        Parameters:
        +
        key - the key
        +
        value - the value
        +
        Returns:
        +
        a new key-value pair
        +
        +
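        A tiny illustration: the static factory and the constructor are interchangeable, and the fields are public and final.

            KeyValue<String, Long> kv1 = KeyValue.pair("alice", 42L);
            KeyValue<String, Long> kv2 = new KeyValue<>("alice", 42L);
            System.out.println(kv1.key + " -> " + kv1.value);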
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object obj)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/LagInfo.html b/static/41/javadoc/org/apache/kafka/streams/LagInfo.html new file mode 100644 index 000000000..e4698be24 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/LagInfo.html @@ -0,0 +1,213 @@ + + + + +LagInfo (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class LagInfo

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.LagInfo
    +
    +
    +
    +
    public class LagInfo +extends Object
    +
    Encapsulates information about lag at a store partition replica (active or standby). This information is constantly changing as the tasks process records, and thus it should be treated as simply an instantaneous measure of lag.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      long
      + +
      +
      Get the current maximum offset on the store partition's changelog topic, that has been successfully written into + the store partition's state store.
      +
      +
      long
      + +
      +
      Get the end offset position for this store partition's changelog topic on the Kafka brokers.
      +
      +
      boolean
      + +
       
      +
      int
      + +
       
      +
      long
      + +
      +
      Get the measured lag between current and end offset positions, for this store partition replica
      +
      + + +
       
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        currentOffsetPosition

        +
        public long currentOffsetPosition()
        +
        Get the current maximum offset on the store partition's changelog topic, that has been successfully written into + the store partition's state store.
        +
        +
        Returns:
        +
        current consume offset for standby/restoring store partitions & simply end offset for active store partition replicas
        +
        +
        +
      • +
      • +
        +

        endOffsetPosition

        +
        public long endOffsetPosition()
        +
        Get the end offset position for this store partition's changelog topic on the Kafka brokers.
        +
        +
        Returns:
        +
        last offset written to the changelog topic partition
        +
        +
        +
      • +
      • +
        +

        offsetLag

        +
        public long offsetLag()
        +
        Get the measured lag between current and end offset positions, for this store partition replica
        +
        +
        Returns:
        +
        lag as measured by message offsets
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object obj)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/StoreQueryParameters.html b/static/41/javadoc/org/apache/kafka/streams/StoreQueryParameters.html new file mode 100644 index 000000000..eab2c42e9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/StoreQueryParameters.html @@ -0,0 +1,273 @@ + + + + +StoreQueryParameters (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StoreQueryParameters<T>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.StoreQueryParameters<T>
    +
    +
    +
    +
    public class StoreQueryParameters<T> +extends Object
    +
    StoreQueryParameters allows you to pass a variety of parameters when fetching a store for interactive query.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        fromNameAndType

        +
        public static <T> StoreQueryParameters<T> fromNameAndType(String storeName, + QueryableStoreType<T> queryableStoreType)
        +
        +
      • +
      • +
        +

        withPartition

        +
        public StoreQueryParameters<T> withPartition(Integer partition)
        +
        Set a specific partition that should be queried exclusively.
        +
        +
        Parameters:
        +
        partition - The specific integer partition to be fetched from the stores list by using StoreQueryParameters.
        +
        Returns:
        +
        StoreQueryParameters a new StoreQueryParameters instance configured with the specified partition
        +
        +
        +
      • +
      • +
        +

        enableStaleStores

        +
        public StoreQueryParameters<T> enableStaleStores()
        +
        Enable querying of stale state stores, i.e., allow to query active tasks during restore as well as standby tasks.
        +
        +
        Returns:
        +
        StoreQueryParameters a new StoreQueryParameters instance configured with serving from stale stores enabled
        +
        +
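        A hedged sketch combining the builder-style methods on this class (store name and partition are illustrative;
        streams is an assumed KafkaStreams instance):

            StoreQueryParameters<ReadOnlyKeyValueStore<String, Long>> params =
                StoreQueryParameters
                    .fromNameAndType("counts-store", QueryableStoreTypes.<String, Long>keyValueStore())
                    .withPartition(2)
                    .enableStaleStores();
            ReadOnlyKeyValueStore<String, Long> store = streams.store(params);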
        +
      • +
      • +
        +

        storeName

        +
        public String storeName()
        +
        Get the name of the state store that should be queried.
        +
        +
        Returns:
        +
        String state store name
        +
        +
        +
      • +
      • +
        +

        queryableStoreType

        +
        public QueryableStoreType<T> queryableStoreType()
        +
        Get the queryable store type for which key is queried by the user.
        +
        +
        Returns:
        +
        QueryableStoreType type of queryable store
        +
        +
        +
      • +
      • +
        +

        partition

        +
        public Integer partition()
        +
        Get the store partition that will be queried. + If the method returns null, it would mean that no specific partition has been requested, + so all the local partitions for the store will be queried.
        +
        +
        Returns:
        +
        Integer partition
        +
        +
        +
      • +
      • +
        +

        staleStoresEnabled

        +
        public boolean staleStoresEnabled()
        +
        Get the flag staleStores. If true, include standbys and recovering stores along with running stores.
        +
        +
        Returns:
        +
        boolean staleStores
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object obj)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/StreamsBuilder.html b/static/41/javadoc/org/apache/kafka/streams/StreamsBuilder.html new file mode 100644 index 000000000..b8e150fd3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/StreamsBuilder.html @@ -0,0 +1,790 @@ + + + + +StreamsBuilder (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StreamsBuilder

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.StreamsBuilder
    +
    +
    +
    +
    public class StreamsBuilder +extends Object
    +
    StreamsBuilder provides the high-level Kafka Streams DSL to specify a Kafka Streams topology.

    It is a requirement that the processing logic (Topology) be defined in a deterministic way, i.e., the order in
    which all operators are added must be predictable and the same across all application instances.
    Topologies are only identical if all operators are added in the same order.
    If different KafkaStreams instances of the same application build different topologies, the result may be
    incompatible runtime code and unexpected results or errors.

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        StreamsBuilder

        +
        public StreamsBuilder()
        +
        +
      • +
      • +
        +

        StreamsBuilder

        +
        public StreamsBuilder(TopologyConfig topologyConfigs)
        +
        Create a StreamsBuilder instance.
        +
        +
        Parameters:
        +
        topologyConfigs - the streams configs that apply at the topology level. Please refer to TopologyConfig for more detail
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        stream

        +
        public <K, +V> KStream<K,V> stream(String topic)
        +
        Create a KStream from the specified topic. + The default "auto.offset.reset" strategy, default TimestampExtractor, and default key and value + deserializers as specified in the config are used. +

        + If multiple topics are specified there is no ordering guarantee for records from different topics. +

        + Note that the specified input topic must be partitioned by key. + If this is not the case it is the user's responsibility to repartition the data before any key based operation + (like aggregation or join) is applied to the returned KStream.

        +
        +
        Parameters:
        +
        topic - the topic name; cannot be null
        +
        Returns:
        +
        a KStream for the specified topic
        +
        +
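        For illustration (topic name assumed), explicit serdes can be supplied with the Consumed overload documented below:

            StreamsBuilder builder = new StreamsBuilder();
            KStream<String, String> clicks =
                builder.stream("clicks", Consumed.with(Serdes.String(), Serdes.String()));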
        +
      • +
      • +
        +

        stream

        +
        public <K, +V> KStream<K,V> stream(String topic, + Consumed<K,V> consumed)
        +
        Create a KStream from the specified topic. + The "auto.offset.reset" strategy, TimestampExtractor, key and value deserializers + are defined by the options in Consumed are used. +

        + Note that the specified input topic must be partitioned by key. + If this is not the case it is the user's responsibility to repartition the data before any key based operation + (like aggregation or join) is applied to the returned KStream.

        +
        +
        Parameters:
        +
        topic - the topic names; cannot be null
        +
        consumed - the instance of Consumed used to define optional parameters
        +
        Returns:
        +
        a KStream for the specified topic
        +
        +
        +
      • +
      • +
        +

        stream

        +
        public <K, +V> KStream<K,V> stream(Collection<String> topics)
        +
        Create a KStream from the specified topics. + The default "auto.offset.reset" strategy, default TimestampExtractor, and default key and value + deserializers as specified in the config are used. +

        + If multiple topics are specified there is no ordering guarantee for records from different topics. +

        + Note that the specified input topics must be partitioned by key. + If this is not the case it is the user's responsibility to repartition the data before any key based operation + (like aggregation or join) is applied to the returned KStream.

        +
        +
        Parameters:
        +
        topics - the topic names; must contain at least one topic name
        +
        Returns:
        +
        a KStream for the specified topics
        +
        +
        +
      • +
      • +
        +

        stream

        +
        public <K, +V> KStream<K,V> stream(Collection<String> topics, + Consumed<K,V> consumed)
        +
        Create a KStream from the specified topics. + The "auto.offset.reset" strategy, TimestampExtractor, key and value deserializers + are defined by the options in Consumed are used. +

        + If multiple topics are specified there is no ordering guarantee for records from different topics. +

        + Note that the specified input topics must be partitioned by key. + If this is not the case it is the user's responsibility to repartition the data before any key based operation + (like aggregation or join) is applied to the returned KStream.

        +
        +
        Parameters:
        +
        topics - the topic names; must contain at least one topic name
        +
        consumed - the instance of Consumed used to define optional parameters
        +
        Returns:
        +
        a KStream for the specified topics
        +
        +
        +
      • +
      • +
        +

        stream

        +
        public <K, +V> KStream<K,V> stream(Pattern topicPattern)
        +
        Create a KStream from the specified topic pattern. + The default "auto.offset.reset" strategy, default TimestampExtractor, and default key and value + deserializers as specified in the config are used. +

        + If multiple topics are matched by the specified pattern, the created KStream will read data from all of + them and there is no ordering guarantee between records from different topics. This also means that the work + will not be parallelized for multiple topics, and the number of tasks will scale with the maximum partition + count of any matching topic rather than the total number of partitions across all topics. +

        + Note that the specified input topics must be partitioned by key. + If this is not the case it is the user's responsibility to repartition the data before any key based operation + (like aggregation or join) is applied to the returned KStream.

        +
        +
        Parameters:
        +
        topicPattern - the pattern to match for topic names
        +
        Returns:
        +
        a KStream for topics matching the regex pattern.
        +
        +
        +
      • +
      • +
        +

        stream

        +
        public <K, +V> KStream<K,V> stream(Pattern topicPattern, + Consumed<K,V> consumed)
        +
        Create a KStream from the specified topic pattern. + The "auto.offset.reset" strategy, TimestampExtractor, key and value deserializers + are defined by the options in Consumed are used. +

        + If multiple topics are matched by the specified pattern, the created KStream will read data from all of + them and there is no ordering guarantee between records from different topics. This also means that the work + will not be parallelized for multiple topics, and the number of tasks will scale with the maximum partition + count of any matching topic rather than the total number of partitions across all topics. +

        + Note that the specified input topics must be partitioned by key. + If this is not the case it is the user's responsibility to repartition the data before any key based operation + (like aggregation or join) is applied to the returned KStream.

        +
        +
        Parameters:
        +
        topicPattern - the pattern to match for topic names
        +
        consumed - the instance of Consumed used to define optional parameters
        +
        Returns:
        +
        a KStream for topics matching the regex pattern.
        +
        +
        +
      • +
      • +
        +

        table

        +
        public <K, +V> KTable<K,V> table(String topic, + Consumed<K,V> consumed, + Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Create a KTable for the specified topic. + The "auto.offset.reset" strategy, TimestampExtractor, key and value deserializers + are defined by the options in Consumed are used. + Input records with null key will be dropped. +

        + Note that the specified input topic must be partitioned by key. + If this is not the case the returned KTable will be corrupted. +

        + The resulting KTable will be materialized in a local KeyValueStore using the given + Materialized instance. + An internal changelog topic is created by default. Because the source topic can + be used for recovery, you can avoid creating the changelog topic by setting + the "topology.optimization" to "all" in the StreamsConfig. +

        + You should only specify serdes in the Consumed instance as these will also be used to overwrite the + serdes in Materialized, i.e., +

         
         streamBuilder.table(topic, Consumed.with(Serdes.String(), Serdes.String()), Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName))

         To query the local ReadOnlyKeyValueStore it must be obtained via KafkaStreams#store(...):
        
         KafkaStreams streams = ...
         StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
         ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>> localStore = streams.store(storeQueryParams);
         K key = "some-key";
         ValueAndTimestamp<V> valueForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)

         For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to
         query the value of the key on a parallel running instance of your Kafka Streams application.
        +
        +
        Parameters:
        +
        topic - the topic name; cannot be null
        +
        consumed - the instance of Consumed used to define optional parameters; cannot be null
        +
        materialized - the instance of Materialized used to materialize a state store; cannot be null
        +
        Returns:
        +
        a KTable for the specified topic
        +
        +
        +
      • +
      • +
        +

        table

        public <K,V> KTable<K,V> table(String topic)

        Create a KTable for the specified topic. The default "auto.offset.reset" strategy and the default key and value deserializers as specified in the config are used. Input records with null key will be dropped.

        Note that the specified input topic must be partitioned by key. If this is not the case, the returned KTable will be corrupted.

        The resulting KTable will be materialized in a local KeyValueStore with an internal store name. Note that the store name may not be queryable through Interactive Queries. An internal changelog topic is created by default. Because the source topic can be used for recovery, you can avoid creating the changelog topic by setting "topology.optimization" to "all" in the StreamsConfig.

        Parameters:
        topic - the topic name; cannot be null
        Returns:
        a KTable for the specified topic
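        For comparison, a minimal sketch of this no-argument overload; it assumes that default.key.serde and default.value.serde have been configured (for example to String serdes), since no Consumed instance is given, and the topic name is a placeholder.

        final StreamsBuilder builder = new StreamsBuilder();
        // Key and value serdes come from default.key.serde / default.value.serde;
        // the backing store gets an internal name and is not guaranteed to be queryable.
        final KTable<String, String> users = builder.table("users");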
      • +
      • +
        +

        table

        +
        public <K,V> KTable<K,V> table(String topic, Consumed<K,V> consumed)

        Create a KTable for the specified topic. The "auto.offset.reset" strategy, TimestampExtractor, and key and value deserializers are defined by the options in Consumed. Input records with null key will be dropped.

        Note that the specified input topic must be partitioned by key. If this is not the case, the returned KTable will be corrupted.

        The resulting KTable will be materialized in a local KeyValueStore with an internal store name. Note that the store name may not be queryable through Interactive Queries. An internal changelog topic is created by default. Because the source topic can be used for recovery, you can avoid creating the changelog topic by setting "topology.optimization" to "all" in the StreamsConfig.

        Parameters:
        topic - the topic name; cannot be null
        consumed - the instance of Consumed used to define optional parameters; cannot be null
        Returns:
        a KTable for the specified topic
        +
        +
        +
      • +
      • +
        +

        table

        +
        public <K,V> KTable<K,V> table(String topic, Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

        Create a KTable for the specified topic. The default "auto.offset.reset" strategy as specified in the config is used. Key and value deserializers as defined by the options in Materialized are used. Input records with null key will be dropped.

        Note that the specified input topic must be partitioned by key. If this is not the case, the returned KTable will be corrupted.

        The resulting KTable will be materialized in a local KeyValueStore using the Materialized instance. An internal changelog topic is created by default. Because the source topic can be used for recovery, you can avoid creating the changelog topic by setting "topology.optimization" to "all" in the StreamsConfig.

        Parameters:
        topic - the topic name; cannot be null
        materialized - the instance of Materialized used to materialize a state store; cannot be null
        Returns:
        a KTable for the specified topic
        +
        +
        +
      • +
      • +
        +

        globalTable

        +
        public <K,V> GlobalKTable<K,V> globalTable(String topic, Consumed<K,V> consumed)

        Create a GlobalKTable for the specified topic. Input records with null key will be dropped.

        The resulting GlobalKTable will be materialized in a local KeyValueStore with an internal store name. Note that the store name may not be queryable through Interactive Queries. No internal changelog topic is created since the original input topic can be used for recovery (cf. methods of KGroupedStream and KGroupedTable that return a KTable).

        Note that GlobalKTable always applies the "auto.offset.reset" strategy "earliest" regardless of the specified value in StreamsConfig or Consumed. Furthermore, GlobalKTable cannot be a versioned state store.

        Parameters:
        topic - the topic name; cannot be null
        consumed - the instance of Consumed used to define optional parameters
        Returns:
        a GlobalKTable for the specified topic
        +
        +
        +
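        A minimal sketch of creating a global table; the topic name "products" is a placeholder. Every Kafka Streams instance of the application keeps a full local copy of the topic.

        final StreamsBuilder builder = new StreamsBuilder();
        // All partitions of "products" are read into a local store on every instance;
        // "earliest" is always used, regardless of any auto.offset.reset setting.
        final GlobalKTable<String, String> products = builder.globalTable(
                "products",
                Consumed.with(Serdes.String(), Serdes.String()));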
      • +
      • +
        +

        globalTable

        +
        public <K,V> GlobalKTable<K,V> globalTable(String topic)

        Create a GlobalKTable for the specified topic. The default key and value deserializers as specified in the config are used. Input records with null key will be dropped.

        The resulting GlobalKTable will be materialized in a local KeyValueStore with an internal store name. Note that the store name may not be queryable through Interactive Queries. No internal changelog topic is created since the original input topic can be used for recovery (cf. methods of KGroupedStream and KGroupedTable that return a KTable).

        Note that GlobalKTable always applies the "auto.offset.reset" strategy "earliest" regardless of the specified value in StreamsConfig. Furthermore, GlobalKTable cannot be a versioned state store.

        Parameters:
        topic - the topic name; cannot be null
        Returns:
        a GlobalKTable for the specified topic
        +
        +
        +
      • +
      • +
        +

        globalTable

        +
        public <K,V> GlobalKTable<K,V> globalTable(String topic, Consumed<K,V> consumed, Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

        Create a GlobalKTable for the specified topic.

        Input KeyValue pairs with null key will be dropped.

        The resulting GlobalKTable will be materialized in a local KeyValueStore configured with the provided instance of Materialized. However, no internal changelog topic is created since the original input topic can be used for recovery (cf. methods of KGroupedStream and KGroupedTable that return a KTable).

        You should only specify serdes in the Consumed instance, as these will also be used to overwrite the serdes in Materialized, i.e.,

        streamsBuilder.globalTable(topic, Consumed.with(Serdes.String(), Serdes.String()), Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName))

        To query the local ReadOnlyKeyValueStore it must be obtained via KafkaStreams#store(...):

        KafkaStreams streams = ...
        StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>> localStore = streams.store(storeQueryParams);
        K key = "some-key";
        ValueAndTimestamp<V> valueForKey = localStore.get(key);

        Note that GlobalKTable always applies the "auto.offset.reset" strategy "earliest" regardless of the specified value in StreamsConfig or Consumed. Furthermore, GlobalKTable cannot be a versioned state store.

        Parameters:
        topic - the topic name; cannot be null
        consumed - the instance of Consumed used to define optional parameters; cannot be null
        materialized - the instance of Materialized used to materialize a state store; cannot be null
        Returns:
        a GlobalKTable for the specified topic
        +
        +
        +
      • +
      • +
        +

        globalTable

        +
        public <K,V> GlobalKTable<K,V> globalTable(String topic, Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

        Create a GlobalKTable for the specified topic.

        Input KeyValue pairs with null key will be dropped.

        The resulting GlobalKTable will be materialized in a local KeyValueStore configured with the provided instance of Materialized. However, no internal changelog topic is created since the original input topic can be used for recovery (cf. methods of KGroupedStream and KGroupedTable that return a KTable).

        To query the local ReadOnlyKeyValueStore it must be obtained via KafkaStreams#store(...):

        KafkaStreams streams = ...
        StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>> localStore = streams.store(storeQueryParams);
        K key = "some-key";
        ValueAndTimestamp<V> valueForKey = localStore.get(key);

        Note that GlobalKTable always applies the "auto.offset.reset" strategy "earliest" regardless of the specified value in StreamsConfig. Furthermore, GlobalKTable cannot be a versioned state store.

        Parameters:
        topic - the topic name; cannot be null
        materialized - the instance of Materialized used to materialize a state store; cannot be null
        Returns:
        a GlobalKTable for the specified topic
        +
        +
        +
      • +
      • +
        +

        addStateStore

        +
        public StreamsBuilder addStateStore(StoreBuilder<?> builder)

        Adds a state store to the underlying Topology.

        It is required to connect state stores to Processors or ValueTransformers before they can be used.

        Parameters:
        builder - the builder used to obtain the StateStore instance
        Returns:
        itself
        Throws:
        TopologyException - if a state store supplier is already added
        +
        +
        +
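        A hedged sketch of registering a store and connecting it to a processor. The store name "counts-store", the topic name "input-topic", and CountingProcessor (a user-defined class implementing Processor<String, String, Void, Void>) are hypothetical.

        final StreamsBuilder builder = new StreamsBuilder();

        // Register the store with the topology...
        final StoreBuilder<KeyValueStore<String, Long>> countStore =
                Stores.keyValueStoreBuilder(
                        Stores.persistentKeyValueStore("counts-store"),
                        Serdes.String(),
                        Serdes.Long());
        builder.addStateStore(countStore);

        // ...then connect it to a processor by name so the processor can call
        // context.getStateStore("counts-store") at runtime.
        builder.stream("input-topic", Consumed.with(Serdes.String(), Serdes.String()))
               .process(CountingProcessor::new, "counts-store");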
      • +
      • +
        +

        addGlobalStore

        +
        public <KIn,VIn> StreamsBuilder addGlobalStore(StoreBuilder<?> storeBuilder, String topic, Consumed<KIn,VIn> consumed, ProcessorSupplier<KIn,VIn,Void,Void> stateUpdateSupplier)

        Adds a global StateStore to the topology. The StateStore sources its data from all partitions of the provided input topic. There will be exactly one instance of this StateStore per Kafka Streams instance.

        A SourceNode with the provided sourceName will be added to consume the data arriving from the partitions of the input topic.

        The provided ProcessorSupplier will be used to create a Processor that will receive all records forwarded from the SourceNode. The supplier should always generate a new instance. Creating a single Processor object and returning the same object reference in ProcessorSupplier.get() is a violation of the supplier pattern and leads to runtime exceptions. This Processor should be used to keep the StateStore up-to-date. The default TimestampExtractor as specified in the config is used.

        It is not required to connect a global store to Processors or ValueTransformers; those have read-only access to all global stores by default.

        Parameters:
        storeBuilder - user defined StoreBuilder; cannot be null
        topic - the topic to source the data from
        consumed - the instance of Consumed used to define optional parameters; cannot be null
        stateUpdateSupplier - the instance of ProcessorSupplier
        Returns:
        itself
        Throws:
        TopologyException - if the processor of state is already registered
        +
        +
        +
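        A hedged sketch of wiring a global store; "config-topic", the store name, and GlobalStoreUpdater (a user-defined Processor<String, String, Void, Void> that writes every record it receives into the store) are hypothetical. The supplier reference returns a fresh Processor instance on every call, and logging is disabled on the store builder because global stores do not use a changelog topic.

        builder.addGlobalStore(
                Stores.keyValueStoreBuilder(
                        Stores.inMemoryKeyValueStore("global-config-store"),
                        Serdes.String(),
                        Serdes.String())
                        .withLoggingDisabled(),    // global stores must not have a changelog
                "config-topic",
                Consumed.with(Serdes.String(), Serdes.String()),
                GlobalStoreUpdater::new);          // new Processor instance per call, never a shared one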
      • +
      • +
        +

        build

        +
        public Topology build()

        Returns the Topology that represents the specified processing logic. Note that using this method means no optimizations are performed.

        Returns:
        the Topology that represents the specified processing logic
        +
        +
        +
      • +
      • +
        +

        build

        +
        public Topology build(Properties props)

        Returns the Topology that represents the specified processing logic and accepts a Properties instance used to indicate whether to optimize the topology or not.

        Parameters:
        props - the Properties used for building a possibly optimized topology
        Returns:
        the Topology that represents the specified processing logic
        +
        +
        +
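        A short sketch of passing the configuration into build() so that the configured optimizations are applied; the application ID and bootstrap servers are placeholders.

        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-app");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);

        // build(props) lets the builder apply the configured optimizations;
        // build() without arguments would skip them.
        final Topology topology = builder.build(props);
        final KafkaStreams streams = new KafkaStreams(topology, props);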
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/StreamsConfig.InternalConfig.html b/static/41/javadoc/org/apache/kafka/streams/StreamsConfig.InternalConfig.html new file mode 100644 index 000000000..753eb5ba5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/StreamsConfig.InternalConfig.html @@ -0,0 +1,400 @@ + + + + +StreamsConfig.InternalConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StreamsConfig.InternalConfig

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.StreamsConfig.InternalConfig
    +
    +
    +
    +
    Enclosing class:
    +
    StreamsConfig
    +
    +
    +
    public static class StreamsConfig.InternalConfig extends Object
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        INTERNAL_TASK_ASSIGNOR_CLASS

        +
        public static final String INTERNAL_TASK_ASSIGNOR_CLASS
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        REFERENCE_CONTAINER_PARTITION_ASSIGNOR

        +
        public static final String REFERENCE_CONTAINER_PARTITION_ASSIGNOR
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        ASSIGNMENT_LISTENER

        +
        public static final String ASSIGNMENT_LISTENER
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        INTERNAL_CONSUMER_WRAPPER

        +
        public static final String INTERNAL_CONSUMER_WRAPPER
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        EMIT_INTERVAL_MS_KSTREAMS_OUTER_JOIN_SPURIOUS_RESULTS_FIX

        +
        public static final String EMIT_INTERVAL_MS_KSTREAMS_OUTER_JOIN_SPURIOUS_RESULTS_FIX
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION

        +
        public static final String EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED

        +
        public static final String IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        TOPIC_PREFIX_ALTERNATIVE

        +
        public static final String TOPIC_PREFIX_ALTERNATIVE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        STATE_UPDATER_ENABLED

        +
        public static final String STATE_UPDATER_ENABLED
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        PROCESSING_THREADS_ENABLED

        +
        public static final String PROCESSING_THREADS_ENABLED
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        InternalConfig

        +
        public InternalConfig()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        stateUpdaterEnabled

        +
        public static boolean stateUpdaterEnabled(Map<String,Object> configs)
        +
        +
      • +
      • +
        +

        processingThreadsEnabled

        +
        public static boolean processingThreadsEnabled(Map<String,Object> configs)
        +
        +
      • +
      • +
        +

        getBoolean

        +
        public static boolean getBoolean(Map<String,Object> configs, + String key, + boolean defaultValue)
        +
        +
      • +
      • +
        +

        getLong

        +
        public static long getLong(Map<String,Object> configs, + String key, + long defaultValue)
        +
        +
      • +
      • +
        +

        getString

        +
        public static String getString(Map<String,Object> configs, + String key, + String defaultValue)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/StreamsConfig.html b/static/41/javadoc/org/apache/kafka/streams/StreamsConfig.html new file mode 100644 index 000000000..2bf667abe --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/StreamsConfig.html @@ -0,0 +1,3162 @@ + + + + +StreamsConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StreamsConfig

    +
    +
    java.lang.Object +
    org.apache.kafka.common.config.AbstractConfig +
    org.apache.kafka.streams.StreamsConfig
    +
    +
    +
    +
    +
    public class StreamsConfig extends AbstractConfig

    Configuration for a KafkaStreams instance. Can also be used to configure the Kafka Streams internal KafkaConsumer, KafkaProducer and Admin. To avoid consumer/producer/admin property conflicts, you should prefix those properties using consumerPrefix(String), producerPrefix(String) and adminClientPrefix(String), respectively.

    + Example: +

    
    + // potentially wrong: sets "metadata.max.age.ms" to 1 minute for producer AND consumer
    + Properties streamsProperties = new Properties();
    + streamsProperties.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, 60000);
    + // or
    + streamsProperties.put(ProducerConfig.METADATA_MAX_AGE_CONFIG, 60000);
    +
    + // suggested:
    + Properties streamsProperties = new Properties();
    + // sets "metadata.max.age.ms" to 1 minute for consumer only
    + streamsProperties.put(StreamsConfig.consumerPrefix(ConsumerConfig.METADATA_MAX_AGE_CONFIG), 60000);
    + // sets "metadata.max.age.ms" to 1 minute for producer only
    + streamsProperties.put(StreamsConfig.producerPrefix(ProducerConfig.METADATA_MAX_AGE_CONFIG), 60000);
    +
    + StreamsConfig streamsConfig = new StreamsConfig(streamsProperties);
    + 
    + + This instance can also be used to pass in custom configurations to different modules (e.g. passing a special config in your customized serde class). + The consumer/producer/admin prefix can also be used to distinguish these custom config values passed to different clients with the same config name. + * Example: +
    
    + Properties streamsProperties = new Properties();
    + // sets "my.custom.config" to "foo" for consumer only
    + streamsProperties.put(StreamsConfig.consumerPrefix("my.custom.config"), "foo");
    + // sets "my.custom.config" to "bar" for producer only
    + streamsProperties.put(StreamsConfig.producerPrefix("my.custom.config"), "bar");
    + // sets "my.custom.config2" to "boom" for all clients universally
    + streamsProperties.put("my.custom.config2", "boom");
    +
    + // as a result, inside producer's serde class configure(..) function,
    + // users can now read both key-value pairs "my.custom.config" -> "foo"
    + // and "my.custom.config2" -> "boom" from the config map
    + StreamsConfig streamsConfig = new StreamsConfig(streamsProperties);
    + 
    When increasing ProducerConfig.MAX_BLOCK_MS_CONFIG to be more resilient to non-available brokers, you should also increase ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG using the following guidance:

        max.poll.interval.ms > max.block.ms

    Kafka Streams requires at least the following properties to be set:

    • "application.id"
    • "bootstrap.servers"

    By default, Kafka Streams does not allow users to overwrite the following properties (Streams setting shown in parentheses):

    • "group.id" (<application.id>) - Streams client will always use the application ID as the consumer group ID
    • "enable.auto.commit" (false) - Streams client will always disable/turn off auto committing
    • "partition.assignment.strategy" (StreamsPartitionAssignor) - Streams client will always use its own partition assignor

    If "processing.guarantee" is set to "exactly_once_v2", Kafka Streams does not allow users to overwrite the following properties (Streams setting shown in parentheses):
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        DUMMY_THREAD_INDEX

        +
        @Deprecated +public static final int DUMMY_THREAD_INDEX
        +
        Deprecated.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MAX_TASK_IDLE_MS_DISABLED

        +
        public static final long MAX_TASK_IDLE_MS_DISABLED
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MAX_RACK_AWARE_ASSIGNMENT_TAG_LIST_SIZE

        +
        public static final int MAX_RACK_AWARE_ASSIGNMENT_TAG_LIST_SIZE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MAX_RACK_AWARE_ASSIGNMENT_TAG_KEY_LENGTH

        +
        public static final int MAX_RACK_AWARE_ASSIGNMENT_TAG_KEY_LENGTH
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MAX_RACK_AWARE_ASSIGNMENT_TAG_VALUE_LENGTH

        +
        public static final int MAX_RACK_AWARE_ASSIGNMENT_TAG_VALUE_LENGTH
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        TOPIC_PREFIX

        +
        public static final String TOPIC_PREFIX
        +
        Prefix used to provide default topic configs to be applied when creating internal topics. + These should be valid properties from TopicConfig. + It is recommended to use topicPrefix(String).
        +
        +
        See Also:
        +
        + +
        +
        +
        +
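        For illustration, a hedged one-liner using the recommended topicPrefix(String) helper; the retention value is an arbitrary placeholder.

        // Give all internal topics created by Kafka Streams a 7-day retention.
        props.put(StreamsConfig.topicPrefix(TopicConfig.RETENTION_MS_CONFIG), "604800000");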
      • +
      • +
        +

        CONSUMER_PREFIX

        +
        public static final String CONSUMER_PREFIX
        +
        Prefix used to isolate consumer configs from other client configs. + It is recommended to use consumerPrefix(String) to add this prefix to consumer + properties.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MAIN_CONSUMER_PREFIX

        +
        public static final String MAIN_CONSUMER_PREFIX
        +
        Prefix used to override consumer configs for the main consumer client from the general consumer client configs. The override precedence is the following (from highest to lowest precedence):
        1. main.consumer.[config-name]
        2. consumer.[config-name]
        3. [config-name]
        +
        +
        See Also:
        +
        + +
        +
        +
        +
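        A small sketch of the precedence described above; the numeric values are placeholders.

        // Applies to all consumers unless overridden below.
        props.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_RECORDS_CONFIG), 1000);
        // Wins for the main consumer only; the restore and global consumers keep 1000.
        props.put(StreamsConfig.mainConsumerPrefix(ConsumerConfig.MAX_POLL_RECORDS_CONFIG), 100);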
      • +
      • +
        +

        RESTORE_CONSUMER_PREFIX

        +
        public static final String RESTORE_CONSUMER_PREFIX
        +
        Prefix used to override consumer configs for the restore consumer client from the general consumer client configs. The override precedence is the following (from highest to lowest precedence):
        1. restore.consumer.[config-name]
        2. consumer.[config-name]
        3. [config-name]
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        GLOBAL_CONSUMER_PREFIX

        +
        public static final String GLOBAL_CONSUMER_PREFIX
        +
        Prefix used to override consumer configs for the global consumer client from the general consumer client configs. The override precedence is the following (from highest to lowest precedence):
        1. global.consumer.[config-name]
        2. consumer.[config-name]
        3. [config-name]
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        PRODUCER_PREFIX

        +
        public static final String PRODUCER_PREFIX
        +
        Prefix used to isolate producer configs from other client configs. + It is recommended to use producerPrefix(String) to add this prefix to producer + properties.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        ADMIN_CLIENT_PREFIX

        +
        public static final String ADMIN_CLIENT_PREFIX
        +
        Prefix used to isolate admin configs from other client configs. + It is recommended to use adminClientPrefix(String) to add this prefix to admin + client properties.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        CLIENT_TAG_PREFIX

        +
        public static final String CLIENT_TAG_PREFIX
        +
        Prefix used to add arbitrary tags to a Kafka Stream's instance as key-value pairs. + Example: + client.tag.zone=zone1 + client.tag.cluster=cluster1
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        NO_OPTIMIZATION

        +
        public static final String NO_OPTIMIZATION
        +
        Config value for parameter "topology.optimization" for disabling topology optimization
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        OPTIMIZE

        +
        public static final String OPTIMIZE
        +
        Config value for parameter "topology.optimization" for enabling topology optimization
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        REUSE_KTABLE_SOURCE_TOPICS

        +
        public static final String REUSE_KTABLE_SOURCE_TOPICS
        +
        Config value for parameter "topology.optimization" + for enabling the specific optimization that reuses source topic as changelog topic + for KTables.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MERGE_REPARTITION_TOPICS

        +
        public static final String MERGE_REPARTITION_TOPICS
        +
        Config value for parameter "topology.optimization" + for enabling the specific optimization that merges duplicated repartition topics.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SINGLE_STORE_SELF_JOIN

        +
        public static final String SINGLE_STORE_SELF_JOIN
        +
        Config value for parameter "topology.optimization" + for enabling the optimization that optimizes inner stream-stream joins into self-joins when + both arguments are the same stream.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_0100

        +
        public static final String UPGRADE_FROM_0100
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 0.10.0.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_0101

        +
        public static final String UPGRADE_FROM_0101
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 0.10.1.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_0102

        +
        public static final String UPGRADE_FROM_0102
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 0.10.2.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_0110

        +
        public static final String UPGRADE_FROM_0110
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 0.11.0.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_10

        +
        public static final String UPGRADE_FROM_10
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 1.0.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_11

        +
        public static final String UPGRADE_FROM_11
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 1.1.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_20

        +
        public static final String UPGRADE_FROM_20
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 2.0.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_21

        +
        public static final String UPGRADE_FROM_21
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 2.1.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_22

        +
        public static final String UPGRADE_FROM_22
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 2.2.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_23

        +
        public static final String UPGRADE_FROM_23
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 2.3.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_24

        +
        public static final String UPGRADE_FROM_24
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 2.4.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_25

        +
        public static final String UPGRADE_FROM_25
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 2.5.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_26

        +
        public static final String UPGRADE_FROM_26
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 2.6.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_27

        +
        public static final String UPGRADE_FROM_27
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 2.7.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_28

        +
        public static final String UPGRADE_FROM_28
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 2.8.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_30

        +
        public static final String UPGRADE_FROM_30
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 3.0.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_31

        +
        public static final String UPGRADE_FROM_31
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 3.1.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_32

        +
        public static final String UPGRADE_FROM_32
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 3.2.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_33

        +
        public static final String UPGRADE_FROM_33
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 3.3.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_34

        +
        public static final String UPGRADE_FROM_34
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 3.4.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_35

        +
        public static final String UPGRADE_FROM_35
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 3.5.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_36

        +
        public static final String UPGRADE_FROM_36
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 3.6.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_37

        +
        public static final String UPGRADE_FROM_37
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 3.7.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_38

        +
        public static final String UPGRADE_FROM_38
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 3.8.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_39

        +
        public static final String UPGRADE_FROM_39
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 3.9.x.
        +
        +
      • +
      • +
        +

        UPGRADE_FROM_40

        +
        public static final String UPGRADE_FROM_40
        +
        Config value for parameter "upgrade.from" for upgrading an application from version 4.0.x.
        +
        +
      • +
      • +
        +

        AT_LEAST_ONCE

        +
        public static final String AT_LEAST_ONCE
        +
        Config value for parameter "processing.guarantee" for at-least-once processing guarantees.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        EXACTLY_ONCE_V2

        +
        public static final String EXACTLY_ONCE_V2
        +
        Config value for parameter "processing.guarantee" for exactly-once processing guarantees. + +

        Enabling exactly-once-v2 requires broker version 2.5 or higher.

        +
        +
        See Also:
        +
        + +
        +
        +
        +
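        For example (a sketch; requires brokers on version 2.5 or newer):

        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);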
      • +
      • +
        +

        RACK_AWARE_ASSIGNMENT_STRATEGY_NONE

        +
        public static final String RACK_AWARE_ASSIGNMENT_STRATEGY_NONE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC

        +
        public static final String RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY

        +
        public static final String RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        METRICS_LATEST

        +
        public static final String METRICS_LATEST
        +
        Config value for parameter "built.in.metrics.version" for the latest built-in metrics version.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        ACCEPTABLE_RECOVERY_LAG_CONFIG

        +
        public static final String ACCEPTABLE_RECOVERY_LAG_CONFIG
        +
        acceptable.recovery.lag
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        APPLICATION_ID_CONFIG

        +
        public static final String APPLICATION_ID_CONFIG
        +
        application.id
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        APPLICATION_SERVER_CONFIG

        +
        public static final String APPLICATION_SERVER_CONFIG
        +
        application.server
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        BOOTSTRAP_SERVERS_CONFIG

        +
        public static final String BOOTSTRAP_SERVERS_CONFIG
        +
        bootstrap.servers
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        BUFFERED_RECORDS_PER_PARTITION_CONFIG

        +
        public static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG
        +
        buffered.records.per.partition
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        BUFFERED_RECORDS_PER_PARTITION_DOC

        +
        @Deprecated +public static final String BUFFERED_RECORDS_PER_PARTITION_DOC
        +
        Deprecated.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        BUILT_IN_METRICS_VERSION_CONFIG

        +
        public static final String BUILT_IN_METRICS_VERSION_CONFIG
        +
        built.in.metrics.version
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        CACHE_MAX_BYTES_BUFFERING_CONFIG

        +
        @Deprecated +public static final String CACHE_MAX_BYTES_BUFFERING_CONFIG
        +
        Deprecated. +
        Since 3.4. Use "statestore.cache.max.bytes" instead.
        +
        +
        cache.max.bytes.buffering
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        CACHE_MAX_BYTES_BUFFERING_DOC

        +
        @Deprecated +public static final String CACHE_MAX_BYTES_BUFFERING_DOC
        +
        Deprecated.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        CLIENT_ID_CONFIG

        +
        public static final String CLIENT_ID_CONFIG
        +
        client.id
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        COMMIT_INTERVAL_MS_CONFIG

        +
        public static final String COMMIT_INTERVAL_MS_CONFIG
        +
        commit.interval.ms
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        CONNECTIONS_MAX_IDLE_MS_CONFIG

        +
        public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG
        +
        connections.max.idle.ms
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_CLIENT_SUPPLIER_CONFIG

        +
        public static final String DEFAULT_CLIENT_SUPPLIER_CONFIG
        +
        default.client.supplier
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_CLIENT_SUPPLIER_DOC

        +
        @Deprecated +public static final String DEFAULT_CLIENT_SUPPLIER_DOC
        +
        Deprecated.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG

        +
        @Deprecated +public static final String DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG
        +
        Deprecated. + +
        +
        default.deserialization.exception.handler
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_DOC

        +
        @Deprecated +public static final String DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_DOC
        +
        Deprecated.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG

        +
        public static final String DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG
        +
        deserialization.exception.handler
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG

        +
        @Deprecated +public static final String DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG
        +
        Deprecated. + +
        +
        default.production.exception.handler
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG

        +
        public static final String PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG
        +
        production.exception.handler
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_DSL_STORE_CONFIG

        +
        @Deprecated +public static final String DEFAULT_DSL_STORE_CONFIG
        +
        Deprecated. +
        Since 3.7. Use DSL_STORE_SUPPLIERS_CLASS_CONFIG instead.
        +
        +
        default.dsl.store
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_DSL_STORE_DOC

        +
        @Deprecated +public static final String DEFAULT_DSL_STORE_DOC
        +
        Deprecated.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        ROCKS_DB

        +
        @Deprecated +public static final String ROCKS_DB
        +
        Deprecated.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        IN_MEMORY

        +
        @Deprecated +public static final String IN_MEMORY
        +
        Deprecated.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_DSL_STORE

        +
        @Deprecated +public static final String DEFAULT_DSL_STORE
        +
        Deprecated.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DSL_STORE_SUPPLIERS_CLASS_CONFIG

        +
        public static final String DSL_STORE_SUPPLIERS_CLASS_CONFIG
        +
        dsl.store.suppliers.class
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_KEY_SERDE_CLASS_CONFIG

        +
        public static final String DEFAULT_KEY_SERDE_CLASS_CONFIG
        +
        default.key.serde
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_VALUE_SERDE_CLASS_CONFIG

        +
        public static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG
        +
        default.value.serde
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG

        +
        public static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG
        +
        default.timestamp.extractor
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_DOC

        +
        @Deprecated +public static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_DOC
        +
        Deprecated.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        ENABLE_METRICS_PUSH_CONFIG

        +
        public static final String ENABLE_METRICS_PUSH_CONFIG
        +
        enable.metrics.push
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        ENABLE_METRICS_PUSH_DOC

        +
        @Deprecated +public static final String ENABLE_METRICS_PUSH_DOC
        +
        Deprecated.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        ENSURE_EXPLICIT_INTERNAL_RESOURCE_NAMING_CONFIG

        +
        public static final String ENSURE_EXPLICIT_INTERNAL_RESOURCE_NAMING_CONFIG
        +
        ensure.explicit.internal.resource.naming
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        GROUP_PROTOCOL_CONFIG

        +
        public static final String GROUP_PROTOCOL_CONFIG
        +
        group.protocol
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_GROUP_PROTOCOL

        +
        public static final String DEFAULT_GROUP_PROTOCOL
        +
        +
      • +
      • +
        +

        LOG_SUMMARY_INTERVAL_MS_CONFIG

        +
        public static final String LOG_SUMMARY_INTERVAL_MS_CONFIG
        +
        log.summary.interval.ms
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MAX_TASK_IDLE_MS_CONFIG

        +
        public static final String MAX_TASK_IDLE_MS_CONFIG
        +
        max.task.idle.ms
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MAX_TASK_IDLE_MS_DOC

        +
        @Deprecated +public static final String MAX_TASK_IDLE_MS_DOC
        +
        Deprecated.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        MAX_WARMUP_REPLICAS_CONFIG

        +
        public static final String MAX_WARMUP_REPLICAS_CONFIG
        +
        max.warmup.replicas
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        METADATA_MAX_AGE_CONFIG

        +
        public static final String METADATA_MAX_AGE_CONFIG
        +
        metadata.max.age.ms
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        METRICS_NUM_SAMPLES_CONFIG

        +
        public static final String METRICS_NUM_SAMPLES_CONFIG
        +
        metrics.num.samples
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        METRICS_RECORDING_LEVEL_CONFIG

        +
        public static final String METRICS_RECORDING_LEVEL_CONFIG
        +
        metrics.recording.level
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        METRIC_REPORTER_CLASSES_CONFIG

        +
        public static final String METRIC_REPORTER_CLASSES_CONFIG
        +
        metric.reporters
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        METRICS_SAMPLE_WINDOW_MS_CONFIG

        +
        public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG
        +
        metrics.sample.window.ms
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        NUM_STANDBY_REPLICAS_CONFIG

        +
        public static final String NUM_STANDBY_REPLICAS_CONFIG
        +
        num.standby.replicas
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        NUM_STREAM_THREADS_CONFIG

        +
        public static final String NUM_STREAM_THREADS_CONFIG
        +
        num.stream.threads
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        POLL_MS_CONFIG

        +
        public static final String POLL_MS_CONFIG
        +
        poll.ms
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        PROBING_REBALANCE_INTERVAL_MS_CONFIG

        +
        public static final String PROBING_REBALANCE_INTERVAL_MS_CONFIG
        +
        probing.rebalance.interval.ms
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG

        +
        public static final String PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG
        +
        processing.exception.handler
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        PROCESSING_EXCEPTION_HANDLER_CLASS_DOC

        +
        @Deprecated +public static final String PROCESSING_EXCEPTION_HANDLER_CLASS_DOC
        +
        Deprecated.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        PROCESSING_GUARANTEE_CONFIG

        +
        public static final String PROCESSING_GUARANTEE_CONFIG
        +
        processing.guarantee
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        PROCESSOR_WRAPPER_CLASS_CONFIG

        +
        public static final String PROCESSOR_WRAPPER_CLASS_CONFIG
        +
        processor.wrapper.class
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        REPARTITION_PURGE_INTERVAL_MS_CONFIG

        +
        public static final String REPARTITION_PURGE_INTERVAL_MS_CONFIG
        +
        repartition.purge.interval.ms
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RECEIVE_BUFFER_CONFIG

        +
        public static final String RECEIVE_BUFFER_CONFIG
        +
        receive.buffer.bytes
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG

        +
        public static final String RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG
        +
        rack.aware.assignment.non_overlap_cost
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_DOC

        +
        @Deprecated +public static final String RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_DOC
        +
        Deprecated.
        +
        +
      • +
      • +
        +

        RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG

        +
        public static final String RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG
        +
        rack.aware.assignment.strategy
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RACK_AWARE_ASSIGNMENT_STRATEGY_DOC

        +
        @Deprecated +public static final String RACK_AWARE_ASSIGNMENT_STRATEGY_DOC
        +
        Deprecated.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RACK_AWARE_ASSIGNMENT_TAGS_CONFIG

        +
        public static final String RACK_AWARE_ASSIGNMENT_TAGS_CONFIG
        +
        rack.aware.assignment.tags
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG

        +
        public static final String RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG
        +
        rack.aware.assignment.traffic_cost
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_DOC

        +
        @Deprecated +public static final String RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_DOC
        +
        Deprecated.
        +
        +
      • +
      • +
        +

        RECONNECT_BACKOFF_MS_CONFIG

        +
        public static final String RECONNECT_BACKOFF_MS_CONFIG
        +
        reconnect.backoff.ms
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RECONNECT_BACKOFF_MAX_MS_CONFIG

        +
        public static final String RECONNECT_BACKOFF_MAX_MS_CONFIG
        +
        reconnect.backoff.max.ms
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        REPLICATION_FACTOR_CONFIG

        +
        public static final String REPLICATION_FACTOR_CONFIG
        +
        replication.factor
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        REQUEST_TIMEOUT_MS_CONFIG

        +
        public static final String REQUEST_TIMEOUT_MS_CONFIG
        +
        request.timeout.ms
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        RETRY_BACKOFF_MS_CONFIG

        +
        public static final String RETRY_BACKOFF_MS_CONFIG
        +
        retry.backoff.ms
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        ROCKSDB_CONFIG_SETTER_CLASS_CONFIG

        +
        public static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG
        +
        rocksdb.config.setter
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SECURITY_PROTOCOL_CONFIG

        +
        public static final String SECURITY_PROTOCOL_CONFIG
        +
        security.protocol
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        SEND_BUFFER_CONFIG

        +
        public static final String SEND_BUFFER_CONFIG
        +
        send.buffer.bytes
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        STATE_CLEANUP_DELAY_MS_CONFIG

        +
        public static final String STATE_CLEANUP_DELAY_MS_CONFIG
        +
        state.cleanup.delay.ms
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        STATE_DIR_CONFIG

        public static final String STATE_DIR_CONFIG ("state.dir")

        STATESTORE_CACHE_MAX_BYTES_CONFIG

        public static final String STATESTORE_CACHE_MAX_BYTES_CONFIG ("statestore.cache.max.bytes")

        STATESTORE_CACHE_MAX_BYTES_DOC

        @Deprecated
        public static final String STATESTORE_CACHE_MAX_BYTES_DOC
        Deprecated.

        TASK_ASSIGNOR_CLASS_CONFIG

        public static final String TASK_ASSIGNOR_CLASS_CONFIG ("task.assignor.class")

        TASK_TIMEOUT_MS_CONFIG

        public static final String TASK_TIMEOUT_MS_CONFIG ("task.timeout.ms")

        TASK_TIMEOUT_MS_DOC

        @Deprecated
        public static final String TASK_TIMEOUT_MS_DOC
        Deprecated.

        UPGRADE_FROM_CONFIG

        public static final String UPGRADE_FROM_CONFIG ("upgrade.from")

        TOPOLOGY_OPTIMIZATION_CONFIG

        public static final String TOPOLOGY_OPTIMIZATION_CONFIG ("topology.optimization")

        WINDOWED_INNER_CLASS_SERDE

        @Deprecated
        public static final String WINDOWED_INNER_CLASS_SERDE ("windowed.inner.class.serde")

        WINDOW_SIZE_MS_CONFIG

        @Deprecated
        public static final String WINDOW_SIZE_MS_CONFIG ("window.size.ms")

        WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG

        public static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG ("windowstore.changelog.additional.retention.ms")

      Constructor Details

        StreamsConfig

        public StreamsConfig(Map<?,?> props)
        Create a new StreamsConfig using the given properties.
        Parameters: props - properties that specify Kafka Streams and internal consumer/producer configuration

      Method Details

        consumerPrefix

        public static String consumerPrefix(String consumerProp)
        Prefix a property with CONSUMER_PREFIX. This is used to isolate consumer configs from other client configs.
        Parameters: consumerProp - the consumer property to be masked
        Returns: CONSUMER_PREFIX + consumerProp

        mainConsumerPrefix

        public static String mainConsumerPrefix(String consumerProp)
        Prefix a property with MAIN_CONSUMER_PREFIX. This is used to isolate main consumer configs from other client configs.
        Parameters: consumerProp - the consumer property to be masked
        Returns: MAIN_CONSUMER_PREFIX + consumerProp

        restoreConsumerPrefix

        public static String restoreConsumerPrefix(String consumerProp)
        Prefix a property with RESTORE_CONSUMER_PREFIX. This is used to isolate restore consumer configs from other client configs.
        Parameters: consumerProp - the consumer property to be masked
        Returns: RESTORE_CONSUMER_PREFIX + consumerProp

        clientTagPrefix

        public static String clientTagPrefix(String clientTagKey)
        Prefix a client tag key with CLIENT_TAG_PREFIX.
        Parameters: clientTagKey - client tag key
        Returns: CLIENT_TAG_PREFIX + clientTagKey

        globalConsumerPrefix

        public static String globalConsumerPrefix(String consumerProp)
        Prefix a property with GLOBAL_CONSUMER_PREFIX. This is used to isolate global consumer configs from other client configs.
        Parameters: consumerProp - the consumer property to be masked
        Returns: GLOBAL_CONSUMER_PREFIX + consumerProp

        producerPrefix

        public static String producerPrefix(String producerProp)
        Prefix a property with PRODUCER_PREFIX. This is used to isolate producer configs from other client configs.
        Parameters: producerProp - the producer property to be masked
        Returns: PRODUCER_PREFIX + producerProp

        adminClientPrefix

        public static String adminClientPrefix(String adminClientProp)
        Prefix a property with ADMIN_CLIENT_PREFIX. This is used to isolate admin configs from other client configs.
        Parameters: adminClientProp - the admin client property to be masked
        Returns: ADMIN_CLIENT_PREFIX + adminClientProp

        topicPrefix

        public static String topicPrefix(String topicProp)
        Prefix a property with TOPIC_PREFIX, used to provide default topic configs to be applied when creating internal topics.
        Parameters: topicProp - the topic property to be masked
        Returns: TOPIC_PREFIX + topicProp

        configDef

        public static ConfigDef configDef()
        Return a copy of the config definition.
        Returns: a copy of the config definition
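        For orientation, a minimal sketch of how these prefix helpers are typically combined when assembling Streams properties. The application id, broker address, and override values are illustrative assumptions, not part of the API above.

            import java.util.Properties;
            import org.apache.kafka.clients.consumer.ConsumerConfig;
            import org.apache.kafka.streams.StreamsConfig;

            Properties props = new Properties();
            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "prefix-demo");        // illustrative app id
            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // illustrative broker
            // Applies to every consumer created by Kafka Streams:
            props.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_RECORDS_CONFIG), 100);
            // Applies to the restore consumer only, overriding the plain consumer prefix:
            props.put(StreamsConfig.restoreConsumerPrefix(ConsumerConfig.MAX_POLL_RECORDS_CONFIG), 500);
            // Default topic config applied when Streams creates internal topics:
            props.put(StreamsConfig.topicPrefix("retention.ms"), "172800000");
            StreamsConfig config = new StreamsConfig(props);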

        getMainConsumerConfigs

        public Map<String,Object> getMainConsumerConfigs(String groupId, String clientId, int threadIdx)
        Get the configs for the main consumer. Properties using the prefix MAIN_CONSUMER_PREFIX are used in favor of the properties prefixed with CONSUMER_PREFIX and the non-prefixed versions (see the override precedence ordering in MAIN_CONSUMER_PREFIX), except for ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, where the non-prefixed version is always used because reading/writing is only supported against a single Kafka cluster. If not specified via MAIN_CONSUMER_PREFIX, the main consumer shares the general consumer configs prefixed by CONSUMER_PREFIX.
        Parameters: groupId - consumer groupId; clientId - clientId; threadIdx - stream thread index
        Returns: Map of the main consumer configuration.

        getRestoreConsumerConfigs

        public Map<String,Object> getRestoreConsumerConfigs(String clientId)
        Get the configs for the restore consumer. Properties using the prefix RESTORE_CONSUMER_PREFIX are used in favor of the properties prefixed with CONSUMER_PREFIX and the non-prefixed versions (see the override precedence ordering in RESTORE_CONSUMER_PREFIX), except for ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, where the non-prefixed version is always used. If not specified via RESTORE_CONSUMER_PREFIX, the restore consumer shares the general consumer configs prefixed by CONSUMER_PREFIX.
        Parameters: clientId - clientId
        Returns: Map of the restore consumer configuration.

        getGlobalConsumerConfigs

        public Map<String,Object> getGlobalConsumerConfigs(String clientId)
        Get the configs for the global consumer. Properties using the prefix GLOBAL_CONSUMER_PREFIX are used in favor of the properties prefixed with CONSUMER_PREFIX and the non-prefixed versions (see the override precedence ordering in GLOBAL_CONSUMER_PREFIX), except for ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, where the non-prefixed version is always used. If not specified via GLOBAL_CONSUMER_PREFIX, the global consumer shares the general consumer configs prefixed by CONSUMER_PREFIX.
        Parameters: clientId - clientId
        Returns: Map of the global consumer configuration.

        getProducerConfigs

        public Map<String,Object> getProducerConfigs(String clientId)
        Get the configs for the producer. Properties using the prefix PRODUCER_PREFIX are used in favor of their non-prefixed versions, except for ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, where the non-prefixed version is always used.
        Parameters: clientId - clientId
        Returns: Map of the producer configuration.

        getAdminConfigs

        public Map<String,Object> getAdminConfigs(String clientId)
        Get the configs for the admin client.
        Parameters: clientId - clientId
        Returns: Map of the admin client configuration.

        getClientTags

        public Map<String,String> getClientTags()
        Get the client tags configured with the CLIENT_TAG_PREFIX prefix.
        Returns: Map of the client tags.

        verifyTopologyOptimizationConfigs

        public static Set<String> verifyTopologyOptimizationConfigs(String config)

        getKafkaClientSupplier

        public KafkaClientSupplier getKafkaClientSupplier()
        Return the configured KafkaClientSupplier.
        Returns: the configured KafkaClientSupplier

        defaultKeySerde

        public Serde<?> defaultKeySerde()
        Return a configured instance of the key Serde class.
        Returns: a configured instance of the key Serde class

        defaultValueSerde

        public Serde<?> defaultValueSerde()
        Return a configured instance of the value Serde class.
        Returns: a configured instance of the value Serde class

        defaultTimestampExtractor

        public TimestampExtractor defaultTimestampExtractor()

        deserializationExceptionHandler

        public DeserializationExceptionHandler deserializationExceptionHandler()

        defaultDeserializationExceptionHandler

        @Deprecated
        public DeserializationExceptionHandler defaultDeserializationExceptionHandler()
        Deprecated. Since 4.0. Use deserializationExceptionHandler() instead.

        productionExceptionHandler

        public ProductionExceptionHandler productionExceptionHandler()

        defaultProductionExceptionHandler

        @Deprecated
        public ProductionExceptionHandler defaultProductionExceptionHandler()
        Deprecated. Since 4.0. Use productionExceptionHandler() instead.

        processingExceptionHandler

        public ProcessingExceptionHandler processingExceptionHandler()

        main

        public static void main(String[] args)
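    As a hedged sketch of the getters above, configured defaults can be read back from a constructed StreamsConfig. The serde classes and property values here are illustrative assumptions.

        import java.util.Properties;
        import org.apache.kafka.common.serialization.Serde;
        import org.apache.kafka.common.serialization.Serdes;
        import org.apache.kafka.streams.StreamsConfig;
        import org.apache.kafka.streams.errors.DeserializationExceptionHandler;

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "getter-demo");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class);
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.LongSerde.class);

        StreamsConfig config = new StreamsConfig(props);
        Serde<?> keySerde = config.defaultKeySerde();      // instance of the configured String serde
        Serde<?> valueSerde = config.defaultValueSerde();  // instance of the configured Long serde
        DeserializationExceptionHandler handler = config.deserializationExceptionHandler();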
diff --git a/static/41/javadoc/org/apache/kafka/streams/StreamsMetadata.html b/static/41/javadoc/org/apache/kafka/streams/StreamsMetadata.html (new file)

    StreamsMetadata (kafka 4.1.0 API)

    Interface StreamsMetadata

    public interface StreamsMetadata
    Metadata of a Kafka Streams client.

      Method Details

        hostInfo

        HostInfo hostInfo()
        The value of StreamsConfig.APPLICATION_SERVER_CONFIG configured for the Streams client.
        Returns: HostInfo corresponding to the Streams client

        stateStoreNames

        Set<String> stateStoreNames()
        Names of the state stores assigned to active tasks of the Streams client.
        Returns: names of the state stores assigned to active tasks

        topicPartitions

        Set<TopicPartition> topicPartitions()
        Source topic partitions of the active tasks of the Streams client.
        Returns: source topic partitions of the active tasks

        standbyTopicPartitions

        Set<TopicPartition> standbyTopicPartitions()
        Source topic partitions for which the Streams client acts as a standby.
        Returns: source topic partitions of the standby tasks

        standbyStateStoreNames

        Set<String> standbyStateStoreNames()
        Names of the state stores assigned to standby tasks of the Streams client.
        Returns: names of the state stores assigned to standby tasks

        host

        String host()
        Host where the Streams client runs. This method is equivalent to StreamsMetadata.hostInfo().host().
        Returns: the host where the Streams client runs

        port

        int port()
        Port on which the Streams client listens. This method is equivalent to StreamsMetadata.hostInfo().port().
        Returns: the port on which the Streams client listens

        equals

        boolean equals(Object o)
        Compares the specified object with this StreamsMetadata. Returns true if and only if the specified object is also a StreamsMetadata, hostInfo() are equal for both, and stateStoreNames(), topicPartitions(), standbyStateStoreNames(), and standbyTopicPartitions() contain the same elements.
        Overrides: equals in class Object
        Returns: true if this object is the same as the obj argument; false otherwise.

        hashCode

        int hashCode()
        Returns the hash code value for this StreamsMetadata, defined as:
        Objects.hash(hostInfo(), stateStoreNames(), topicPartitions(), standbyStateStoreNames(), standbyTopicPartitions());
        Overrides: hashCode in class Object
        Returns: a hash code value for this object.
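    A hedged sketch of how this metadata is commonly used for interactive queries. The running KafkaStreams instance (streams) and the store name "counts-store" are assumptions for illustration only.

        for (StreamsMetadata metadata : streams.metadataForAllStreamsClients()) {
            if (metadata.stateStoreNames().contains("counts-store")) {
                // This client actively hosts the store; its host/port can serve queries.
                System.out.printf("store hosted at %s:%d, active partitions %s%n",
                        metadata.host(), metadata.port(), metadata.topicPartitions());
            }
        }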
diff --git a/static/41/javadoc/org/apache/kafka/streams/StreamsMetrics.html b/static/41/javadoc/org/apache/kafka/streams/StreamsMetrics.html (new file)

    StreamsMetrics (kafka 4.1.0 API)

    Interface StreamsMetrics

    public interface StreamsMetrics
    The Kafka Streams metrics interface for adding metric sensors and collecting metric values.

      Method Summary

      Sensor addLatencyRateTotalSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordingLevel, String... tags)
      Add a latency, rate and total sensor for a specific operation, which will include the following metrics: average latency, max latency, invocation rate (num.operations / second), and total invocation count. Whenever a user records this sensor via Sensor.record(double) etc., it is counted as one invocation of the operation, so the rate / count metrics are updated accordingly, and the recorded latency value is used to update the average / max latency as well.

      Sensor addRateTotalSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordingLevel, String... tags)
      Add a rate and a total sensor for a specific operation, which will include the following metrics: invocation rate (num.operations / time unit) and total invocation count. Whenever a user records this sensor via Sensor.record(double) etc., it is counted as one invocation of the operation, so the rate / count metrics are updated accordingly.

      Sensor addSensor(String name, Sensor.RecordingLevel recordingLevel)
      Generic method to create a sensor.

      Sensor addSensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents)
      Generic method to create a sensor with parent sensors.

      Map<MetricName,? extends Metric> metrics()
      Get a read-only handle on the global metrics registry.

      void removeSensor(Sensor sensor)
      Remove a sensor.
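    A minimal sketch of registering a custom sensor from a Processor API processor; the scope, entity, and operation names are illustrative assumptions, and the sensor measures a simple pass-through step.

        import org.apache.kafka.common.metrics.Sensor;
        import org.apache.kafka.streams.StreamsMetrics;
        import org.apache.kafka.streams.processor.api.Processor;
        import org.apache.kafka.streams.processor.api.ProcessorContext;
        import org.apache.kafka.streams.processor.api.Record;

        public class TimedProcessor implements Processor<String, String, String, String> {
            private Sensor latencySensor;
            private ProcessorContext<String, String> context;

            @Override
            public void init(final ProcessorContext<String, String> context) {
                this.context = context;
                final StreamsMetrics metrics = context.metrics();
                // Illustrative scope/entity/operation names:
                latencySensor = metrics.addLatencyRateTotalSensor(
                        "custom-scope", "timed-processor", "process", Sensor.RecordingLevel.DEBUG);
            }

            @Override
            public void process(final Record<String, String> record) {
                final long start = System.nanoTime();
                context.forward(record);                                            // pass-through processing
                latencySensor.record((System.nanoTime() - start) / 1_000_000.0);    // latency in milliseconds
            }
        }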
diff --git a/static/41/javadoc/org/apache/kafka/streams/TaskMetadata.html b/static/41/javadoc/org/apache/kafka/streams/TaskMetadata.html (new file)

    TaskMetadata (kafka 4.1.0 API)

    Interface TaskMetadata

    public interface TaskMetadata
    Metadata of a task.

      Method Details

        taskId

        TaskId taskId()
        Task ID of the task.
        Returns: task ID consisting of subtopology and partition ID

        topicPartitions

        Set<TopicPartition> topicPartitions()
        Source topic partitions of the task.
        Returns: source topic partitions

        committedOffsets

        Map<TopicPartition,Long> committedOffsets()
        Offsets of the source topic partitions committed so far by the task.
        Returns: map from source topic partitions to committed offsets

        endOffsets

        Map<TopicPartition,Long> endOffsets()
        End offsets of the source topic partitions of the task.
        Returns: map from source topic partitions to end offsets

        timeCurrentIdlingStarted

        Optional<Long> timeCurrentIdlingStarted()
        Time when task idling started. If the task is not currently idling, an empty Optional is returned.
        Returns: time when task idling started, or an empty Optional if the task is currently not idling

        equals

        boolean equals(Object o)
        Compares the specified object with this TaskMetadata. Returns true if and only if the specified object is also a TaskMetadata and both taskId() and topicPartitions() are equal.
        Overrides: equals in class Object
        Returns: true if this object is the same as the obj argument; false otherwise.

        hashCode

        int hashCode()
        Returns the hash code value for this TaskMetadata, defined as:
        Objects.hash(taskId(), topicPartitions());
        Overrides: hashCode in class Object
        Returns: a hash code value for this object.
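    A hedged sketch of estimating how far a task is behind from this metadata; task is assumed to be a TaskMetadata obtained from ThreadMetadata.activeTasks(), and java.util.Map plus org.apache.kafka.common.TopicPartition imports are assumed.

        Map<TopicPartition, Long> committed = task.committedOffsets();
        for (Map.Entry<TopicPartition, Long> entry : task.endOffsets().entrySet()) {
            long committedOffset = committed.getOrDefault(entry.getKey(), 0L);
            long lag = Math.max(0L, entry.getValue() - committedOffset);   // rough per-partition lag
            System.out.printf("task %s, %s: ~%d records behind%n", task.taskId(), entry.getKey(), lag);
        }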
diff --git a/static/41/javadoc/org/apache/kafka/streams/TestInputTopic.html b/static/41/javadoc/org/apache/kafka/streams/TestInputTopic.html (new file)

    TestInputTopic (kafka 4.1.0 API)

    Class TestInputTopic<K,V>

    java.lang.Object
      org.apache.kafka.streams.TestInputTopic<K,V>

    Type Parameters:
    K - the type of the record key
    V - the type of the record value

    public class TestInputTopic<K,V> extends Object
    TestInputTopic is used to pipe records to a topic in TopologyTestDriver. To use TestInputTopic, create a new instance via TopologyTestDriver.createInputTopic(String, Serializer, Serializer). In actual test code, you can pipe new record values, keys and values, or lists of KeyValue pairs. If you have multiple source topics, you need to create a TestInputTopic for each.

    Processing messages

         private TestInputTopic<Long, String> inputTopic;
         ...
         inputTopic = testDriver.createInputTopic(INPUT_TOPIC, longSerializer, stringSerializer);
         ...
         inputTopic.pipeInput("Hello");

      Method Summary

      void advanceTime(Duration advance)
      void pipeInput(K key, V value)
      void pipeInput(K key, V value, long timestampMs)
      void pipeInput(K key, V value, Instant timestamp)
      void pipeInput(TestRecord<K,V> record)
      void pipeInput(V value)
      void pipeInput(V value, Instant timestamp)
      void pipeKeyValueList(List<KeyValue<K,V>> keyValues)
      void pipeKeyValueList(List<KeyValue<K,V>> keyValues, Instant startTimestamp, Duration advance)
      void pipeRecordList(List<? extends TestRecord<K,V>> records)
      void pipeValueList(List<V> values)
      void pipeValueList(List<V> values, Instant startTimestamp, Duration advance)
      String toString()

      Methods inherited from class java.lang.Object

      equals, getClass, hashCode, notify, notifyAll, wait, wait, wait

      Method Details

        advanceTime

        public void advanceTime(Duration advance)
        Advances the internally tracked event time of this input topic. Each time a record without an explicitly defined timestamp is piped, the current topic event time is used as the record timestamp.
        Note: advancing the event time on the input topic does not advance the tracked stream time in TopologyTestDriver as long as no new input records are piped. Furthermore, it does not advance the wall-clock time of TopologyTestDriver.
        Parameters: advance - the duration of time to advance

        pipeInput

        public void pipeInput(TestRecord<K,V> record)
        Send an input record with the given record on the topic and then commit the records. May auto-advance topic time.
        Parameters: record - the record to be sent

        pipeInput

        public void pipeInput(V value)
        Send an input record with the given value on the topic and then commit the records. May auto-advance topic time.
        Parameters: value - the record value

        pipeInput

        public void pipeInput(K key, V value)
        Send an input record with the given key and value on the topic and then commit the records. May auto-advance topic time.
        Parameters: key - the record key; value - the record value

        pipeInput

        public void pipeInput(V value, Instant timestamp)
        Send an input record with the given value and timestamp on the topic and then commit the records. Does not auto-advance internally tracked time.
        Parameters: value - the record value; timestamp - the record timestamp

        pipeInput

        public void pipeInput(K key, V value, long timestampMs)
        Send an input record with the given key, value and timestamp on the topic and then commit the records. Does not auto-advance internally tracked time.
        Parameters: key - the record key; value - the record value; timestampMs - the record timestamp

        pipeInput

        public void pipeInput(K key, V value, Instant timestamp)
        Send an input record with the given key, value and timestamp on the topic and then commit the records. Does not auto-advance internally tracked time.
        Parameters: key - the record key; value - the record value; timestamp - the record timestamp

        pipeRecordList

        public void pipeRecordList(List<? extends TestRecord<K,V>> records)
        Send input records with the given TestRecord list on the topic, then commit each record individually. The timestamp will be generated based on the constructor-provided start time, and time will auto-advance.
        Parameters: records - the list of TestRecord records

        pipeKeyValueList

        public void pipeKeyValueList(List<KeyValue<K,V>> keyValues)
        Send input records with the given KeyValue list on the topic, then commit each record individually. The timestamp will be generated based on the constructor-provided start time, and time will auto-advance based on the autoAdvance setting.
        Parameters: keyValues - the list of KeyValue records

        pipeValueList

        public void pipeValueList(List<V> values)
        Send input records with the given value list on the topic, then commit each record individually. The timestamp will be generated based on the constructor-provided start time, and time will auto-advance based on the autoAdvance setting.
        Parameters: values - the list of values

        pipeKeyValueList

        public void pipeKeyValueList(List<KeyValue<K,V>> keyValues, Instant startTimestamp, Duration advance)
        Send input records with the given KeyValue list on the topic, then commit each record individually. Does not auto-advance internally tracked time.
        Parameters: keyValues - the list of KeyValue records; startTimestamp - the timestamp for the first generated record; advance - the time difference between two consecutive generated records

        pipeValueList

        public void pipeValueList(List<V> values, Instant startTimestamp, Duration advance)
        Send input records with the given value list on the topic, then commit each record individually. The timestamp will be generated based on the constructor-provided start time, and time will auto-advance based on the autoAdvance setting.
        Parameters: values - the list of values; startTimestamp - the timestamp for the first generated record; advance - the time difference between two consecutive generated records

        toString

        public String toString()
        Overrides: toString in class Object
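    A minimal test sketch using the class above; the topology under test, topic name, serializers, and property values are illustrative assumptions.

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "topology-test");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");   // never contacted by the test driver

        try (TopologyTestDriver testDriver = new TopologyTestDriver(topology, props)) {
            TestInputTopic<String, String> inputTopic = testDriver.createInputTopic(
                    "input-topic", new StringSerializer(), new StringSerializer());
            inputTopic.pipeInput("key-1", "value-1");                                        // event time auto-advances
            inputTopic.pipeInput("key-2", "value-2", Instant.parse("2024-01-01T00:00:00Z")); // explicit timestamp
        }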
diff --git a/static/41/javadoc/org/apache/kafka/streams/TestOutputTopic.html b/static/41/javadoc/org/apache/kafka/streams/TestOutputTopic.html (new file)

    TestOutputTopic (kafka 4.1.0 API)

    Class TestOutputTopic<K,V>

    java.lang.Object
      org.apache.kafka.streams.TestOutputTopic<K,V>

    Type Parameters:
    K - the type of the record key
    V - the type of the record value

    public class TestOutputTopic<K,V> extends Object
    TestOutputTopic is used to read records from a topic in TopologyTestDriver. To use TestOutputTopic, create a new instance via TopologyTestDriver.createOutputTopic(String, Deserializer, Deserializer). In actual test code, you can read record values, keys, KeyValue pairs, or TestRecord objects. If you have multiple source topics, you need to create a TestOutputTopic for each.
    If you need to test key, value, and headers, use the readRecord() methods. Using readKeyValue() you get a KeyValue pair, and thus don't get access to the record's timestamp or headers. Similarly, using readValue() you only get the value of a record.

    Processing records

         private TestOutputTopic<String, Long> outputTopic;
          ...
         outputTopic = testDriver.createOutputTopic(OUTPUT_TOPIC, stringDeserializer, longDeserializer);
         ...
         assertThat(outputTopic.readValue()).isEqual(1);

      Method Details

        readValue

        public V readValue()
        Read one record from the output topic and return the record's value.
        Returns: the next value for the output topic.

        readKeyValue

        public KeyValue<K,V> readKeyValue()
        Read one record from the output topic and return its key and value as a pair.
        Returns: the next output as a KeyValue.

        readRecord

        public TestRecord<K,V> readRecord()
        Read one record from the output topic.
        Returns: the next output as a TestRecord.

        readRecordsToList

        public List<TestRecord<K,V>> readRecordsToList()
        Read the output to a List. This method can be used if the result is considered a stream. If the result is considered a table, the list will contain all updates, i.e., a key might be contained multiple times. If you are only interested in the last table update (i.e., the final table state), you can use readKeyValuesToMap() instead.
        Returns: list of output records.

        readKeyValuesToMap

        public Map<K,V> readKeyValuesToMap()
        Read the output to a map. This method can be used if the result is considered a table and you are only interested in the last table update (i.e., the final table state). If the result is considered a stream, you can use readRecordsToList() instead, which contains all updates, i.e., a key might be contained multiple times. If the last update to a key is a delete/tombstone, the key will still be in the map (with a null value).
        Returns: map of output records by key.

        readKeyValuesToList

        public List<KeyValue<K,V>> readKeyValuesToList()
        Read all KeyValues from the topic to a List.
        Returns: list of output KeyValues.

        readValuesToList

        public List<V> readValuesToList()
        Read all values from the topic to a List.
        Returns: list of output values.

        getQueueSize

        public final long getQueueSize()
        Get the number of unread records in the topic queue.
        Returns: size of the topic queue.

        isEmpty

        public final boolean isEmpty()
        Verify whether the topic queue is empty.
        Returns: true if there are no more records in the topic queue.

        toString

        public String toString()
        Overrides: toString in class Object
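    Continuing the same hedged test sketch, reading the results back out; the topic name, deserializers, and the JUnit assertTrue call are again illustrative assumptions.

        TestOutputTopic<String, Long> outputTopic = testDriver.createOutputTopic(
                "output-topic", new StringDeserializer(), new LongDeserializer());

        List<TestRecord<String, Long>> allUpdates = outputTopic.readRecordsToList(); // stream view: every update
        Map<String, Long> finalState = outputTopic.readKeyValuesToMap();             // table view: latest value per key
        assertTrue(outputTopic.isEmpty());                                           // queue fully drained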
diff --git a/static/41/javadoc/org/apache/kafka/streams/ThreadMetadata.html b/static/41/javadoc/org/apache/kafka/streams/ThreadMetadata.html (new file)

    ThreadMetadata (kafka 4.1.0 API)

    Interface ThreadMetadata

    public interface ThreadMetadata
    Metadata of a stream thread.

      Method Details

        threadState

        String threadState()
        State of the stream thread.
        Returns: the state

        threadName

        String threadName()
        Name of the stream thread.
        Returns: the name

        activeTasks

        Set<TaskMetadata> activeTasks()
        Metadata of the active tasks assigned to the stream thread.
        Returns: metadata of the active tasks

        standbyTasks

        Set<TaskMetadata> standbyTasks()
        Metadata of the standby tasks assigned to the stream thread.
        Returns: metadata of the standby tasks

        consumerClientId

        String consumerClientId()
        Client ID of the Kafka consumer used by the stream thread.
        Returns: client ID of the Kafka consumer

        restoreConsumerClientId

        String restoreConsumerClientId()
        Client ID of the restore Kafka consumer used by the stream thread.
        Returns: client ID of the restore Kafka consumer

        producerClientIds

        Set<String> producerClientIds()
        Client IDs of the Kafka producers used by the stream thread.
        Returns: client IDs of the Kafka producers

        adminClientId

        String adminClientId()
        Client ID of the admin client used by the stream thread.
        Returns: client ID of the admin client

        equals

        boolean equals(Object o)
        Compares the specified object with this ThreadMetadata. Returns true if and only if the specified object is also a ThreadMetadata, threadName() and threadState() are equal, activeTasks() and standbyTasks() contain the same elements, mainConsumerClientId(), restoreConsumerClientId(), and adminClientId() are equal, and producerClientIds() contain the same elements.
        Overrides: equals in class Object
        Returns: true if this object is the same as the obj argument; false otherwise.

        hashCode

        int hashCode()
        Returns the hash code value for this ThreadMetadata, defined as:
        Objects.hash(threadName, threadState, activeTasks, standbyTasks, mainConsumerClientId, restoreConsumerClientId, producerClientIds, adminClientId);
        Overrides: hashCode in class Object
        Returns: a hash code value for this object.
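    A hedged sketch of inspecting thread metadata at runtime; streams is an assumed running KafkaStreams instance.

        for (ThreadMetadata thread : streams.metadataForLocalThreads()) {
            System.out.printf("%s [%s]: %d active, %d standby tasks%n",
                    thread.threadName(), thread.threadState(),
                    thread.activeTasks().size(), thread.standbyTasks().size());
            for (TaskMetadata task : thread.activeTasks()) {
                System.out.printf("  %s reads %s%n", task.taskId(), task.topicPartitions());
            }
        }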
diff --git a/static/41/javadoc/org/apache/kafka/streams/Topology.AutoOffsetReset.html b/static/41/javadoc/org/apache/kafka/streams/Topology.AutoOffsetReset.html (new file)

    Topology.AutoOffsetReset (kafka 4.1.0 API)

    Enum Class Topology.AutoOffsetReset

    java.lang.Object
      java.lang.Enum<Topology.AutoOffsetReset>
        org.apache.kafka.streams.Topology.AutoOffsetReset

    All Implemented Interfaces: Serializable, Comparable<Topology.AutoOffsetReset>, Constable
    Enclosing class: Topology

    @Deprecated
    public static enum Topology.AutoOffsetReset extends Enum<Topology.AutoOffsetReset>
    Deprecated. Since 4.0. Use AutoOffsetReset instead.
    Sets the auto.offset.reset configuration when adding a source processor or when creating KStream or KTable via StreamsBuilder.

      Enum Constant Details

        EARLIEST
        LATEST

      Method Details

        values

        public static Topology.AutoOffsetReset[] values()
        Deprecated.
        Returns an array containing the constants of this enum class, in the order they are declared.
        Returns: an array containing the constants of this enum class, in the order they are declared

        valueOf

        public static Topology.AutoOffsetReset valueOf(String name)
        Deprecated.
        Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not permitted.)
        Parameters: name - the name of the enum constant to be returned.
        Returns: the enum constant with the specified name
        Throws: IllegalArgumentException - if this enum class has no constant with the specified name; NullPointerException - if the argument is null
diff --git a/static/41/javadoc/org/apache/kafka/streams/Topology.html b/static/41/javadoc/org/apache/kafka/streams/Topology.html (new file)

    Topology (kafka 4.1.0 API)

    Class Topology

    java.lang.Object
      org.apache.kafka.streams.Topology

    public class Topology extends Object
    A logical representation of a ProcessorTopology. A topology is a graph of sources, processors, and sinks. A SourceNode is a node in the graph that consumes one or more Kafka topics and forwards them to its successor nodes. A Processor is a node in the graph that receives input records from upstream nodes, processes the records, and optionally forwards new records to one, multiple, or all of its downstream nodes. Finally, a SinkNode is a node in the graph that receives records from upstream nodes and writes them to a Kafka topic. A Topology allows you to construct a graph of these nodes, which can then be passed into a new KafkaStreams instance that will begin consuming, processing, and producing records.
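    A minimal sketch of wiring such a graph with the Processor API; node and topic names are illustrative, and props is assumed to hold the usual StreamsConfig properties.

        Topology topology = new Topology();
        topology.addSource("Source", "input-topic");          // SourceNode consuming "input-topic"
        topology.addSink("Sink", "output-topic", "Source");   // SinkNode writing records received from "Source"

        KafkaStreams streams = new KafkaStreams(topology, props);
        streams.start();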
diff --git a/static/41/javadoc/org/apache/kafka/streams/TopologyConfig.TaskConfig.html b/static/41/javadoc/org/apache/kafka/streams/TopologyConfig.TaskConfig.html (new file)

    TopologyConfig.TaskConfig (kafka 4.1.0 API)

    Class TopologyConfig.TaskConfig

    java.lang.Object
      org.apache.kafka.streams.TopologyConfig.TaskConfig

    Enclosing class: TopologyConfig

    public static class TopologyConfig.TaskConfig extends Object

      Field Details

        public final long maxTaskIdleMs
        public final long taskTimeoutMs
        public final int maxBufferedSize
        public final TimestampExtractor timestampExtractor
        public final DeserializationExceptionHandler deserializationExceptionHandler
        public final ProcessingExceptionHandler processingExceptionHandler
        public final boolean eosEnabled
diff --git a/static/41/javadoc/org/apache/kafka/streams/TopologyConfig.html b/static/41/javadoc/org/apache/kafka/streams/TopologyConfig.html (new file)

    TopologyConfig (kafka 4.1.0 API)

    Class TopologyConfig

    java.lang.Object
      org.apache.kafka.common.config.AbstractConfig
        org.apache.kafka.streams.TopologyConfig

    public final class TopologyConfig extends AbstractConfig
    Streams configs that apply at the topology level. The values in the StreamsConfig parameter of the KafkaStreams constructor or the KafkaStreamsNamedTopologyWrapper constructor (deprecated) determine the defaults, which can then be overridden for specific topologies by passing them in when creating the topology builders via the StreamsBuilder(TopologyConfig) constructor for DSL applications, or the Topology(TopologyConfig) constructor for PAPI applications.
    Note that some configs, such as the processor.wrapper.class config, can only take effect while the topology is being built, which means they have to be passed in as a TopologyConfig to the Topology(TopologyConfig) constructor (PAPI) or the StreamsBuilder(TopologyConfig) constructor (DSL). If they are only set in the configs passed in to the KafkaStreams constructor, it will be too late for them to be applied and the config will be ignored.

      Field Details

        public final String topologyName
        public final boolean eosEnabled
        public final StreamsConfig applicationConfigs
        public final Properties topologyOverrides
        public final int maxBufferedSize
        public final long cacheSize
        public final long maxTaskIdleMs
        public final long taskTimeoutMs
        public final String storeType
        public final Class<?> dslStoreSuppliers
        public final Supplier<TimestampExtractor> timestampExtractorSupplier
        public final Supplier<DeserializationExceptionHandler> deserializationExceptionHandlerSupplier
        public final Supplier<ProcessingExceptionHandler> processingExceptionHandlerSupplier
        public final boolean ensureExplicitInternalResourceNaming

      Constructor Details

        public TopologyConfig(StreamsConfig configs)
        public TopologyConfig(String topologyName, StreamsConfig globalAppConfigs, Properties topologyOverrides)

      Method Details
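    A hedged sketch of overriding a setting for a single topology using the constructors above; names and values are illustrative assumptions.

        Properties appProps = new Properties();
        appProps.put(StreamsConfig.APPLICATION_ID_CONFIG, "topology-config-demo");
        appProps.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        StreamsConfig appConfig = new StreamsConfig(appProps);

        Properties overrides = new Properties();
        overrides.put(StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG, 0);   // disable caching for this topology only

        TopologyConfig topologyConfig = new TopologyConfig("my-topology", appConfig, overrides);
        StreamsBuilder builder = new StreamsBuilder(topologyConfig);         // DSL; use new Topology(topologyConfig) for the PAPI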
diff --git a/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.GlobalStore.html b/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.GlobalStore.html (new file)

    TopologyDescription.GlobalStore (kafka 4.1.0 API)

    Interface TopologyDescription.GlobalStore

    Enclosing interface: TopologyDescription

    public static interface TopologyDescription.GlobalStore
    Represents a global store. Adding a global store results in adding a source node and one stateful processor node. Note that all added global stores form a single unit (similar to a TopologyDescription.Subtopology) even if different global stores are not connected to each other. Furthermore, global stores are available to all processors without connecting them explicitly, and thus global stores will never be part of any TopologyDescription.Subtopology.

      Method Details

        source

        TopologyDescription.Source source()
        The source node reading from a "global" topic.
        Returns: the "global" source node

        processor

        TopologyDescription.Processor processor()
        The processor node maintaining the global store.
        Returns: the "global" processor node

        id

        int id()
diff --git a/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.Node.html b/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.Node.html (new file)

    TopologyDescription.Node (kafka 4.1.0 API)

    Interface TopologyDescription.Node

    All Known Subinterfaces: TopologyDescription.Processor, TopologyDescription.Sink, TopologyDescription.Source
    Enclosing interface: TopologyDescription

    public static interface TopologyDescription.Node
    A node of a topology. Can be a source, sink, or processor node.

      Method Details

        name

        String name()
        The name of the node. Will never be null.
        Returns: the name of the node

        predecessors

        Set<TopologyDescription.Node> predecessors()
        The predecessors of this node within a sub-topology. Note that sources do not have any predecessors. Will never be null.
        Returns: set of all predecessors

        successors

        Set<TopologyDescription.Node> successors()
        The successors of this node within a sub-topology. Note that sinks do not have any successors. Will never be null.
        Returns: set of all successors
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.Processor.html b/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.Processor.html new file mode 100644 index 000000000..3e99ff12b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.Processor.html @@ -0,0 +1,144 @@ + + + + +TopologyDescription.Processor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TopologyDescription.Processor

    +
    +
    +
    +
    All Superinterfaces:
    +
    TopologyDescription.Node
    +
    +
    +
    Enclosing interface:
    +
    TopologyDescription
    +
    +
    +
    public static interface TopologyDescription.Processor +extends TopologyDescription.Node
    +
    A processor node of a topology.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        stores

        +
        Set<String> stores()
        +
        The names of all connected stores.
        +
        +
        Returns:
        +
        set of store names
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.Sink.html b/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.Sink.html new file mode 100644 index 000000000..7a72ff2c4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.Sink.html @@ -0,0 +1,162 @@ + + + + +TopologyDescription.Sink (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TopologyDescription.Sink

    +
    +
    +
    +
    All Superinterfaces:
    +
    TopologyDescription.Node
    +
    +
    +
    Enclosing interface:
    +
    TopologyDescription
    +
    +
    +
    public static interface TopologyDescription.Sink +extends TopologyDescription.Node
    +
    A sink node of a topology.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        topic

        +
        String topic()
        +
        The topic name this sink node is writing to. + Could be null if the topic name can only be dynamically determined based on TopicNameExtractor
        +
        +
        Returns:
        +
        a topic name
        +
        +
        +
      • +
      • +
        +

        topicNameExtractor

        +
        TopicNameExtractor<?,?> topicNameExtractor()
        +
        The TopicNameExtractor class that this sink node uses to dynamically extract the topic name to write to. + Could be null if the topic name is not dynamically determined.
        +
        +
        Returns:
        +
        the TopicNameExtractor class used get the topic name
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
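The dynamic-routing case that topic() and topicNameExtractor() describe comes from building a topology whose sink uses a TopicNameExtractor instead of a fixed topic name. A minimal Java sketch follows; the input topic "events" and the per-key naming scheme are assumptions made for illustration, not something this page prescribes.

import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.KStream;

public class DynamicSinkExample {
    public static Topology build() {
        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> events = builder.stream("events"); // assumed input topic
        // Route each record to a per-key topic; the resulting sink node reports topic() == null
        // and exposes the lambda below via topicNameExtractor().
        events.to((key, value, recordContext) -> "events-" + key);
        return builder.build();
    }
}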
diff --git a/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.Source.html b/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.Source.html new file mode 100644 index 000000000..fe5b79ff9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.Source.html @@ -0,0 +1,160 @@
+TopologyDescription.Source (kafka 4.1.0 API)
+Interface TopologyDescription.Source
+All Superinterfaces: TopologyDescription.Node
+Enclosing interface: TopologyDescription
+public static interface TopologyDescription.Source extends TopologyDescription.Node
+A source node of a topology.
+Method Details
+  topicSet() -- Set<String> topicSet(). The topic names this source node is reading from. Returns: a set of topic names.
+  topicPattern() -- Pattern topicPattern(). The pattern used to match the topic names this source node is reading from. Returns: the pattern used to match topic names.
diff --git a/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.Subtopology.html b/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.Subtopology.html new file mode 100644 index 000000000..85c27f5bf --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.Subtopology.html @@ -0,0 +1,157 @@
+TopologyDescription.Subtopology (kafka 4.1.0 API)
+Interface TopologyDescription.Subtopology
+Enclosing interface: TopologyDescription
+public static interface TopologyDescription.Subtopology
+A connected sub-graph of a Topology. Nodes of a Subtopology are connected directly or indirectly via state stores (i.e., if multiple processors share the same state).
+Method Details
+  id() -- int id(). Internally assigned unique ID. Returns: the ID of the sub-topology.
+  nodes() -- All nodes of this sub-topology. Returns: set of all nodes within the sub-topology.
diff --git a/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.html b/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.html new file mode 100644 index 000000000..e99cd3d2c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/TopologyDescription.html @@ -0,0 +1,199 @@
+TopologyDescription (kafka 4.1.0 API)
+Interface TopologyDescription
+public interface TopologyDescription
+A meta representation of a topology. The nodes of a topology are grouped into sub-topologies if they are connected. In contrast, two sub-topologies are not connected but can be linked to each other via topics, i.e., if one sub-topology writes into a topic and another sub-topology reads from the same topic. Message forwards using custom Processors are not considered in the topology graph.
+When KafkaStreams.start() is called, different sub-topologies will be constructed and executed as independent tasks.
+Method Details
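As a quick orientation for how these description pages fit together, the following hedged Java sketch obtains a TopologyDescription from Topology.describe() and walks it. The subtopologies() and globalStores() accessor names are assumed from the enclosing pages above and from common usage, and the pass-through topology is purely illustrative.

import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.TopologyDescription;

public class DescribeTopologyExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic").to("output-topic"); // assumed topic names

        Topology topology = builder.build();
        TopologyDescription description = topology.describe();

        // Walk every sub-topology and its nodes (see TopologyDescription.Subtopology / Node above).
        for (TopologyDescription.Subtopology subtopology : description.subtopologies()) {
            System.out.println("Sub-topology " + subtopology.id());
            for (TopologyDescription.Node node : subtopology.nodes()) {
                System.out.println("  node " + node.name()
                        + " predecessors=" + node.predecessors().size()
                        + " successors=" + node.successors().size());
            }
        }
        // Global stores are reported separately and never belong to a sub-topology.
        description.globalStores().forEach(store ->
                System.out.println("global store source=" + store.source().name()
                        + " processor=" + store.processor().name()));
    }
}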
diff --git a/static/41/javadoc/org/apache/kafka/streams/TopologyTestDriver.html b/static/41/javadoc/org/apache/kafka/streams/TopologyTestDriver.html new file mode 100644 index 000000000..e4b3aa9be --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/TopologyTestDriver.html @@ -0,0 +1,794 @@
+TopologyTestDriver (kafka 4.1.0 API)
+Class TopologyTestDriver
+java.lang.Object -> org.apache.kafka.streams.TopologyTestDriver
+All Implemented Interfaces: Closeable, AutoCloseable
+public class TopologyTestDriver extends Object implements Closeable
+This class makes it easier to write tests to verify the behavior of topologies created with Topology or StreamsBuilder. You can test simple topologies that have a single processor, or very complex topologies that have multiple sources, processors, sinks, or sub-topologies. Best of all, the class works without a real Kafka broker, so the tests execute very quickly with very little overhead.
+Using the TopologyTestDriver in tests is easy: simply instantiate the driver and provide a Topology (cf. StreamsBuilder.build()) and config, create and use a TestInputTopic to supply input records to the topology, and then create and use a TestOutputTopic to read and verify any output records produced by the topology.
+Although the driver doesn't use a real Kafka broker, it does simulate Kafka consumers and producers that read and write raw byte[] messages. You can let TestInputTopic and TestOutputTopic handle the conversion from regular Java objects to raw bytes.
+Driver setup
+In order to create a TopologyTestDriver instance, you need a Topology and a config. The configuration needs to be representative of what you'd supply to the real topology, so that means including several key properties (cf. StreamsConfig). For example, the following code fragment creates a configuration that specifies a timestamp extractor, and default serializers and deserializers for string keys and values:
+ Properties props = new Properties();
+ props.setProperty(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, CustomTimestampExtractor.class.getName());
+ props.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
+ props.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
+ Topology topology = ...
+ TopologyTestDriver driver = new TopologyTestDriver(topology, props);
+Note that the TopologyTestDriver processes input records synchronously. This implies that the commit.interval.ms and cache.max.bytes.buffering configurations have no effect. The driver behaves as if both configs were set to zero, i.e., as if a "commit" (and thus "flush") happened after each input record.
+Processing messages
+Your test can supply new input records on any of the topics that the topology's sources consume. This test driver simulates single-partitioned input topics. Here's an example of an input message on the topic named input-topic:
+ TestInputTopic<String, String> inputTopic = driver.createInputTopic("input-topic", stringSerdeSerializer, stringSerializer);
+ inputTopic.pipeInput("key1", "value1");
+When TestInputTopic.pipeInput(Object, Object) is called, the driver passes the input message through to the appropriate source that consumes the named topic, and will invoke the processor(s) downstream of the source. If your topology's processors forward messages to sinks, your test can then consume these output messages to verify they match the expected outcome. For example, if our topology should have generated 2 messages on output-topic-1 and 1 message on output-topic-2, then our test can obtain these messages using the TestOutputTopic.readKeyValue() method:
+ TestOutputTopic<String, String> outputTopic1 = driver.createOutputTopic("output-topic-1", stringDeserializer, stringDeserializer);
+ TestOutputTopic<String, String> outputTopic2 = driver.createOutputTopic("output-topic-2", stringDeserializer, stringDeserializer);
+ KeyValue<String, String> record1 = outputTopic1.readKeyValue();
+ KeyValue<String, String> record2 = outputTopic2.readKeyValue();
+ KeyValue<String, String> record3 = outputTopic1.readKeyValue();
+Again, our example topology generates messages with string keys and values, so we supply our string deserializer instance for use on both the keys and values. Your test logic can then verify whether these output records are correct.
+Note that calling pipeInput() will also trigger event-time based punctuation callbacks. However, you won't trigger wall-clock type punctuations, which you must trigger manually via advanceWallClockTime(Duration).
+Finally, when completed, make sure your tests close() the driver to release all resources and processors.
+Processor state
+Some processors use Kafka state storage, so this driver class provides the generic getStateStore(String) as well as store-type specific methods so that your tests can check the underlying state store(s) used by your topology's processors. In our previous example, after we supplied a single input message and checked the three output messages, our test could also check the key value store to verify the processor correctly added, removed, or updated internal state. Or, our test might have pre-populated some state before submitting the input message, and verified afterward that the processor(s) correctly updated the state.
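Pulling the fragments above into one place, a self-contained test might look roughly like the following sketch; the pass-through topology, topic names, and dummy bootstrap address are assumptions made for illustration rather than anything this page mandates.

import java.util.Properties;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TestOutputTopic;
import org.apache.kafka.streams.TopologyTestDriver;

public class PassThroughTopologyTest {
    public static void main(String[] args) {
        // Assumed topology: copy every record from "input-topic" to "output-topic".
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic").to("output-topic");

        Properties props = new Properties();
        props.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "pass-through-test");
        props.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"); // never contacted
        props.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        props.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

        try (TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
            TestInputTopic<String, String> input =
                    driver.createInputTopic("input-topic", new StringSerializer(), new StringSerializer());
            TestOutputTopic<String, String> output =
                    driver.createOutputTopic("output-topic", new StringDeserializer(), new StringDeserializer());

            // Pipe one record in and verify it comes out unchanged.
            input.pipeInput("key1", "value1");
            KeyValue<String, String> record = output.readKeyValue();
            if (!record.equals(KeyValue.pair("key1", "value1"))) {
                throw new AssertionError("unexpected record: " + record);
            }
        }
    }
}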
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/BrokerNotFoundException.html b/static/41/javadoc/org/apache/kafka/streams/errors/BrokerNotFoundException.html new file mode 100644 index 000000000..07b5310ba --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/BrokerNotFoundException.html @@ -0,0 +1,176 @@
+BrokerNotFoundException (kafka 4.1.0 API)
+Class BrokerNotFoundException
+All Implemented Interfaces: Serializable
+public class BrokerNotFoundException extends StreamsException
+Indicates that none of the specified brokers could be found.
+Constructors: BrokerNotFoundException(String message); BrokerNotFoundException(String message, Throwable throwable); BrokerNotFoundException(Throwable throwable)
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/DefaultProductionExceptionHandler.html b/static/41/javadoc/org/apache/kafka/streams/errors/DefaultProductionExceptionHandler.html new file mode 100644 index 000000000..c0a6723ad --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/DefaultProductionExceptionHandler.html @@ -0,0 +1,245 @@
+DefaultProductionExceptionHandler (kafka 4.1.0 API)
+Class DefaultProductionExceptionHandler
+java.lang.Object -> org.apache.kafka.streams.errors.DefaultProductionExceptionHandler
+All Implemented Interfaces: Configurable, ProductionExceptionHandler
+public class DefaultProductionExceptionHandler extends Object implements ProductionExceptionHandler
+ProductionExceptionHandler that always instructs streams to fail when an exception happens while attempting to produce result records.
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/DeserializationExceptionHandler.DeserializationHandlerResponse.html b/static/41/javadoc/org/apache/kafka/streams/errors/DeserializationExceptionHandler.DeserializationHandlerResponse.html new file mode 100644 index 000000000..ed1f9e6ac --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/DeserializationExceptionHandler.DeserializationHandlerResponse.html @@ -0,0 +1,272 @@
+DeserializationExceptionHandler.DeserializationHandlerResponse (kafka 4.1.0 API)
+Enum Class DeserializationExceptionHandler.DeserializationHandlerResponse
+java.lang.Object -> java.lang.Enum<DeserializationExceptionHandler.DeserializationHandlerResponse> -> org.apache.kafka.streams.errors.DeserializationExceptionHandler.DeserializationHandlerResponse
+All Implemented Interfaces: Serializable, Comparable<DeserializationExceptionHandler.DeserializationHandlerResponse>, Constable
+Enclosing interface: DeserializationExceptionHandler
+public static enum DeserializationExceptionHandler.DeserializationHandlerResponse extends Enum<DeserializationExceptionHandler.DeserializationHandlerResponse>
+Enumeration that describes the response from the exception handler.
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/DeserializationExceptionHandler.html b/static/41/javadoc/org/apache/kafka/streams/errors/DeserializationExceptionHandler.html new file mode 100644 index 000000000..7f7123b74 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/DeserializationExceptionHandler.html @@ -0,0 +1,204 @@
+DeserializationExceptionHandler (kafka 4.1.0 API)
+Interface DeserializationExceptionHandler
+All Superinterfaces: Configurable
+All Known Implementing Classes: LogAndContinueExceptionHandler, LogAndFailExceptionHandler
+public interface DeserializationExceptionHandler extends Configurable
+Interface that specifies how an exception from source node deserialization (e.g., reading from Kafka) should be handled.
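For reference, selecting one of the implementing classes above is a configuration-only change; here is a minimal, hedged sketch. The literal config key is an assumption: recent releases document it as deserialization.exception.handler, while older ones used default.deserialization.exception.handler.

import java.util.Properties;

import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;

public class DeserializationHandlerConfigExample {
    public static Properties streamsConfig() {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-app");            // assumed application id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        // Skip records that cannot be deserialized instead of failing the stream thread.
        // Config key is an assumption: newer releases use "deserialization.exception.handler",
        // older ones "default.deserialization.exception.handler".
        props.put("deserialization.exception.handler", LogAndContinueExceptionHandler.class.getName());
        return props;
    }
}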
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/ErrorHandlerContext.html b/static/41/javadoc/org/apache/kafka/streams/errors/ErrorHandlerContext.html new file mode 100644 index 000000000..f064000b9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/ErrorHandlerContext.html @@ -0,0 +1,347 @@
+ErrorHandlerContext (kafka 4.1.0 API)
+Interface ErrorHandlerContext
+public interface ErrorHandlerContext
+This interface allows user code to inspect the context of a record that has failed during processing. ErrorHandlerContext instances are passed into DeserializationExceptionHandler, ProcessingExceptionHandler, or ProductionExceptionHandler depending on which error occurred.
+Method Details
+  topic() -- String topic(). Return the topic name of the current input record; could be null if it is not available. For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation callback, the record won't have an associated topic. Another example is KTable.transformValues(ValueTransformerWithKeySupplier, String...) (and siblings), which do not always guarantee to provide a valid topic name, as they might be executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL. Additionally, when writing into a changelog topic, there is no associated input record, and thus no topic name is available. Returns: the topic name.
+  partition() -- int partition(). Return the partition ID of the current input record; could be -1 if it is not available. The same punctuation, KTable.transformValues, and changelog-write caveats as for topic() apply. Returns: the partition ID.
+  offset() -- long offset(). Return the offset of the current input record; could be -1 if it is not available. The same punctuation, KTable.transformValues, and changelog-write caveats as for topic() apply. Returns: the offset.
+  headers() -- Headers headers(). Return the headers of the current source record; could be an empty header if they are not available. The same punctuation, KTable.transformValues, and changelog-write caveats as for topic() apply. Returns: the headers.
+  processorNodeId() -- String processorNodeId(). Return the current processor node ID. Returns: the processor node ID.
+  taskId() -- TaskId taskId(). Return the task ID. Returns: the task ID.
+  timestamp() -- long timestamp(). Return the current timestamp; could be -1 if it is not available. For example, when writing into a changelog topic, there is no associated input record, and thus no timestamp is available. If it is triggered while processing a record streamed from the source processor, the timestamp is the timestamp of the current input record, as extracted from the ConsumerRecord by the TimestampExtractor. Note that an upstream Processor might have set a new timestamp by calling forward(record.withTimestamp(...)); in particular, some Kafka Streams DSL operators set result record timestamps explicitly to guarantee deterministic results. If it is triggered while processing a record not generated from the source processor (for example, if this method is invoked from a punctuate call), the timestamp depends on the punctuation type. If it is triggered from a deserialization failure, the timestamp is the timestamp of the current rawRecord ConsumerRecord. Returns: the timestamp.
+  sourceRawKey() -- byte[] sourceRawKey(). Return the non-deserialized byte[] of the input message key if the context has been triggered by a message. If this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation callback, it will return null. If this method is invoked in a sub-topology due to a repartition, the returned key is the one sent to the repartition topic. Always returns null if this method is invoked within a ProductionExceptionHandler.handle(ErrorHandlerContext, ProducerRecord, Exception). Returns: the raw bytes of the key of the source message.
+  sourceRawValue() -- byte[] sourceRawValue(). Return the non-deserialized byte[] of the input message value if the context has been triggered by a message. The same punctuation and repartition caveats as for sourceRawKey() apply, with the returned value being the one sent to the repartition topic. Always returns null if this method is invoked within a ProductionExceptionHandler.handle(ErrorHandlerContext, ProducerRecord, Exception). Returns: the raw bytes of the value of the source message.
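To make the accessors above concrete, here is a hedged sketch of a ProductionExceptionHandler that logs the documented context fields before deciding whether to continue. The handle(ErrorHandlerContext, ProducerRecord, Exception) signature is the one referenced by sourceRawKey()/sourceRawValue() above; the byte[] key/value types of the ProducerRecord, and the choice to drop only oversized records, are assumptions.

import java.util.Map;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.streams.errors.ErrorHandlerContext;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

// Sketch of a handler that drops oversized records but fails on anything else.
public class DropTooLargeRecordsHandler implements ProductionExceptionHandler {

    @Override
    public ProductionExceptionHandlerResponse handle(final ErrorHandlerContext context,
                                                     final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        if (exception instanceof RecordTooLargeException) {
            // ErrorHandlerContext tells us where in the topology the failure happened.
            System.err.printf("Dropping oversized record bound for %s (input topic=%s partition=%d offset=%d task=%s)%n",
                    record.topic(), context.topic(), context.partition(), context.offset(), context.taskId());
            return ProductionExceptionHandlerResponse.CONTINUE;
        }
        return ProductionExceptionHandlerResponse.FAIL;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // nothing to configure in this sketch
    }
}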
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/InvalidStateStoreException.html b/static/41/javadoc/org/apache/kafka/streams/errors/InvalidStateStoreException.html new file mode 100644 index 000000000..7b3f8764e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/InvalidStateStoreException.html @@ -0,0 +1,179 @@
+InvalidStateStoreException (kafka 4.1.0 API)
+Class InvalidStateStoreException
+All Implemented Interfaces: Serializable
+Direct Known Subclasses: InvalidStateStorePartitionException, StateStoreMigratedException, StateStoreNotAvailableException, StreamsNotStartedException, StreamsRebalancingException, StreamsStoppedException, UnknownStateStoreException
+public class InvalidStateStoreException extends StreamsException
+Indicates that there was a problem when trying to access a StateStore. InvalidStateStoreException is never thrown directly; only the subclasses listed above are.
+Constructors: InvalidStateStoreException(String message); InvalidStateStoreException(String message, Throwable throwable); InvalidStateStoreException(Throwable throwable)
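Because most of these subclasses describe transient conditions (startup, rebalancing, store migration), interactive-query callers commonly retry until the store becomes available again. A minimal sketch follows, where the store name "counts" and the fixed 200 ms back-off are assumptions.

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.errors.InvalidStateStoreException;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public final class StoreLookup {

    // Retry until the "counts" store (an assumed name) becomes queryable, e.g. after a rebalance.
    public static ReadOnlyKeyValueStore<String, Long> waitForStore(final KafkaStreams streams)
            throws InterruptedException {
        while (true) {
            try {
                return streams.store(StoreQueryParameters.fromNameAndType(
                        "counts", QueryableStoreTypes.<String, Long>keyValueStore()));
            } catch (final InvalidStateStoreException transientFailure) {
                // Store not ready yet (see the subclasses above); back off and retry.
                Thread.sleep(200L);
            }
        }
    }
}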
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/InvalidStateStorePartitionException.html b/static/41/javadoc/org/apache/kafka/streams/errors/InvalidStateStorePartitionException.html new file mode 100644 index 000000000..eb08a98a0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/InvalidStateStorePartitionException.html @@ -0,0 +1,171 @@
+InvalidStateStorePartitionException (kafka 4.1.0 API)
+Class InvalidStateStorePartitionException
+All Implemented Interfaces: Serializable
+public class InvalidStateStorePartitionException extends InvalidStateStoreException
+Indicates that the specific state store being queried via StoreQueryParameters used a partitioning that is not assigned to this instance. You can use KafkaStreams.metadataForAllStreamsClients() to discover the correct instance that hosts the requested partition.
+Constructors: InvalidStateStorePartitionException(String message); InvalidStateStorePartitionException(String message, Throwable throwable)
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/LockException.html b/static/41/javadoc/org/apache/kafka/streams/errors/LockException.html new file mode 100644 index 000000000..5ff2919bb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/LockException.html @@ -0,0 +1,175 @@
+LockException (kafka 4.1.0 API)
+Class LockException
+All Implemented Interfaces: Serializable
+public class LockException extends StreamsException
+Indicates that the state store directory lock could not be acquired because another thread holds the lock.
+Constructors: LockException(String message); LockException(String message, Throwable throwable); LockException(Throwable throwable)
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/LogAndContinueExceptionHandler.html b/static/41/javadoc/org/apache/kafka/streams/errors/LogAndContinueExceptionHandler.html new file mode 100644 index 000000000..dde5f0757 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/LogAndContinueExceptionHandler.html @@ -0,0 +1,246 @@
+LogAndContinueExceptionHandler (kafka 4.1.0 API)
+Class LogAndContinueExceptionHandler
+java.lang.Object -> org.apache.kafka.streams.errors.LogAndContinueExceptionHandler
+All Implemented Interfaces: Configurable, DeserializationExceptionHandler
+public class LogAndContinueExceptionHandler extends Object implements DeserializationExceptionHandler
+Deserialization handler that logs a deserialization exception and then signals the processing pipeline to continue processing more records.
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/LogAndContinueProcessingExceptionHandler.html b/static/41/javadoc/org/apache/kafka/streams/errors/LogAndContinueProcessingExceptionHandler.html new file mode 100644 index 000000000..ffa81147a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/LogAndContinueProcessingExceptionHandler.html @@ -0,0 +1,209 @@
+LogAndContinueProcessingExceptionHandler (kafka 4.1.0 API)
+Class LogAndContinueProcessingExceptionHandler
+java.lang.Object -> org.apache.kafka.streams.errors.LogAndContinueProcessingExceptionHandler
+All Implemented Interfaces: Configurable, ProcessingExceptionHandler
+public class LogAndContinueProcessingExceptionHandler extends Object implements ProcessingExceptionHandler
+Processing exception handler that logs a processing exception and then signals the processing pipeline to continue processing more records.
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/LogAndFailExceptionHandler.html b/static/41/javadoc/org/apache/kafka/streams/errors/LogAndFailExceptionHandler.html new file mode 100644 index 000000000..ccc065d8a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/LogAndFailExceptionHandler.html @@ -0,0 +1,246 @@
+LogAndFailExceptionHandler (kafka 4.1.0 API)
+Class LogAndFailExceptionHandler
+java.lang.Object -> org.apache.kafka.streams.errors.LogAndFailExceptionHandler
+All Implemented Interfaces: Configurable, DeserializationExceptionHandler
+public class LogAndFailExceptionHandler extends Object implements DeserializationExceptionHandler
+Deserialization handler that logs a deserialization exception and then signals the processing pipeline to stop processing more records and fail.
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/LogAndFailProcessingExceptionHandler.html b/static/41/javadoc/org/apache/kafka/streams/errors/LogAndFailProcessingExceptionHandler.html new file mode 100644 index 000000000..15d1a866f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/LogAndFailProcessingExceptionHandler.html @@ -0,0 +1,209 @@
+LogAndFailProcessingExceptionHandler (kafka 4.1.0 API)
+Class LogAndFailProcessingExceptionHandler
+java.lang.Object -> org.apache.kafka.streams.errors.LogAndFailProcessingExceptionHandler
+All Implemented Interfaces: Configurable, ProcessingExceptionHandler
+public class LogAndFailProcessingExceptionHandler extends Object implements ProcessingExceptionHandler
+Processing exception handler that logs a processing exception and then signals the processing pipeline to stop processing more records and fail.
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/MissingSourceTopicException.html b/static/41/javadoc/org/apache/kafka/streams/errors/MissingSourceTopicException.html new file mode 100644 index 000000000..b320293e1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/MissingSourceTopicException.html @@ -0,0 +1,155 @@
+MissingSourceTopicException (kafka 4.1.0 API)
+Class MissingSourceTopicException
+All Implemented Interfaces: Serializable
+public class MissingSourceTopicException extends StreamsException
+Constructors: MissingSourceTopicException(String message)
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/ProcessingExceptionHandler.ProcessingHandlerResponse.html b/static/41/javadoc/org/apache/kafka/streams/errors/ProcessingExceptionHandler.ProcessingHandlerResponse.html new file mode 100644 index 000000000..f457bf8c6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/ProcessingExceptionHandler.ProcessingHandlerResponse.html @@ -0,0 +1,271 @@
+ProcessingExceptionHandler.ProcessingHandlerResponse (kafka 4.1.0 API)
+Enum Class ProcessingExceptionHandler.ProcessingHandlerResponse
+java.lang.Object -> java.lang.Enum<ProcessingExceptionHandler.ProcessingHandlerResponse> -> org.apache.kafka.streams.errors.ProcessingExceptionHandler.ProcessingHandlerResponse
+All Implemented Interfaces: Serializable, Comparable<ProcessingExceptionHandler.ProcessingHandlerResponse>, Constable
+Enclosing interface: ProcessingExceptionHandler
+public static enum ProcessingExceptionHandler.ProcessingHandlerResponse extends Enum<ProcessingExceptionHandler.ProcessingHandlerResponse>
+Field Details
+  name -- public final String name. An English description of the used option. This is for debugging only and may change.
+  id -- public final int id. The permanent and immutable id of the used option. This can't ever change.
+Method Details
+  values() -- Returns an array containing the constants of this enum class, in the order they are declared.
+  valueOf(String name) -- Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not permitted.) Throws IllegalArgumentException if this enum class has no constant with the specified name, and NullPointerException if the argument is null.
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/ProcessingExceptionHandler.html b/static/41/javadoc/org/apache/kafka/streams/errors/ProcessingExceptionHandler.html new file mode 100644 index 000000000..a4df0d296 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/ProcessingExceptionHandler.html @@ -0,0 +1,167 @@
+ProcessingExceptionHandler (kafka 4.1.0 API)
+Interface ProcessingExceptionHandler
+All Superinterfaces: Configurable
+All Known Implementing Classes: LogAndContinueProcessingExceptionHandler, LogAndFailProcessingExceptionHandler
+public interface ProcessingExceptionHandler extends Configurable
+An interface that allows user code to inspect a record that has failed processing.
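As with the deserialization handlers, choosing between the two implementing classes above is a configuration change. The following sketch assumes the processing.exception.handler config key that was introduced alongside this interface; the exact key name should be verified against the 4.1 StreamsConfig documentation.

import java.util.Properties;

import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.LogAndContinueProcessingExceptionHandler;

public class ProcessingHandlerConfigExample {
    public static Properties streamsConfig() {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-app");            // assumed application id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        // Log and skip records whose processing throws, instead of failing the stream thread.
        // The "processing.exception.handler" key is an assumption; verify against the 4.1 config docs.
        props.put("processing.exception.handler", LogAndContinueProcessingExceptionHandler.class.getName());
        return props;
    }
}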
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/ProcessorStateException.html b/static/41/javadoc/org/apache/kafka/streams/errors/ProcessorStateException.html new file mode 100644 index 000000000..bf973c1b4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/ProcessorStateException.html @@ -0,0 +1,175 @@
+ProcessorStateException (kafka 4.1.0 API)
+Class ProcessorStateException
+All Implemented Interfaces: Serializable
+public class ProcessorStateException extends StreamsException
+Indicates a processor state operation (e.g. put, get) has failed.
+Constructors: ProcessorStateException(String message); ProcessorStateException(String message, Throwable throwable); ProcessorStateException(Throwable throwable)
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/ProductionExceptionHandler.ProductionExceptionHandlerResponse.html b/static/41/javadoc/org/apache/kafka/streams/errors/ProductionExceptionHandler.ProductionExceptionHandlerResponse.html new file mode 100644 index 000000000..e1cfbb150 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/ProductionExceptionHandler.ProductionExceptionHandlerResponse.html @@ -0,0 +1,296 @@
+ProductionExceptionHandler.ProductionExceptionHandlerResponse (kafka 4.1.0 API)
+Enum Class ProductionExceptionHandler.ProductionExceptionHandlerResponse
+java.lang.Object -> java.lang.Enum<ProductionExceptionHandler.ProductionExceptionHandlerResponse> -> org.apache.kafka.streams.errors.ProductionExceptionHandler.ProductionExceptionHandlerResponse
+All Implemented Interfaces: Serializable, Comparable<ProductionExceptionHandler.ProductionExceptionHandlerResponse>, Constable
+Enclosing interface: ProductionExceptionHandler
+public static enum ProductionExceptionHandler.ProductionExceptionHandlerResponse extends Enum<ProductionExceptionHandler.ProductionExceptionHandlerResponse>
+Field Details
+  name -- public final String name. An English description of the used option. This is for debugging only and may change.
+  id -- public final int id. The permanent and immutable id of the used option. This can't ever change.
+Method Details
+  values() -- Returns an array containing the constants of this enum class, in the order they are declared.
+  valueOf(String name) -- Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not permitted.) Throws IllegalArgumentException if this enum class has no constant with the specified name, and NullPointerException if the argument is null.
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/ProductionExceptionHandler.SerializationExceptionOrigin.html b/static/41/javadoc/org/apache/kafka/streams/errors/ProductionExceptionHandler.SerializationExceptionOrigin.html new file mode 100644 index 000000000..ef902a3a3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/ProductionExceptionHandler.SerializationExceptionOrigin.html @@ -0,0 +1,227 @@
+ProductionExceptionHandler.SerializationExceptionOrigin (kafka 4.1.0 API)
+Enum Class ProductionExceptionHandler.SerializationExceptionOrigin
+java.lang.Object -> java.lang.Enum<ProductionExceptionHandler.SerializationExceptionOrigin> -> org.apache.kafka.streams.errors.ProductionExceptionHandler.SerializationExceptionOrigin
+All Implemented Interfaces: Serializable, Comparable<ProductionExceptionHandler.SerializationExceptionOrigin>, Constable
+Enclosing interface: ProductionExceptionHandler
+public static enum ProductionExceptionHandler.SerializationExceptionOrigin extends Enum<ProductionExceptionHandler.SerializationExceptionOrigin>
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/ProductionExceptionHandler.html b/static/41/javadoc/org/apache/kafka/streams/errors/ProductionExceptionHandler.html new file mode 100644 index 000000000..951c2d8a5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/ProductionExceptionHandler.html @@ -0,0 +1,257 @@
+ProductionExceptionHandler (kafka 4.1.0 API)
+Interface ProductionExceptionHandler
+All Superinterfaces: Configurable
+All Known Implementing Classes: DefaultProductionExceptionHandler
+public interface ProductionExceptionHandler extends Configurable
+Interface that specifies how an exception when attempting to produce a result to Kafka should be handled.
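Wiring a handler into an application, whether DefaultProductionExceptionHandler or a custom one such as the DropTooLargeRecordsHandler sketched after the ErrorHandlerContext page above, is configuration only. The literal key below is an assumption: recent releases document production.exception.handler, while older ones used default.production.exception.handler.

import java.util.Properties;

import org.apache.kafka.streams.StreamsConfig;

public class ProductionHandlerConfigExample {
    public static Properties streamsConfig() {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-app");            // assumed application id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        // Use the handler sketched earlier; the config key name is an assumption (see lead-in).
        props.put("production.exception.handler", DropTooLargeRecordsHandler.class.getName());
        return props;
    }
}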
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/StateStoreMigratedException.html b/static/41/javadoc/org/apache/kafka/streams/errors/StateStoreMigratedException.html new file mode 100644 index 000000000..94673f189 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/StateStoreMigratedException.html @@ -0,0 +1,172 @@
+StateStoreMigratedException (kafka 4.1.0 API)
+Class StateStoreMigratedException
+All Implemented Interfaces: Serializable
+public class StateStoreMigratedException extends InvalidStateStoreException
+Indicates that the state store being queried is closed although the Kafka Streams state is RUNNING or REBALANCING. This could happen because the store moved to some other instance during a rebalance, so rediscovery of the state store is required before retrying.
+Constructors: StateStoreMigratedException(String message); StateStoreMigratedException(String message, Throwable throwable)
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/StateStoreNotAvailableException.html b/static/41/javadoc/org/apache/kafka/streams/errors/StateStoreNotAvailableException.html new file mode 100644 index 000000000..067152cf2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/StateStoreNotAvailableException.html @@ -0,0 +1,171 @@
+StateStoreNotAvailableException (kafka 4.1.0 API)
+Class StateStoreNotAvailableException
+All Implemented Interfaces: Serializable
+public class StateStoreNotAvailableException extends InvalidStateStoreException
+Indicates that the state store being queried is already closed. This could happen when Kafka Streams is in the PENDING_SHUTDOWN, NOT_RUNNING, or ERROR state.
+Constructors: StateStoreNotAvailableException(String message); StateStoreNotAvailableException(String message, Throwable throwable)
diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/StreamsException.html b/static/41/javadoc/org/apache/kafka/streams/errors/StreamsException.html new file mode 100644 index 000000000..cf95ea924 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/StreamsException.html @@ -0,0 +1,254 @@
+StreamsException (kafka 4.1.0 API)
+Class StreamsException
+All Implemented Interfaces: Serializable
+Direct Known Subclasses: BrokerNotFoundException, InvalidStateStoreException, LockException, MissingSourceTopicException, ProcessorStateException, TaskAssignmentException, TaskCorruptedException, TaskIdFormatException, TaskMigratedException, TopologyException, UnknownTopologyException
+public class StreamsException extends KafkaException
+StreamsException is the top-level exception type generated by Kafka Streams, and indicates errors have occurred during a StreamThread's processing. It is guaranteed that any exception thrown up to the StreamsUncaughtExceptionHandler will be of the type StreamsException. For example, any user exceptions will be wrapped as a StreamsException.
+Constructors: StreamsException(String message); StreamsException(String message, TaskId taskId); StreamsException(String message, Throwable throwable); StreamsException(String message, Throwable throwable, TaskId taskId); StreamsException(Throwable throwable); StreamsException(Throwable throwable, TaskId taskId)
+Method Details
+  taskId() -- public Optional<TaskId> taskId(). Returns: the TaskId that this exception originated from, or Optional.empty() if the exception cannot be traced back to a particular task. Note that the TaskId being empty does not guarantee that the exception wasn't directly related to a specific task.
+  setTaskId(TaskId taskId) -- public void setTaskId(TaskId taskId)
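Since taskId() returns an Optional, callers that log or report StreamsExceptions usually handle both cases explicitly. A small, hedged helper sketch follows; the message format is an assumption.

import java.util.Optional;

import org.apache.kafka.streams.errors.StreamsException;
import org.apache.kafka.streams.processor.TaskId;

public final class StreamsErrors {

    // Format a StreamsException for logging, including the originating task if one is attached.
    public static String describe(final StreamsException exception) {
        final Optional<TaskId> taskId = exception.taskId();
        return taskId
                .map(id -> "task " + id + " failed: " + exception.getMessage())
                .orElse("streams failure (no task attached): " + exception.getMessage());
    }
}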
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/StreamsNotStartedException.html b/static/41/javadoc/org/apache/kafka/streams/errors/StreamsNotStartedException.html new file mode 100644 index 000000000..933c83e09 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/StreamsNotStartedException.html @@ -0,0 +1,170 @@ + + + + +StreamsNotStartedException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StreamsNotStartedException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class StreamsNotStartedException +extends InvalidStateStoreException
    +
    Indicates that Kafka Streams is in state CREATED and thus state stores cannot be queries yet. + To query state stores, it's required to first start Kafka Streams via KafkaStreams.start(). + You can retry to query the state after the state transitioned to RUNNING.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        StreamsNotStartedException

        +
        public StreamsNotStartedException(String message)
        +
        +
      • +
      • +
        +

        StreamsNotStartedException

        +
        public StreamsNotStartedException(String message, + Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/StreamsRebalancingException.html b/static/41/javadoc/org/apache/kafka/streams/errors/StreamsRebalancingException.html new file mode 100644 index 000000000..c4508ed39 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/StreamsRebalancingException.html @@ -0,0 +1,170 @@ + + + + +StreamsRebalancingException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StreamsRebalancingException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class StreamsRebalancingException +extends InvalidStateStoreException
    +
    Indicates that Kafka Streams is in state REBALANCING and thus + cannot be queried by default. You can retry to query after the rebalance finished. As an alternative, you can also query + (potentially stale) state stores during a rebalance via StoreQueryParameters.enableStaleStores().
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        StreamsRebalancingException

        +
        public StreamsRebalancingException(String message)
        +
        +
      • +
      • +
        +

        StreamsRebalancingException

        +
        public StreamsRebalancingException(String message, + Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/StreamsStoppedException.html b/static/41/javadoc/org/apache/kafka/streams/errors/StreamsStoppedException.html new file mode 100644 index 000000000..972f2fe7c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/StreamsStoppedException.html @@ -0,0 +1,169 @@ + + + + +StreamsStoppedException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StreamsStoppedException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class StreamsStoppedException +extends InvalidStateStoreException
    +
Indicates that Kafka Streams is in a terminating or terminal state, such as KafkaStreams.State.PENDING_SHUTDOWN, KafkaStreams.State.PENDING_ERROR, KafkaStreams.State.NOT_RUNNING, or KafkaStreams.State.ERROR. This Streams instance will need to be discarded and replaced before it can + serve queries. The caller may wish to query a different instance.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        StreamsStoppedException

        +
        public StreamsStoppedException(String message)
        +
        +
      • +
      • +
        +

        StreamsStoppedException

        +
        public StreamsStoppedException(String message, + Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.html b/static/41/javadoc/org/apache/kafka/streams/errors/StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.html new file mode 100644 index 000000000..77c6ee4e1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.html @@ -0,0 +1,283 @@ + + + + +StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse

    +
    +
    java.lang.Object +
    java.lang.Enum<StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse> +
    org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse>, Constable
    +
    +
    +
    Enclosing interface:
    +
    StreamsUncaughtExceptionHandler
    +
    +
    +
    public static enum StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse +extends Enum<StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse>
    +
    Enumeration that describes the response from the exception handler.
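For illustration only, a handler registered on an assumed KafkaStreams instance named streams might map exception types to these responses; the classification below is an assumed policy, not one prescribed by this API:

  streams.setUncaughtExceptionHandler(exception -> {
      if (exception.getCause() instanceof RetriableException) {
          // assumed policy: replace the failed stream thread for retriable errors
          return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.REPLACE_THREAD;
      }
      // assumed policy: otherwise shut down only this client, leaving other instances running
      return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT;
  });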
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/StreamsUncaughtExceptionHandler.html b/static/41/javadoc/org/apache/kafka/streams/errors/StreamsUncaughtExceptionHandler.html new file mode 100644 index 000000000..17e9a0874 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/StreamsUncaughtExceptionHandler.html @@ -0,0 +1,150 @@ + + + + +StreamsUncaughtExceptionHandler (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface StreamsUncaughtExceptionHandler

    +
    +
    +
    +
    public interface StreamsUncaughtExceptionHandler
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      + +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/TaskAssignmentException.html b/static/41/javadoc/org/apache/kafka/streams/errors/TaskAssignmentException.html new file mode 100644 index 000000000..fd76319a2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/TaskAssignmentException.html @@ -0,0 +1,176 @@ + + + + +TaskAssignmentException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TaskAssignmentException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class TaskAssignmentException +extends StreamsException
    +
Indicates a runtime error incurred while trying to assign + stream tasks to + threads.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        TaskAssignmentException

        +
        public TaskAssignmentException(String message)
        +
        +
      • +
      • +
        +

        TaskAssignmentException

        +
        public TaskAssignmentException(String message, + Throwable throwable)
        +
        +
      • +
      • +
        +

        TaskAssignmentException

        +
        public TaskAssignmentException(Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/TaskCorruptedException.html b/static/41/javadoc/org/apache/kafka/streams/errors/TaskCorruptedException.html new file mode 100644 index 000000000..fb305e901 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/TaskCorruptedException.html @@ -0,0 +1,202 @@ + + + + +TaskCorruptedException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TaskCorruptedException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class TaskCorruptedException +extends StreamsException
    +
Indicates a specific task is corrupted and needs to be re-initialized. It can be thrown when: + +
      +
• Under EOS, if the checkpoint file does not contain offsets for the corresponding store's changelogs, meaning it was previously not closed cleanly.
    • +
• An out-of-range exception was thrown during restoration, meaning that the changelog has been modified and the store is re-bootstrapped.
    • +
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        TaskCorruptedException

        +
        public TaskCorruptedException(Set<TaskId> corruptedTasks)
        +
        +
      • +
      • +
        +

        TaskCorruptedException

        +
        public TaskCorruptedException(Set<TaskId> corruptedTasks, + InvalidOffsetException e)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        corruptedTasks

        +
        public Set<TaskId> corruptedTasks()
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/TaskIdFormatException.html b/static/41/javadoc/org/apache/kafka/streams/errors/TaskIdFormatException.html new file mode 100644 index 000000000..ea1b15648 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/TaskIdFormatException.html @@ -0,0 +1,176 @@ + + + + +TaskIdFormatException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TaskIdFormatException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class TaskIdFormatException +extends StreamsException
    +
Indicates a runtime error incurred while trying to parse the task id + from the read string.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        TaskIdFormatException

        +
        public TaskIdFormatException(String message)
        +
        +
      • +
      • +
        +

        TaskIdFormatException

        +
        public TaskIdFormatException(String message, + Throwable throwable)
        +
        +
      • +
      • +
        +

        TaskIdFormatException

        +
        public TaskIdFormatException(Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/TaskMigratedException.html b/static/41/javadoc/org/apache/kafka/streams/errors/TaskMigratedException.html new file mode 100644 index 000000000..1339a0114 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/TaskMigratedException.html @@ -0,0 +1,168 @@ + + + + +TaskMigratedException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TaskMigratedException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class TaskMigratedException +extends StreamsException
    +
Indicates that all tasks belonging to the thread have migrated to another thread. This exception can be thrown when + the thread gets fenced (either by the consumer coordinator or by the transaction coordinator), which means it is + no longer part of the group but already a "zombie".
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        TaskMigratedException

        +
        public TaskMigratedException(String message)
        +
        +
      • +
      • +
        +

        TaskMigratedException

        +
        public TaskMigratedException(String message, + Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/TopologyException.html b/static/41/javadoc/org/apache/kafka/streams/errors/TopologyException.html new file mode 100644 index 000000000..dd13fec34 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/TopologyException.html @@ -0,0 +1,175 @@ + + + + +TopologyException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TopologyException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class TopologyException +extends StreamsException
    +
Indicates a pre-runtime error occurred while parsing the logical topology + to construct the physical processor topology.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        TopologyException

        +
        public TopologyException(String message)
        +
        +
      • +
      • +
        +

        TopologyException

        +
        public TopologyException(String message, + Throwable throwable)
        +
        +
      • +
      • +
        +

        TopologyException

        +
        public TopologyException(Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/UnknownStateStoreException.html b/static/41/javadoc/org/apache/kafka/streams/errors/UnknownStateStoreException.html new file mode 100644 index 000000000..c8c8bd69e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/UnknownStateStoreException.html @@ -0,0 +1,169 @@ + + + + +UnknownStateStoreException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class UnknownStateStoreException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class UnknownStateStoreException +extends InvalidStateStoreException
    +
Indicates that the state store being queried is unknown, i.e., the state store either does not exist in your topology + or is not queryable.
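For example, a store-lookup method might treat this exception as a caller error rather than retrying; a small sketch, where streams and requestedStoreName are assumed to exist:

  try {
      return streams.store(
          StoreQueryParameters.fromNameAndType(requestedStoreName, QueryableStoreTypes.<String, Long>keyValueStore()));
  } catch (final UnknownStateStoreException e) {
      // the name matches no queryable store in this topology, so retrying will not help
      throw new IllegalArgumentException("Unknown state store: " + requestedStoreName, e);
  }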
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        UnknownStateStoreException

        +
        public UnknownStateStoreException(String message)
        +
        +
      • +
      • +
        +

        UnknownStateStoreException

        +
        public UnknownStateStoreException(String message, + Throwable throwable)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/UnknownTopologyException.html b/static/41/javadoc/org/apache/kafka/streams/errors/UnknownTopologyException.html new file mode 100644 index 000000000..9c221db7e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/UnknownTopologyException.html @@ -0,0 +1,171 @@ + + + + +UnknownTopologyException (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class UnknownTopologyException

    +
    + +
    +
    +
    All Implemented Interfaces:
    +
    Serializable
    +
    +
    +
    public class UnknownTopologyException +extends StreamsException
    +
Indicates that the NamedTopology being + looked up does not exist in this application.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        UnknownTopologyException

        +
        public UnknownTopologyException(String message, + String namedTopology)
        +
        +
      • +
      • +
        +

        UnknownTopologyException

        +
        public UnknownTopologyException(String message, + Throwable throwable, + String namedTopology)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/package-summary.html b/static/41/javadoc/org/apache/kafka/streams/errors/package-summary.html new file mode 100644 index 000000000..5cf2b9ca3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/package-summary.html @@ -0,0 +1,267 @@ + + + + +org.apache.kafka.streams.errors (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.streams.errors

    +
    +
    +
    package org.apache.kafka.streams.errors
    +
    +
    Provides common exception classes for Streams applications.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/errors/package-tree.html b/static/41/javadoc/org/apache/kafka/streams/errors/package-tree.html new file mode 100644 index 000000000..3899508dc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/errors/package-tree.html @@ -0,0 +1,148 @@ + + + + +org.apache.kafka.streams.errors Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.streams.errors

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +

    Enum Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Aggregator.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Aggregator.html new file mode 100644 index 000000000..49d1e478c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Aggregator.html @@ -0,0 +1,171 @@ + + + + +Aggregator (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Aggregator<K,V,VAgg>

    +
    +
    +
    +
    Type Parameters:
    +
    K - key type
    +
    V - input value type
    +
    VAgg - aggregate value type
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface Aggregator<K,V,VAgg>
    +
The Aggregator interface for aggregating values of the given key. + This is a generalization of Reducer and allows having different types for the input value and the aggregation + result. + Aggregator is used in combination with Initializer, which provides an initial aggregation value. +

    + Aggregator can be used to implement aggregation functions like count.
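To make the Initializer/Aggregator pairing above concrete, a count-style aggregation could look like the following sketch; builder is an assumed StreamsBuilder and the topic name "words" is illustrative:

  final KStream<String, String> words =
      builder.stream("words", Consumed.with(Serdes.String(), Serdes.String()));

  final KTable<String, Long> counts = words
      .groupByKey()
      .aggregate(
          () -> 0L,                                   // Initializer: starting aggregate per key
          (key, value, aggregate) -> aggregate + 1L,  // Aggregator: fold one record into the current aggregate
          Materialized.with(Serdes.String(), Serdes.Long()));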

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      apply(K key, + V value, + VAgg aggregate)
      +
      +
      Compute a new aggregate from the key and value of a record and the current aggregate of the same key.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        apply

        +
        VAgg apply(K key, + V value, + VAgg aggregate)
        +
        Compute a new aggregate from the key and value of a record and the current aggregate of the same key.
        +
        +
        Parameters:
        +
        key - the key of the record
        +
        value - the value of the record
        +
        aggregate - the current aggregate value
        +
        Returns:
        +
        the new aggregate value
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Branched.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Branched.html new file mode 100644 index 000000000..30db8ae28 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Branched.html @@ -0,0 +1,283 @@ + + + + +Branched (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Branched<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.Branched<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - type of record key
    +
    V - type of record value
    +
    +
    +
    public class Branched<K,V> +extends Object
    +
    The Branched class is used to define the optional parameters when building branches with + BranchedKStream.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        as

        +
        public static <K, +V> Branched<K,V> as(String name)
        +
        Create an instance of Branched with provided branch name suffix.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        name - the branch name suffix to be used (see BranchedKStream description for details)
        +
        Returns:
        +
        a new instance of Branched
        +
        +
        +
      • +
      • +
        +

        withFunction

        +
        public static <K, +V> Branched<K,V> withFunction(Function<? super KStream<K,V>,? extends KStream<K,V>> chain)
        +
        Create an instance of Branched with provided chain function.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        chain - A function that will be applied to the branch. If the provided function returns + null, its result is ignored, otherwise it is added to the Map returned + by BranchedKStream.defaultBranch() or BranchedKStream.noDefaultBranch() (see + BranchedKStream description for details).
        +
        Returns:
        +
        a new instance of Branched
        +
        +
        +
      • +
      • +
        +

        withConsumer

        +
        public static <K, +V> Branched<K,V> withConsumer(Consumer<KStream<K,V>> chain)
        +
        Create an instance of Branched with provided chain consumer.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        chain - A consumer to which the branch will be sent. If a consumer is provided, + the respective branch will not be added to the resulting Map returned + by BranchedKStream.defaultBranch() or BranchedKStream.noDefaultBranch() (see + BranchedKStream description for details).
        +
        Returns:
        +
        a new instance of Branched
        +
        +
        +
      • +
      • +
        +

        withFunction

        +
        public static <K, +V> Branched<K,V> withFunction(Function<? super KStream<K,V>,? extends KStream<K,V>> chain, + String name)
        +
        Create an instance of Branched with provided chain function and branch name suffix.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        chain - A function that will be applied to the branch. If the provided function returns + null, its result is ignored, otherwise it is added to the Map returned + by BranchedKStream.defaultBranch() or BranchedKStream.noDefaultBranch() (see + BranchedKStream description for details).
        +
        name - the branch name suffix to be used. If null, a default branch name suffix will be generated + (see BranchedKStream description for details)
        +
        Returns:
        +
        a new instance of Branched
        +
        +
        +
      • +
      • +
        +

        withConsumer

        +
        public static <K, +V> Branched<K,V> withConsumer(Consumer<? super KStream<K,V>> chain, + String name)
        +
        Create an instance of Branched with provided chain consumer and branch name suffix.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        chain - A consumer to which the branch will be sent. If a non-null consumer is provided, + the respective branch will not be added to the resulting Map returned + by BranchedKStream.defaultBranch() or BranchedKStream.noDefaultBranch() (see + BranchedKStream description for details).
        +
        name - the branch name suffix to be used. If null, a default branch name suffix will be generated + (see BranchedKStream description for details)
        +
        Returns:
        +
        a new instance of Branched
        +
        +
        +
      • +
      • +
        +

        withName

        +
        public Branched<K,V> withName(String name)
        +
        Configure the instance of Branched with a branch name suffix.
        +
        +
        Parameters:
        +
        name - the branch name suffix to be used. If null a default branch name suffix will be generated (see + BranchedKStream description for details)
        +
        Returns:
        +
        this to facilitate method chaining
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/BranchedKStream.html b/static/41/javadoc/org/apache/kafka/streams/kstream/BranchedKStream.html new file mode 100644 index 000000000..6229694a8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/BranchedKStream.html @@ -0,0 +1,319 @@ + + + + +BranchedKStream (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface BranchedKStream<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - the key type of this stream
    +
    V - the value type of this stream
    +
    +
    +
    public interface BranchedKStream<K,V>
    +
    BranchedKStream is an abstraction of a branched record stream of key-value pairs. + It is an intermediate representation of a KStream in order to split the original KStream into + multiple sub-streams (called branches). + The process of routing the records to different branches is a stateless record-by-record operation. + +

    Branches are defined via branch(Predicate, Branched) or defaultBranch(Branched) methods. + Each input record is evaluated against the predicate supplied via Branched parameters, and is routed + to the first branch for which its respective predicate evaluates to true, and is included in this + branch only. + If a record does not match any predicates, it will be routed to the default branch, or dropped if no default branch + is created. + For details about multicasting/broadcasting records into more than one KStream, see KStream.split(). + +

    Each branch can be processed either by a Function or a + Consumer provided via a Branched parameter. + If certain conditions are met (see below), all created branches can be accessed from the Map returned by an + optional defaultBranch(Branched) or noDefaultBranch() method call. + +

    Rules of forming the resulting Map
    + + The keys of the Map<String, KStream<K, V>> entries returned by + defaultBranch(Branched) or noDefaultBranch() are defined by the following rules: +
      +
    • If Named parameter was provided for KStream.split(Named), its value is used as a prefix for each key. + By default, no prefix is used.
    • +
    • If a branch name is provided in branch(Predicate, Branched) via the Branched parameter, + its value is appended to the prefix to form the Map key.
    • +
    • If a name is not provided for the branch, then the key defaults to prefix + position of the branch as + a decimal number, starting from "1".
    • +
    • If a name is not provided for the defaultBranch(), then the key defaults to prefix + "0".
    • +
+ + The values of the respective Map<String, KStream<K, V>> entries are formed as follows: + + + For example:
    
    + Map<String, KStream<..., ...>> result =
    +   source.split(Named.as("foo-"))
    +     .branch(predicate1, Branched.as("bar"))                    // "foo-bar"
    +     .branch(predicate2, Branched.withConsumer(ks->ks.to("A"))  // no entry: a Consumer is provided
    +     .branch(predicate3, Branched.withFunction(ks->null))       // no entry: chain function returns null
    +     .branch(predicate4, Branched.withFunction(ks->ks))         // "foo-4": chain function returns non-null value
    +     .branch(predicate5)                                        // "foo-5": name defaults to the branch position
    +     .defaultBranch()                                           // "foo-0": "0" is the default name for the default branch
    + 
    + +

    Usage examples

    + +
    Direct branch processing
    + + If no single scope for all the branches is required, and each branch can be processed completely + independently of others, 'consuming' lambdas or method references in Branched parameter can be used: +
    
    + source.split()
    +   .branch(predicate1, Branched.withConsumer(ks -> ks.to("A")))
    +   .branch(predicate2, Branched.withConsumer(ks -> ks.to("B")))
    +   .defaultBranch(Branched.withConsumer(ks->ks.to("C")));
    + 
    + +
    Collecting branches in a single scope
    + + If multiple branches need to be processed in the same scope, for example for merging or joining branches again after + splitting, the Map returned by defaultBranch() or noDefaultBranch() methods provides + access to all the branches in the same scope: +
    
    + Map<String, KStream<String, String>> branches = source.split(Named.as("split-"))
    +   .branch((key, value) -> value == null, Branched.withFunction(s -> s.mapValues(v->"NULL"), "null")
    +   .defaultBranch(Branched.as("non-null"));
    +
    + KStream<String, String> merged = branches.get("split-non-null").merge(branches.get("split-null"));
    + 
    + +
    Dynamic branching
+ + There is also a case when dynamic branch creation is needed, e.g., one branch per enum value: +
    
    + BranchedKStream branched = stream.split();
    + for (RecordType recordType : RecordType.values()) {
    +   branched.branch((k, v) -> v.getRecType() == recordType, Branched.withConsumer(recordType::processRecords));
    + }
    + 
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        branch

        +
        BranchedKStream<K,V> branch(Predicate<? super K,? super V> predicate)
        +
        Define a branch for records that match the predicate.
        +
        +
        Parameters:
        +
        predicate - A Predicate instance, against which each record will be evaluated. + If this predicate returns true for a given record, the record will be + routed to the current branch and will not be evaluated against the predicates + for the remaining branches.
        +
        Returns:
        +
        this to facilitate method chaining
        +
        +
        +
      • +
      • +
        +

        branch

        +
        BranchedKStream<K,V> branch(Predicate<? super K,? super V> predicate, + Branched<K,V> branched)
        +
        Define a branch for records that match the predicate.
        +
        +
        Parameters:
        +
        predicate - A Predicate instance, against which each record will be evaluated. + If this predicate returns true for a given record, the record will be + routed to the current branch and will not be evaluated against the predicates + for the remaining branches.
        +
        branched - A Branched parameter, that allows to define a branch name, an in-place + branch consumer or branch mapper (see code examples + for BranchedKStream)
        +
        Returns:
        +
        this to facilitate method chaining
        +
        +
        +
      • +
      • +
        +

        defaultBranch

        +
        Map<String,KStream<K,V>> defaultBranch()
        +
Finalize the construction of branches and define the default branch for the messages not intercepted + by other branches. Calling defaultBranch or noDefaultBranch() is optional.
        +
        +
        Returns:
        +
        Map of named branches. For rules of forming the resulting map, see BranchedKStream + description.
        +
        +
        +
      • +
      • +
        +

        defaultBranch

        +
        Map<String,KStream<K,V>> defaultBranch(Branched<K,V> branched)
        +
Finalize the construction of branches and define the default branch for the messages not intercepted + by other branches. Calling defaultBranch or noDefaultBranch() is optional.
        +
        +
        Parameters:
        +
        branched - A Branched parameter, that allows to define a branch name, an in-place + branch consumer or branch mapper (see code examples + for BranchedKStream)
        +
        Returns:
        +
        Map of named branches. For rules of forming the resulting map, see BranchedKStream + description.
        +
        +
        +
      • +
      • +
        +

        noDefaultBranch

        +
        Map<String,KStream<K,V>> noDefaultBranch()
        +
Finalize the construction of branches without forming a default branch. Calling noDefaultBranch() + or defaultBranch() is optional.
        +
        +
        Returns:
        +
        Map of named branches. For rules of forming the resulting map, see BranchedKStream + description.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/CogroupedKStream.html b/static/41/javadoc/org/apache/kafka/streams/kstream/CogroupedKStream.html new file mode 100644 index 000000000..d4b690c80 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/CogroupedKStream.html @@ -0,0 +1,478 @@ + + + + +CogroupedKStream (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface CogroupedKStream<K,VOut>

    +
    +
    +
    +
    Type Parameters:
    +
    K - the key type of this co-grouped stream
    +
    VOut - the result value type of the applied aggregation
    +
    +
    +
    public interface CogroupedKStream<K,VOut>
    +
    CogroupedKStream is an abstraction of one or more grouped record streams of + key-value pairs. + +

    A CogroupedKStream can be either windowed by applying windowedBy(...) operation, + or can be aggregated into a KTable. + +

    A CogroupedKStream is initialized from a single + grouped record stream, and can be combined with one or more other + grouped record streams, + before windowing or aggregation is applied.
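To make the description above concrete, the following sketch co-groups two grouped streams that share the key type and aggregates them into a single KTable; builder, the topic names, and the summing aggregator are assumptions:

  final Aggregator<String, Long, Long> sum = (key, value, aggregate) -> aggregate + value;

  final KGroupedStream<String, Long> clicks =
      builder.stream("clicks", Consumed.with(Serdes.String(), Serdes.Long())).groupByKey();
  final KGroupedStream<String, Long> views =
      builder.stream("views", Consumed.with(Serdes.String(), Serdes.Long())).groupByKey();

  final KTable<String, Long> totals = clicks
      .cogroup(sum)                 // start the co-group from the first grouped stream
      .cogroup(views, sum)          // add further grouped streams, each with its own Aggregator
      .aggregate(() -> 0L,          // shared Initializer for all co-grouped streams
          Materialized.with(Serdes.String(), Serdes.Long()));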

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        cogroup

        +
        <V> CogroupedKStream<K,VOut> cogroup(KGroupedStream<K,V> groupedStream, + Aggregator<? super K,? super V,VOut> aggregator)
        +
        Add an already grouped KStream to this CogroupedKStream. +

        + The added grouped KStream must have the same number of partitions as all existing + streams of this CogroupedKStream. + If this is not the case, you would need to call KStream.repartition(Repartitioned) before + grouping the KStream and specify the "correct" number of + partitions via Repartitioned parameter. +

        + The specified Aggregator is applied in the actual aggregation step for + each input record and computes a new aggregate using the current aggregate (or for the very first record per key + using the initial intermediate aggregation result provided via the Initializer that is passed into + aggregate(Initializer)) and the record's value.

        +
        +
        Type Parameters:
        +
        V - Type of input values
        +
        Parameters:
        +
groupedStream - a grouped stream
        +
        aggregator - an Aggregator that computes a new aggregate result
        +
        Returns:
        +
        a CogroupedKStream
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        KTable<K,VOut> aggregate(Initializer<VOut> initializer)
        +
        Aggregate the values of records in these streams by the grouped key. + Records with null key or value are ignored. + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + that can be queried by the given store name in materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + To compute the aggregation the corresponding Aggregator as specified in + cogroup(...) is used per input stream. + The specified Initializer is applied once per key, directly before the first input record per key is + processed to provide an initial intermediate aggregation result that is used to process the first record. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to the + same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // some aggregation on value type double
        + String queryableStoreName = "storeName" // the store name should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<VOut>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        + ReadOnlyKeyValueStore<K, ValueAndTimestamp<VOut>> localStore = streams.store(storeQueryParams);
        + K key = "some-key";
        + ValueAndTimestamp<VOut> aggForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to query + the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store (which always will be of type TimestampedKeyValueStore) will be backed by + an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot + contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is a generated value, and + "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation + result. Cannot be null.
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that + represent the latest (rolling) aggregate for each key
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        KTable<K,VOut> aggregate(Initializer<VOut> initializer, + Named named)
        +
        Aggregate the values of records in these streams by the grouped key. + Records with null key or value are ignored. + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + that can be queried by the given store name in materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + To compute the aggregation the corresponding Aggregator as specified in + cogroup(...) is used per input stream. + The specified Initializer is applied once per key, directly before the first input record per key is + processed to provide an initial intermediate aggregation result that is used to process the first record. + The specified Named is applied once to the processor combining the grouped streams. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to the + same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // some aggregation on value type double
        + String queryableStoreName = "storeName" // the store name should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<VOut>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        + ReadOnlyKeyValueStore<K, ValueAndTimestamp<VOut>> localStore = streams.store(storeQueryParams);
        + K key = "some-key";
        + ValueAndTimestamp<VOut> aggForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to query + the value of the key on a parallel running instance of your Kafka Streams application. +

+ For failure and recovery the store (which always will be of type TimestampedKeyValueStore) will be backed by + an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot + contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the provided store name defined + in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        named - name the processor. Cannot be null.
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that + represent the latest (rolling) aggregate for each key
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        KTable<K,VOut> aggregate(Initializer<VOut> initializer, + Materialized<K,VOut,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Aggregate the values of records in these streams by the grouped key. + Records with null key or value are ignored. + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + that can be queried by the given store name in materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + To compute the aggregation the corresponding Aggregator as specified in + cogroup(...) is used per input stream. + The specified Initializer is applied once per key, directly before the first input record per key is + processed to provide an initial intermediate aggregation result that is used to process the first record. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to the + same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // some aggregation on value type double
        + String queryableStoreName = "storeName" // the store name should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<VOut>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        + ReadOnlyKeyValueStore<K, ValueAndTimestamp<VOut>> localStore = streams.store(storeQueryParams);
        + K key = "some-key";
        + ValueAndTimestamp<VOut> aggForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to query + the value of the key on a parallel running instance of your Kafka Streams application. +

+ For failure and recovery the store (which always will be of type TimestampedKeyValueStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot + contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the provided store name defined + in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        materialized - an instance of Materialized used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that + represent the latest (rolling) aggregate for each key
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        KTable<K,VOut> aggregate(Initializer<VOut> initializer, + Named named, + Materialized<K,VOut,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Aggregate the values of records in these streams by the grouped key. + Records with null key or value are ignored. + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + that can be queried by the given store name in materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + To compute the aggregation the corresponding Aggregator as specified in + cogroup(...) is used per input stream. + The specified Initializer is applied once per key, directly before the first input record per key is + processed to provide an initial intermediate aggregation result that is used to process the first record. + The specified Named is used to name the processor combining the grouped streams. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to the + same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // some aggregation on value type double
        + String queryableStoreName = "storeName" // the store name should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<VOut>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        + ReadOnlyKeyValueStore<K, ValueAndTimestamp<VOut>> localStore = streams.store(storeQueryParams);
        + K key = "some-key";
        + ValueAndTimestamp<VOut> aggForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to query + the value of the key on a parallel running instance of your Kafka Streams application. +

+ For failure and recovery the store (which always will be of type TimestampedKeyValueStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot + contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the provided store name defined + in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        materialized - an instance of Materialized used to materialize a state store. Cannot be null.
        +
        named - name the processors. Cannot be null.
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that + represent the latest (rolling) aggregate for each key
        +
        +
        +
      • +
      • +
        +

        windowedBy

        +
        <W extends Window> +TimeWindowedCogroupedKStream<K,VOut> windowedBy(Windows<W> windows)
        +
        Create a new TimeWindowedCogroupedKStream instance that can be used to perform windowed + aggregations.
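As a brief sketch, assuming a CogroupedKStream<String, Long> named cogrouped built as in the class-level example above (window size and serdes are illustrative):

  final KTable<Windowed<String>, Long> windowedTotals = cogrouped
      .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(5)))
      .aggregate(() -> 0L, Materialized.with(Serdes.String(), Serdes.Long()));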
        +
        +
        Type Parameters:
        +
        W - the window type
        +
        Parameters:
        +
        windows - the specification of the aggregation Windows
        +
        Returns:
        +
        an instance of TimeWindowedCogroupedKStream
        +
        +
        +
      • +
      • +
        +

        windowedBy

        + +
        Create a new TimeWindowedCogroupedKStream instance that can be used to perform sliding + windowed aggregations.
        +
        +
        Parameters:
        +
        windows - the specification of the aggregation SlidingWindows
        +
        Returns:
        +
        an instance of TimeWindowedCogroupedKStream
        +
        +
        +
      • +
      • +
        +

        windowedBy

        + +
        Create a new SessionWindowedCogroupedKStream instance that can be used to perform session + windowed aggregations.
        +
        +
        Parameters:
        +
        windows - the specification of the aggregation SessionWindows
        +
        Returns:
        +
        an instance of SessionWindowedCogroupedKStream
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Consumed.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Consumed.html new file mode 100644 index 000000000..f19a33158 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Consumed.html @@ -0,0 +1,483 @@ + + + + +Consumed (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Consumed<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.Consumed<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - type of record key
    +
    V - type of record value
    +
    +
    +
    public class Consumed<K,V> +extends Object
    +
    The Consumed class is used to define the optional parameters when using StreamsBuilder to + build instances of KStream, KTable, and GlobalKTable. +

    + For example, you can read a topic as KStream with a custom timestamp extractor and specify the corresponding + key and value serdes like: +

    
    + StreamsBuilder builder = new StreamsBuilder();
    + KStream<String, Long> stream = builder.stream(
    +   "topicName",
    +   Consumed.with(Serdes.String(), Serdes.Long())
    +           .withTimestampExtractor(new LogAndSkipOnInvalidTimestamp()));
    + 
    + Similarly, you can read a topic as KTable with a custom auto.offset.reset configuration and force a + state store materialization to access the content via + interactive queries: +
    
    + StreamsBuilder builder = new StreamsBuilder();
    + KTable<Integer, Integer> table = builder.table(
    +   "topicName",
    +   Consumed.with(AutoOffsetReset.LATEST),
    +   Materialized.as("queryable-store-name"));
    + 
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        with

        +
        @Deprecated +public static <K, +V> Consumed<K,V> with(Serde<K> keySerde, + Serde<V> valueSerde, + TimestampExtractor timestampExtractor, + Topology.AutoOffsetReset resetPolicy)
        +
        Deprecated. + +
        +
        Create an instance of Consumed with the supplied arguments. null values are acceptable.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        keySerde - the key serde. If null the default key serde from config will be used
        +
        valueSerde - the value serde. If null the default value serde from config will be used
        +
timestampExtractor - the timestamp extractor to be used. If null the default timestamp extractor from config will be used
        +
        resetPolicy - the offset reset policy to be used. If null the default reset policy from config will be used
        +
        Returns:
        +
        a new instance of Consumed
        +
        +
        +
      • +
      • +
        +

        with

        +
        public static <K, +V> Consumed<K,V> with(Serde<K> keySerde, + Serde<V> valueSerde, + TimestampExtractor timestampExtractor, + AutoOffsetReset resetPolicy)
        +
        Create an instance of Consumed with the supplied arguments. null values are acceptable.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        keySerde - the key serde. If null the default key serde from config will be used
        +
        valueSerde - the value serde. If null the default value serde from config will be used
        +
timestampExtractor - the timestamp extractor to be used. If null the default timestamp extractor from config will be used
        +
        resetPolicy - the offset reset policy to be used. If null the default reset policy from config will be used
        +
        Returns:
        +
        a new instance of Consumed
        +
        +
        +
      • +
      • +
        +

        with

        +
        public static <K, +V> Consumed<K,V> with(Serde<K> keySerde, + Serde<V> valueSerde)
        +
        Create an instance of Consumed with key and value Serdes.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        keySerde - the key serde. If null the default key serde from config will be used
        +
        valueSerde - the value serde. If null the default value serde from config will be used
        +
        Returns:
        +
        a new instance of Consumed
        +
        +
        +
      • +
      • +
        +

        with

        +
        public static <K, +V> Consumed<K,V> with(TimestampExtractor timestampExtractor)
        +
        Create an instance of Consumed with a TimestampExtractor.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
timestampExtractor - the timestamp extractor to be used. If null the default timestamp extractor from config will be used
        +
        Returns:
        +
        a new instance of Consumed
        +
        +
        +
      • +
      • +
        +

        with

        +
        @Deprecated +public static <K, +V> Consumed<K,V> with(Topology.AutoOffsetReset resetPolicy)
        +
        Deprecated. +
        Since 4.0. Use with(AutoOffsetReset) instead.
        +
        +
        Create an instance of Consumed with a Topology.AutoOffsetReset.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        resetPolicy - the offset reset policy to be used. If null the default reset policy from config will be used
        +
        Returns:
        +
        a new instance of Consumed
        +
        +
        +
      • +
      • +
        +

        with

        +
        public static <K, +V> Consumed<K,V> with(AutoOffsetReset resetPolicy)
        +
Create an instance of Consumed with an AutoOffsetReset.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        resetPolicy - the offset reset policy to be used. If null the default reset policy from config will be used
        +
        Returns:
        +
        a new instance of Consumed
        +
        +
        +
      • +
      • +
        +

        as

        +
public static <K, V> Consumed<K,V> as(String processorName)
        +
        Create an instance of Consumed with provided processor name.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        processorName - the processor name to be used. If null a default processor name will be generated
        +
        Returns:
        +
        a new instance of Consumed
        +
        +
        +
      • +
      • +
        +

        withKeySerde

        +
        public Consumed<K,V> withKeySerde(Serde<K> keySerde)
        +
        Configure the instance of Consumed with a key Serde.
        +
        +
        Parameters:
        +
        keySerde - the key serde. If null the default key serde from config will be used
        +
        Returns:
        +
        a new instance of Consumed
        +
        +
        +
      • +
      • +
        +

        withValueSerde

        +
        public Consumed<K,V> withValueSerde(Serde<V> valueSerde)
        +
        Configure the instance of Consumed with a value Serde.
        +
        +
        Parameters:
        +
        valueSerde - the value serde. If null the default value serde from config will be used
        +
        Returns:
        +
        a new instance of Consumed
        +
        +
        +
      • +
      • +
        +

        withTimestampExtractor

        +
        public Consumed<K,V> withTimestampExtractor(TimestampExtractor timestampExtractor)
        +
        Configure the instance of Consumed with a TimestampExtractor.
        +
        +
        Parameters:
        +
timestampExtractor - the timestamp extractor to be used. If null the default timestamp extractor from config will be used
        +
        Returns:
        +
        a new instance of Consumed
        +
        +
        +
      • +
      • +
        +

        withOffsetResetPolicy

        +
@Deprecated
public Consumed<K,V> withOffsetResetPolicy(Topology.AutoOffsetReset resetPolicy)
        +
Deprecated. Since 4.0. Use withOffsetResetPolicy(AutoOffsetReset) instead.
        +
        Configure the instance of Consumed with a Topology.AutoOffsetReset.
        +
        +
        Parameters:
        +
        resetPolicy - the offset reset policy to be used. If null the default reset policy from config will be used
        +
        Returns:
        +
        a new instance of Consumed
        +
        +
        +
      • +
      • +
        +

        withOffsetResetPolicy

        +
        public Consumed<K,V> withOffsetResetPolicy(AutoOffsetReset resetPolicy)
        +
Configure the instance of Consumed with an AutoOffsetReset.
        +
        +
        Parameters:
        +
        resetPolicy - the offset reset policy to be used. If null the default reset policy from config will be used
        +
        Returns:
        +
        a new instance of Consumed
        +
        +
        +
      • +
      • +
        +

        withName

        +
        public Consumed<K,V> withName(String processorName)
        +
        Configure the instance of Consumed with a processor name.
        +
        +
        Parameters:
        +
        processorName - the processor name to be used. If null a default processor name will be generated
        +
        Returns:
        +
        a new instance of Consumed
        +
        +
        +
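A short sketch of the fluent setters above, chaining a timestamp extractor and a processor name onto a Consumed instance. The topic name, types, and the choice of WallclockTimestampExtractor are assumptions for illustration.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.processor.WallclockTimestampExtractor;

public class ConsumedFluentExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        // Start from explicit serdes, then override the timestamp extractor and the source name.
        Consumed<String, Long> consumed = Consumed.<String, Long>with(Serdes.String(), Serdes.Long())
            .withTimestampExtractor(new WallclockTimestampExtractor())
            .withName("orders-source");
        KStream<String, Long> orders = builder.stream("orders", consumed);
    }
}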
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/EmitStrategy.StrategyType.html b/static/41/javadoc/org/apache/kafka/streams/kstream/EmitStrategy.StrategyType.html new file mode 100644 index 000000000..07253ea24 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/EmitStrategy.StrategyType.html @@ -0,0 +1,230 @@ + + + + +EmitStrategy.StrategyType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class EmitStrategy.StrategyType

    +
    +
    java.lang.Object +
    java.lang.Enum<EmitStrategy.StrategyType> +
    org.apache.kafka.streams.kstream.EmitStrategy.StrategyType
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<EmitStrategy.StrategyType>, Constable
    +
    +
    +
    Enclosing interface:
    +
    EmitStrategy
    +
    +
    +
public static enum EmitStrategy.StrategyType
extends Enum<EmitStrategy.StrategyType>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static EmitStrategy.StrategyType[] values()
        +
Returns an array containing the constants of this enum class, in the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static EmitStrategy.StrategyType valueOf(String name)
        +
Returns the enum constant of this class with the specified name. The string must match exactly an identifier used to declare an enum constant in this class. (Extraneous whitespace characters are not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      • +
        +

        forType

        +
        public static EmitStrategy forType(EmitStrategy.StrategyType type)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/EmitStrategy.html b/static/41/javadoc/org/apache/kafka/streams/kstream/EmitStrategy.html new file mode 100644 index 000000000..f4cc056d2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/EmitStrategy.html @@ -0,0 +1,236 @@ + + + + +EmitStrategy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface EmitStrategy

    +
    +
    +
    +
    public interface EmitStrategy
    +
    This interface controls the strategy that can be used to control how we emit results in a processor.
    +
    +
    +
      + +
    • +
      +

      Nested Class Summary

      +
      Nested Classes
      +
      +
      Modifier and Type
      +
      Interface
      +
      Description
      +
      static enum 
      + +
       
      +
      +
      +
    • + +
    • +
      +

      Field Summary

      +
      Fields
      +
      +
      Modifier and Type
      +
      Field
      +
      Description
      +
      static final org.slf4j.Logger
      + +
       
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
This strategy indicates that the aggregated result for a window will only be emitted when the window closes instead of when there's an update to the window.
      +
      + + +
      +
This strategy indicates that the aggregated result for a window will be emitted every time there's an update to the window instead of when the window closes.
      +
      + + +
      +
      Returns the strategy type.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        log

        +
        static final org.slf4j.Logger log
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        type

        + +
        Returns the strategy type.
        +
        +
        Returns:
        +
        Emit strategy type
        +
        +
        +
      • +
      • +
        +

        onWindowClose

        +
        static EmitStrategy onWindowClose()
        +
This strategy indicates that the aggregated result for a window will only be emitted when the window closes instead of when there's an update to the window. Window close means that current event time is larger than (window end time + grace period).

This strategy should only be used for windows which can close. An exception will be thrown if it's used with UnlimitedWindow.

        +
        +
        Returns:
        +
        "window close" EmitStrategy instance
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        onWindowUpdate

        +
        static EmitStrategy onWindowUpdate()
        +
This strategy indicates that the aggregated result for a window will be emitted every time there's an update to the window instead of when the window closes.
        +
        +
        Returns:
        +
        "window update" EmitStrategy instance
        +
        See Also:
        +
        + +
        +
        +
        +
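A hedged sketch of how an emit strategy is typically attached to a windowed aggregation via emitStrategy(...) on the windowed stream. The topic name, serdes, and window sizes are assumptions for illustration.

import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.*;

public class EmitStrategyExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        KTable<Windowed<String>, Long> counts = builder
            .stream("clicks", Consumed.with(Serdes.String(), Serdes.String()))
            .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
            .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofMinutes(5), Duration.ofMinutes(1)))
            // Emit a single final result per window instead of one update per incoming record.
            .emitStrategy(EmitStrategy.onWindowClose())
            .count();
    }
}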
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/ForeachAction.html b/static/41/javadoc/org/apache/kafka/streams/kstream/ForeachAction.html new file mode 100644 index 000000000..7ba8effab --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/ForeachAction.html @@ -0,0 +1,158 @@ + + + + +ForeachAction (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ForeachAction<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - key type
    +
    V - value type
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
@FunctionalInterface
public interface ForeachAction<K,V>
    +
The ForeachAction interface for performing an action on a key-value pair. This is a stateless record-by-record operation, i.e., apply(Object, Object) is invoked individually for each record of a stream. If stateful processing is required, consider using KStream#process(...).
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      +
apply(K key, V value)
      +
      +
      Perform an action for each record of a stream.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        apply

        +
void apply(K key, V value)
        +
        Perform an action for each record of a stream.
        +
        +
        Parameters:
        +
        key - the key of the record
        +
        value - the value of the record
        +
        +
        +
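A minimal sketch of ForeachAction used as a lambda for a terminal, stateless side effect. The topic name is an assumption.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.ForeachAction;

public class ForeachExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        // The action is invoked once per record; it must not try to keep state across records.
        ForeachAction<String, String> printAction =
            (key, value) -> System.out.println(key + " = " + value);
        builder.stream("events", Consumed.with(Serdes.String(), Serdes.String()))
               .foreach(printAction);
    }
}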
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/ForeachProcessor.html b/static/41/javadoc/org/apache/kafka/streams/kstream/ForeachProcessor.html new file mode 100644 index 000000000..52405ff72 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/ForeachProcessor.html @@ -0,0 +1,185 @@ + + + + +ForeachProcessor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ForeachProcessor<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.ForeachProcessor<K,V>
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Processor<K,V,Void,Void>
    +
    +
    +
@Deprecated
public class ForeachProcessor<K,V>
extends Object
implements Processor<K,V,Void,Void>
    +
    Deprecated. +
    Since 4.0 and should not be used any longer.
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ForeachProcessor

        +
        public ForeachProcessor(ForeachAction<K,V> action)
        +
        Deprecated.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        process

        +
        public void process(Record<K,V> record)
        +
        Deprecated.
        +
        Description copied from interface: Processor
        +
        Process the record. Note that record metadata is undefined in cases such as a forward call from a punctuator.
        +
        +
        Specified by:
        +
        process in interface Processor<K,V,Void,Void>
        +
        Parameters:
        +
        record - the record to process
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/GlobalKTable.html b/static/41/javadoc/org/apache/kafka/streams/kstream/GlobalKTable.html new file mode 100644 index 000000000..88f61c5a8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/GlobalKTable.html @@ -0,0 +1,185 @@ + + + + +GlobalKTable (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface GlobalKTable<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - the key type of this table
    +
    V - the value type of this table
    +
    +
    +
    public interface GlobalKTable<K,V>
    +
GlobalKTable is an abstraction of a changelog stream from a primary-keyed table. Each record in this changelog stream is an update on the primary-keyed table with the record key as the primary key. Primary-keys in a table cannot be null, and thus, null-key key-value pairs are not supported, and corresponding records will be dropped. KTables follow Kafka "tombstone" semantics, and null-value key-value pairs are interpreted and processed as deletes for the corresponding key.

A GlobalKTable is defined from a single Kafka topic that is consumed message by message.

A GlobalKTable can only be used as right-hand side input for a stream-globalTable join.

In contrast to a KTable that is partitioned over all KafkaStreams instances, a GlobalKTable is fully replicated per KafkaStreams instance. Every partition of the underlying topic is consumed by each GlobalKTable, such that the full set of data is available in every KafkaStreams instance. This provides the ability to perform joins with KStream without having to repartition the input stream. Furthermore, GlobalKTables are "bootstrapped" on startup, and are maintained by a separate thread. Thus, updates to a GlobalKTable are not "stream-time synchronized", which may lead to non-deterministic results.

Furthermore, all GlobalKTables have an internal state store which can be accessed from "outside" using the Interactive Queries (IQ) API (see KafkaStreams#store(...) and KafkaStreams#query(...) [new API; evolving] for details). For example:

    
    + builder.globalTable("topic-name", "queryable-store-name");
    + ...
    + KafkaStreams streams = ...;
    + streams.start()
    + ...
    + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>>> storeQueryParams =
    +   StoreQueryParameters.fromNameAndType("queryable-store-name", QueryableStoreTypes.timestampedKeyValueStore());
    + ReadOnlyKeyValueStore view = streams.store(storeQueryParams);
    +
    + // query the value for a key
    + ValueAndTimestamp value = view.get(key);
    +
Note that in contrast to KTable, a GlobalKTable's state holds a full copy of the underlying topic, thus all keys can be queried locally.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
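A hedged sketch of the main GlobalKTable use case: enriching a stream through a stream-globalTable join, with no repartitioning because every instance holds the full table. Topic names, value formats, and the key-extraction logic are assumptions for illustration.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.*;

public class GlobalKTableJoinExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> orders =
            builder.stream("orders", Consumed.with(Serdes.String(), Serdes.String()));
        GlobalKTable<String, String> customers =
            builder.globalTable("customers", Consumed.with(Serdes.String(), Serdes.String()));

        KStream<String, String> enriched = orders.join(
            customers,
            (orderId, order) -> order.split(",")[0],        // map each order record to a customer id (assumed CSV format)
            (order, customer) -> order + " / " + customer); // combine order with customer details
        enriched.to("enriched-orders");
    }
}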
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
      Get the name of the local state store that can be used to query this GlobalKTable.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        queryableStoreName

        +
        String queryableStoreName()
        +
        Get the name of the local state store that can be used to query this GlobalKTable.
        +
        +
        Returns:
        +
        the underlying state store name, or null if this GlobalKTable cannot be queried.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Grouped.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Grouped.html new file mode 100644 index 000000000..38e966565 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Grouped.html @@ -0,0 +1,351 @@ + + + + +Grouped (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Grouped<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.Grouped<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - the key type
    +
    V - the value type
    +
    +
    +
public class Grouped<K,V>
extends Object
    +
The class that is used to capture the key and value Serdes and set the part of name used for repartition topics when performing KStream.groupBy(KeyValueMapper, Grouped), KStream.groupByKey(Grouped), or KTable.groupBy(KeyValueMapper, Grouped) operations. Note that Kafka Streams does not always create repartition topics for grouping operations.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        as

        +
public static <K, V> Grouped<K,V> as(String name)
        +
        Create a Grouped instance with the provided name used as part of the repartition topic if required.
        +
        +
        Type Parameters:
        +
        K - the key type
        +
        V - the value type
        +
        Parameters:
        +
        name - the name used for a repartition topic if required
        +
        Returns:
        +
        a new Grouped configured with the name
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        keySerde

        +
public static <K, V> Grouped<K,V> keySerde(Serde<K> keySerde)
        +
        Create a Grouped instance with the provided keySerde. If null the default key serde from config will be used.
        +
        +
        Type Parameters:
        +
        K - the key type
        +
        V - the value type
        +
        Parameters:
        +
        keySerde - the Serde used for serializing the key. If null the default key serde from config will be used
        +
        Returns:
        +
        a new Grouped configured with the keySerde
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        valueSerde

        +
public static <K, V> Grouped<K,V> valueSerde(Serde<V> valueSerde)
        +
        Create a Grouped instance with the provided valueSerde. If null the default value serde from config will be used.
        +
        +
        Type Parameters:
        +
        K - the key type
        +
        V - the value type
        +
        Parameters:
        +
        valueSerde - the Serde used for serializing the value. If null the default value serde from config will be used
        +
        Returns:
        +
        a new Grouped configured with the valueSerde
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        with

        +
public static <K, V> Grouped<K,V> with(String name, Serde<K> keySerde, Serde<V> valueSerde)
+
Create a Grouped instance with the provided name, keySerde, and valueSerde. If the keySerde and/or the valueSerde is null the default value for the respective serde from config will be used.
        +
        +
        Type Parameters:
        +
        K - the key type
        +
        V - the value type
        +
        Parameters:
        +
        name - the name used as part of the repartition topic name if required
        +
        keySerde - the Serde used for serializing the key. If null the default key serde from config will be used
        +
        valueSerde - the Serde used for serializing the value. If null the default value serde from config will be used
        +
        Returns:
        +
        a new Grouped configured with the name, keySerde, and valueSerde
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        with

        +
public static <K, V> Grouped<K,V> with(Serde<K> keySerde, Serde<V> valueSerde)
+
Create a Grouped instance with the provided keySerde and valueSerde. If the keySerde and/or the valueSerde is null the default value for the respective serde from config will be used.
        +
        +
        Type Parameters:
        +
        K - the key type
        +
        V - the value type
        +
        Parameters:
        +
        keySerde - the Serde used for serializing the key. If null the default key serde from config will be used
        +
        valueSerde - the Serde used for serializing the value. If null the default value serde from config will be used
        +
        Returns:
        +
        a new Grouped configured with the keySerde, and valueSerde
        +
        See Also:
        +
        + +
        +
        +
        +
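A short sketch of Grouped in a re-keying group-by, where a repartition topic is typically created and Grouped supplies both its name fragment and the serdes. Topic names and types are assumptions.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.*;

public class GroupedExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        KTable<String, Long> counts = builder
            .stream("page-views", Consumed.with(Serdes.String(), Serdes.String()))
            // Re-keying by the value forces a repartition; Grouped names it and sets the serdes.
            .groupBy((key, value) -> value,
                     Grouped.with("views-by-page", Serdes.String(), Serdes.String()))
            .count();
    }
}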
      • +
      • +
        +

        withName

        +
        public Grouped<K,V> withName(String name)
        +
Perform the grouping operation with the name for a repartition topic if required. Note that Kafka Streams does not always create repartition topics for grouping operations.
        +
        +
        Parameters:
        +
        name - the name used for the processor name and as part of the repartition topic name if required
        +
        Returns:
        +
        a new Grouped instance configured with the name
        +
        +
        +
      • +
      • +
        +

        withKeySerde

        +
        public Grouped<K,V> withKeySerde(Serde<K> keySerde)
        +
        Perform the grouping operation using the provided keySerde for serializing the key.
        +
        +
        Parameters:
        +
        keySerde - Serde to use for serializing the key. If null the default key serde from config will be used
        +
        Returns:
        +
        a new Grouped instance configured with the keySerde
        +
        +
        +
      • +
      • +
        +

        withValueSerde

        +
        public Grouped<K,V> withValueSerde(Serde<V> valueSerde)
        +
        Perform the grouping operation using the provided valueSerde for serializing the value.
        +
        +
        Parameters:
        +
        valueSerde - Serde to use for serializing the value. If null the default value serde from config will be used
        +
        Returns:
        +
        a new Grouped instance configured with the valueSerde
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Initializer.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Initializer.html new file mode 100644 index 000000000..dc0143c8d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Initializer.html @@ -0,0 +1,156 @@ + + + + +Initializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Initializer<VAgg>

    +
    +
    +
    +
    Type Parameters:
    +
    VAgg - aggregate value type
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
@FunctionalInterface
public interface Initializer<VAgg>
+
The Initializer interface for creating an initial value in aggregations. Initializer is used in combination with Aggregator.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
      Return the initial value for an aggregation.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        apply

        +
        VAgg apply()
        +
        Return the initial value for an aggregation.
        +
        +
        Returns:
        +
        the initial value for an aggregation
        +
        +
        +
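A sketch of an Initializer paired with an Aggregator in a running-sum aggregation. The topic name, types, and store name are assumptions for illustration.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.*;
import org.apache.kafka.streams.state.KeyValueStore;

public class InitializerExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        Initializer<Long> zero = () -> 0L;                               // initial aggregate for each key
        Aggregator<String, Long, Long> sum = (key, value, agg) -> agg + value;
        KTable<String, Long> totals = builder
            .stream("amounts", Consumed.with(Serdes.String(), Serdes.Long()))
            .groupByKey(Grouped.with(Serdes.String(), Serdes.Long()))
            .aggregate(zero, sum,
                Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("totals-store")
                    .withValueSerde(Serdes.Long()));
    }
}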
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/JoinWindows.html b/static/41/javadoc/org/apache/kafka/streams/kstream/JoinWindows.html new file mode 100644 index 000000000..7bb0a62c4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/JoinWindows.html @@ -0,0 +1,483 @@ + + + + +JoinWindows (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class JoinWindows

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.Windows<Window> +
    org.apache.kafka.streams.kstream.JoinWindows
    +
    +
    +
    +
    +
public class JoinWindows
extends Windows<Window>
    +
    The window specifications used for joins. +

A JoinWindows instance defines a maximum time difference for a join over two streams on the same key. In SQL-style you would express this join as

    
    +     SELECT * FROM stream1, stream2
    +     WHERE
    +       stream1.key = stream2.key
    +       AND
    +       stream1.ts - before <= stream2.ts AND stream2.ts <= stream1.ts + after
    + 
There are three different window configurations supported:
+
• before = after = time-difference
• before = 0 and after = time-difference
• before = time-difference and after = 0
+
A join is symmetric in the sense that a join specification on the first stream returns the same result record as a join specification on the second stream with flipped before and after values.

Both values (before and after) must not result in an "inverse" window, i.e., upper-interval bound cannot be smaller than lower-interval bound.

JoinWindows are sliding windows, thus they are aligned to the actual record timestamps. This implies that each input record defines its own window with start and end time being relative to the record's timestamp.

    + For time semantics, see TimestampExtractor.

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Field Summary

      +
      Fields
      +
      +
      Modifier and Type
      +
      Field
      +
      Description
      +
      final long
      + +
      +
      Maximum time difference for tuples that are after the join tuple.
      +
      +
      final long
      + +
      +
      Maximum time difference for tuples that are before the join tuple.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      after(Duration timeDifference)
      +
      +
      Changes the end window boundary to timeDifference but keep the start window boundary as is.
      +
      + +
      before(Duration timeDifference)
      +
      +
      Changes the start window boundary to timeDifference but keep the end window boundary as is.
      +
      +
      boolean
      + +
       
      + +
      grace(Duration afterWindowEnd)
      +
      +
      Deprecated. +
      Since 3.0.
      +
      +
      +
      long
      + +
      +
Return the window grace period (the time to admit out-of-order events after the end of the window). Delay is defined as (stream_time - record_timestamp).
      +
      +
      int
      + +
       
      + +
      of(Duration timeDifference)
      +
      +
      Deprecated. +
      Since 3.0.
      +
      +
      + +
      ofTimeDifferenceAndGrace(Duration timeDifference, + Duration afterWindowEnd)
      +
      +
      Specifies that records of the same key are joinable if their timestamps are within timeDifference, + i.e., the timestamp of a record from the secondary stream is max timeDifference before or after + the timestamp of the record from the primary stream.
      +
      + + +
      +
      Specifies that records of the same key are joinable if their timestamps are within timeDifference, + i.e., the timestamp of a record from the secondary stream is max timeDifference before or after + the timestamp of the record from the primary stream.
      +
      +
      long
      + +
      +
      Return the size of the specified windows in milliseconds.
      +
      + + +
       
      + +
      windowsFor(long timestamp)
      +
      +
      Not supported by JoinWindows.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        beforeMs

        +
        public final long beforeMs
        +
        Maximum time difference for tuples that are before the join tuple.
        +
        +
      • +
      • +
        +

        afterMs

        +
        public final long afterMs
        +
        Maximum time difference for tuples that are after the join tuple.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        ofTimeDifferenceAndGrace

        +
public static JoinWindows ofTimeDifferenceAndGrace(Duration timeDifference, Duration afterWindowEnd)
+
Specifies that records of the same key are joinable if their timestamps are within timeDifference, i.e., the timestamp of a record from the secondary stream is max timeDifference before or after the timestamp of the record from the primary stream.

Using this method explicitly sets the grace period to the duration specified by afterWindowEnd, which means that only out-of-order records arriving more than the grace period after the window end will be dropped. The window close, after which any incoming records are considered late and will be rejected, is defined as windowEnd + afterWindowEnd.

        +
        +
        Parameters:
        +
        timeDifference - join window interval
        +
        afterWindowEnd - The grace period to admit out-of-order events to a window.
        +
        Returns:
        +
        A new JoinWindows object with the specified window definition and grace period
        +
        Throws:
        +
IllegalArgumentException - if timeDifference is negative or can't be represented as long milliseconds, or if afterWindowEnd is negative or can't be represented as long milliseconds
        +
        +
        +
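A minimal sketch of constructing JoinWindows with an explicit grace period, plus an asymmetric variant built with before/after. The durations are illustrative assumptions.

import java.time.Duration;
import org.apache.kafka.streams.kstream.JoinWindows;

public class JoinWindowsExample {
    public static void main(String[] args) {
        // Records are joinable if their timestamps differ by at most 5 minutes;
        // out-of-order records are still admitted up to 1 minute after the window end.
        JoinWindows symmetric = JoinWindows.ofTimeDifferenceAndGrace(
            Duration.ofMinutes(5), Duration.ofMinutes(1));

        // Asymmetric variant: other-stream records may only be up to 1 minute *before*
        // the primary record (before = time-difference, after = 0).
        JoinWindows asymmetric = JoinWindows
            .ofTimeDifferenceWithNoGrace(Duration.ofMinutes(1))
            .after(Duration.ZERO);
    }
}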
      • +
      • +
        +

        ofTimeDifferenceWithNoGrace

        +
        public static JoinWindows ofTimeDifferenceWithNoGrace(Duration timeDifference)
        +
Specifies that records of the same key are joinable if their timestamps are within timeDifference, i.e., the timestamp of a record from the secondary stream is max timeDifference before or after the timestamp of the record from the primary stream.

CAUTION: Using this method implicitly sets the grace period to zero, which means that any out-of-order records arriving after the window ends are considered late and will be dropped.

        +
        +
        Parameters:
        +
        timeDifference - join window interval
        +
        Returns:
        +
        a new JoinWindows object with the window definition and no grace period. Note that this means out-of-order records arriving after the window end will be dropped
        +
        Throws:
        +
        IllegalArgumentException - if timeDifference is negative or can't be represented as long milliseconds
        +
        +
        +
      • +
      • +
        +

        of

        +
@Deprecated
public static JoinWindows of(Duration timeDifference)
throws IllegalArgumentException
+
Deprecated.
Since 3.0. Use ofTimeDifferenceWithNoGrace(Duration) instead.
+
+
Specifies that records of the same key are joinable if their timestamps are within timeDifference, i.e., the timestamp of a record from the secondary stream is max timeDifference before or after the timestamp of the record from the primary stream.
        +
        +
        Parameters:
        +
        timeDifference - join window interval
        +
        Returns:
        +
a new JoinWindows object with the window definition and grace period (defaults to 24 hours minus timeDifference)
        +
        Throws:
        +
        IllegalArgumentException - if timeDifference is negative or can't be represented as long milliseconds
        +
        +
        +
      • +
      • +
        +

        before

        +
public JoinWindows before(Duration timeDifference)
throws IllegalArgumentException
+
Changes the start window boundary to timeDifference but keeps the end window boundary as is. Thus, records of the same key are joinable if the timestamp of a record from the secondary stream is at most timeDifference earlier than the timestamp of the record from the primary stream. timeDifference can be negative but its absolute value must not be larger than the current window "after" value (which would result in a negative window size).
        +
        +
        Parameters:
        +
        timeDifference - relative window start time
        +
        Throws:
        +
        IllegalArgumentException - if the resulting window size is negative or timeDifference can't be represented as long milliseconds
        +
        +
        +
      • +
      • +
        +

        after

        +
public JoinWindows after(Duration timeDifference)
throws IllegalArgumentException
+
Changes the end window boundary to timeDifference but keeps the start window boundary as is. Thus, records of the same key are joinable if the timestamp of a record from the secondary stream is at most timeDifference later than the timestamp of the record from the primary stream. timeDifference can be negative but its absolute value must not be larger than the current window "before" value (which would result in a negative window size).
        +
        +
        Parameters:
        +
        timeDifference - relative window end time
        +
        Throws:
        +
        IllegalArgumentException - if the resulting window size is negative or timeDifference can't be represented as long milliseconds
        +
        +
        +
      • +
      • +
        +

        windowsFor

        +
        public Map<Long,Window> windowsFor(long timestamp)
        +
Not supported by JoinWindows. Throws UnsupportedOperationException.
        +
        +
        Specified by:
        +
        windowsFor in class Windows<Window>
        +
        Parameters:
        +
        timestamp - the timestamp window should get created for
        +
        Returns:
        +
        a map of windowStartTimestamp -> Window entries
        +
        Throws:
        +
        UnsupportedOperationException - at every invocation
        +
        +
        +
      • +
      • +
        +

        size

        +
        public long size()
        +
        Description copied from class: Windows
        +
        Return the size of the specified windows in milliseconds.
        +
        +
        Specified by:
        +
        size in class Windows<Window>
        +
        Returns:
        +
        the size of the specified windows
        +
        +
        +
      • +
      • +
        +

        grace

        +
@Deprecated
public JoinWindows grace(Duration afterWindowEnd)
throws IllegalArgumentException
+
Deprecated. Since 3.0.
+
Reject out-of-order events that are delayed more than afterWindowEnd after the end of its window.

Delay is defined as (stream_time - record_timestamp).

        +
        +
        Parameters:
        +
        afterWindowEnd - The grace period to admit out-of-order events to a window.
        +
        Returns:
        +
        this updated builder
        +
        Throws:
        +
        IllegalArgumentException - if the afterWindowEnd is negative or can't be represented as long milliseconds
        +
        IllegalStateException - if grace(Duration) is called after ofTimeDifferenceAndGrace(Duration, Duration) or ofTimeDifferenceWithNoGrace(Duration)
        +
        +
        +
      • +
      • +
        +

        gracePeriodMs

        +
        public long gracePeriodMs()
        +
        Description copied from class: Windows
        +
Return the window grace period (the time to admit out-of-order events after the end of the window). Delay is defined as (stream_time - record_timestamp).
        +
        +
        Specified by:
        +
        gracePeriodMs in class Windows<Window>
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Joined.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Joined.html new file mode 100644 index 000000000..2d3aedaa3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Joined.html @@ -0,0 +1,524 @@ + + + + +Joined (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Joined<K,VLeft,VRight>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.Joined<K,VLeft,VRight>
    +
    +
    +
    +
    Type Parameters:
    +
    K - type of record key
    +
    VLeft - type of left record value
    +
    VRight - type of right record value
    +
    +
    +
public class Joined<K,VLeft,VRight>
extends Object
    +
The Joined class represents optional params that can be passed to KStream#join(KTable,...) and KStream#leftJoin(KTable,...) operations.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        with

        +
public static <K, VLeft, VRight> Joined<K,VLeft,VRight> with(Serde<K> keySerde, Serde<VLeft> leftValueSerde, Serde<VRight> rightValueSerde)
+
Create an instance of Joined with key, value, and otherValue Serde instances. null values are accepted and will be replaced by the default serdes as defined in config.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        VLeft - left value type
        +
        VRight - right value type
        +
        Parameters:
        +
        keySerde - the key serde to use. If null the default key serde from config will be used
        +
        leftValueSerde - the value serde to use. If null the default value serde from config will be used
        +
        rightValueSerde - the otherValue serde to use. If null the default value serde from config will be used
        +
        Returns:
        +
        new Joined instance with the provided serdes
        +
        +
        +
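A sketch of passing serdes to a stream-table join via Joined.with. Topic names and value types are assumptions for illustration.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.*;

public class JoinedExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, Long> clicks =
            builder.stream("clicks", Consumed.with(Serdes.String(), Serdes.Long()));
        KTable<String, String> users =
            builder.table("users", Consumed.with(Serdes.String(), Serdes.String()));

        // Joined supplies the key, left (stream), and right (table) serdes for the join.
        KStream<String, String> joined = clicks.join(
            users,
            (clickCount, userProfile) -> userProfile + " clicked " + clickCount + " times",
            Joined.with(Serdes.String(), Serdes.Long(), Serdes.String()));
    }
}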
      • +
      • +
        +

        with

        +
public static <K, VLeft, VRight> Joined<K,VLeft,VRight> with(Serde<K> keySerde, Serde<VLeft> leftValueSerde, Serde<VRight> rightValueSerde, String name)
+
Create an instance of Joined with key, value, and otherValue Serde instances. null values are accepted and will be replaced by the default serdes as defined in config.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        VLeft - left value type
        +
        VRight - right value type
        +
        Parameters:
        +
        keySerde - the key serde to use. If null the default key serde from config will be used
        +
        leftValueSerde - the left value serde to use. If null the default value serde from config will be used
        +
        rightValueSerde - the right value serde to use. If null the default value serde from config will be used
        +
        name - the name used as the base for naming components of the join including any repartition topics
        +
        Returns:
        +
        new Joined instance with the provided serdes
        +
        +
        +
      • +
      • +
        +

        with

        +
public static <K, VLeft, VRight> Joined<K,VLeft,VRight> with(Serde<K> keySerde, Serde<VLeft> leftValueSerde, Serde<VRight> rightValueSerde, String name, Duration gracePeriod)
+
Create an instance of Joined with key, value, and otherValue Serde instances. null values are accepted and will be replaced by the default serdes as defined in config.
        +
        +
        Type Parameters:
        +
        K - key type
        +
VLeft - left value type
        +
        VRight - right value type
        +
        Parameters:
        +
        keySerde - the key serde to use. If null the default key serde from config will be used
        +
        leftValueSerde - the left value serde to use. If null the default value serde from config will be used
        +
        rightValueSerde - the right value serde to use. If null the default value serde from config will be used
        +
        name - the name used as the base for naming components of the join including any repartition topics
        +
        gracePeriod - stream buffer time
        +
        Returns:
        +
        new Joined instance with the provided serdes
        +
        +
        +
      • +
      • +
        +

        keySerde

        +
public static <K, VLeft, VRight> Joined<K,VLeft,VRight> keySerde(Serde<K> keySerde)
+
Create an instance of Joined with a key Serde. null values are accepted and will be replaced by the default key serde as defined in config.
        +
        +
        Type Parameters:
        +
        K - key type
        +
VLeft - left value type
        +
        VRight - right value type
        +
        Parameters:
        +
        keySerde - the key serde to use. If null the default key serde from config will be used
        +
        Returns:
        +
        new Joined instance configured with the keySerde
        +
        +
        +
      • +
      • +
        +

        valueSerde

        +
public static <K, VLeft, VRight> Joined<K,VLeft,VRight> valueSerde(Serde<VLeft> leftValueSerde)
+
Create an instance of Joined with a value Serde. null values are accepted and will be replaced by the default value serde as defined in config.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        VLeft - left value type
        +
        VRight - right value type
        +
        Parameters:
        +
        leftValueSerde - the left value serde to use. If null the default value serde from config will be used
        +
        Returns:
        +
        new Joined instance configured with the valueSerde
        +
        +
        +
      • +
      • +
        +

        otherValueSerde

        +
public static <K, VLeft, VRight> Joined<K,VLeft,VRight> otherValueSerde(Serde<VRight> rightValueSerde)
+
Create an instance of Joined with another value Serde. null values are accepted and will be replaced by the default value serde as defined in config.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        VLeft - value type
        +
        VRight - right value type
        +
        Parameters:
        +
        rightValueSerde - the right value serde to use. If null the default value serde from config will be used
        +
        Returns:
        +
        new Joined instance configured with the otherValueSerde
        +
        +
        +
      • +
      • +
        +

        as

        +
public static <K, VLeft, VRight> Joined<K,VLeft,VRight> as(String name)
+
Create an instance of Joined with a base name for all components of the join; this may include any repartition topics created to complete the join.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        VLeft - left value type
        +
        VRight - right value type
        +
        Parameters:
        +
        name - the name used as the base for naming components of the join including any repartition topics
        +
        Returns:
        +
        new Joined instance configured with the name
        +
        +
        +
      • +
      • +
        +

        withKeySerde

        +
        public Joined<K,VLeft,VRight> withKeySerde(Serde<K> keySerde)
        +
Set the key Serde to be used. Null values are accepted and will be replaced by the default key serde as defined in config.
        +
        +
        Parameters:
        +
        keySerde - the key serde to use. If null the default key serde from config will be used
        +
        Returns:
        +
        new Joined instance configured with the name
        +
        +
        +
      • +
      • +
        +

        withValueSerde

        +
        public Joined<K,VLeft,VRight> withValueSerde(Serde<VLeft> leftValueSerde)
        +
Set the value Serde to be used. Null values are accepted and will be replaced by the default value serde as defined in config.
        +
        +
        Parameters:
        +
        leftValueSerde - the left value serde to use. If null the default value serde from config will be used
        +
        Returns:
        +
        new Joined instance configured with the valueSerde
        +
        +
        +
      • +
      • +
        +

        withOtherValueSerde

        +
        public Joined<K,VLeft,VRight> withOtherValueSerde(Serde<VRight> rightValueSerde)
        +
Set the otherValue Serde to be used. Null values are accepted and will be replaced by the default value serde as defined in config.
        +
        +
        Parameters:
        +
        rightValueSerde - the right value serde to use. If null the default value serde from config will be used
        +
        Returns:
        +
        new Joined instance configured with the valueSerde
        +
        +
        +
      • +
      • +
        +

        withName

        +
        public Joined<K,VLeft,VRight> withName(String name)
        +
Set the base name used for all components of the join; this may include any repartition topics created to complete the join.
        +
        +
        Parameters:
        +
        name - the name used as the base for naming components of the join including any repartition topics
        +
        Returns:
        +
        new Joined instance configured with the name
        +
        +
        +
      • +
      • +
        +

        withGracePeriod

        +
        public Joined<K,VLeft,VRight> withGracePeriod(Duration gracePeriod)
        +
Set the grace period on the stream side of the join. Records will enter a buffer before being processed. Out-of-order records within the grace period will be processed in timestamp order. Late records, arriving outside the grace period, will be processed as they come in; if they are past the table's history retention, this could result in a null join. Long gaps in stream-side record arrival will cause records to be delayed in processing.
        +
        +
        Parameters:
        +
        gracePeriod - the duration of the grace period. Must be less than the joining table's history retention.
        +
        Returns:
        +
        new Joined instance configured with the gracePeriod
        +
        +
        +
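A minimal sketch of configuring a stream-side grace period on the join config. The serdes, name, and 10-minute value are assumptions; the grace period must stay below the joined (versioned) table's history retention, which is assumed to be configured elsewhere.

import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.kstream.Joined;

public class JoinedGraceExample {
    public static void main(String[] args) {
        // Buffer stream records for up to 10 minutes so out-of-order records
        // within that window are joined in timestamp order.
        Joined<String, Long, String> joined = Joined
            .<String, Long, String>with(Serdes.String(), Serdes.Long(), Serdes.String(), "clicks-users-join")
            .withGracePeriod(Duration.ofMinutes(10));
    }
}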
      • +
      • +
        +

        gracePeriod

        +
@Deprecated
public Duration gracePeriod()
        +
        Deprecated. +
        Since 4.0 and should not be used any longer.
        +
        +
        +
      • +
      • +
        +

        keySerde

        +
@Deprecated
public Serde<K> keySerde()
        +
        Deprecated. +
        Since 4.0 and should not be used any longer.
        +
        +
        +
      • +
      • +
        +

        valueSerde

        +
@Deprecated
public Serde<VLeft> valueSerde()
        +
        Deprecated. +
        Since 4.0 and should not be used any longer.
        +
        +
        +
      • +
      • +
        +

        otherValueSerde

        +
@Deprecated
public Serde<VRight> otherValueSerde()
        +
        Deprecated. +
        Since 4.0 and should not be used any longer.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/KGroupedStream.html b/static/41/javadoc/org/apache/kafka/streams/kstream/KGroupedStream.html new file mode 100644 index 000000000..a9d53c6b7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/KGroupedStream.html @@ -0,0 +1,820 @@ + + + + +KGroupedStream (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface KGroupedStream<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - the key type of this grouped stream
    +
    V - the value type of this grouped stream
    +
    +
    +
    public interface KGroupedStream<K,V>
    +
KGroupedStream is an abstraction of a grouped record stream of key-value pairs. It is an intermediate representation of a KStream in order to apply a (windowed) aggregation operation on the original KStream records.

A KGroupedStream can either be co-grouped with other grouped record streams, windowed by applying a windowedBy(...) operation, or aggregated into a KTable.

A KGroupedStream is obtained from a KStream via groupByKey() or groupBy(...).

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        count

        +
        KTable<K,Long> count()
        +
Count the number of records in this stream by the grouped key. Records with null key or value are ignored. The result is written into a local KeyValueStore (which is basically an ever-updating materialized view). Furthermore, updates to the store are sent downstream into a KTable changelog stream.

Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to the same key. The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of parallel running Kafka Streams instances, and the configuration parameters for cache size, and commit interval.

For failure and recovery the store (which always will be of type TimestampedKeyValueStore) will be backed by an internal changelog topic that will be created in Kafka. The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is user-specified in StreamsConfig via parameter APPLICATION_ID_CONFIG, "internalStoreName" is an internal name, and "-changelog" is a fixed suffix. Note that the internal store name may not be queryable through Interactive Queries. You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys and Long values that + represent the latest (rolling) count (i.e., number of records) for each key
        +
        +
        +
      • +
      • +
        +

        count

        +
        KTable<K,Long> count(Named named)
        +
Count the number of records in this stream by the grouped key. Records with null key or value are ignored. The result is written into a local KeyValueStore (which is basically an ever-updating materialized view). Furthermore, updates to the store are sent downstream into a KTable changelog stream.

Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to the same key. The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of parallel running Kafka Streams instances, and the configuration parameters for cache size, and commit interval.

For failure and recovery the store (which always will be of type TimestampedKeyValueStore) will be backed by an internal changelog topic that will be created in Kafka. The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is user-specified in StreamsConfig via parameter APPLICATION_ID_CONFIG, "internalStoreName" is an internal name, and "-changelog" is a fixed suffix. Note that the internal store name may not be queryable through Interactive Queries. You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        named - a Named config used to name the processor in the topology
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys and Long values that + represent the latest (rolling) count (i.e., number of records) for each key
        +
        +
        +
      • +
      • +
        +

        count

        +
        KTable<K,Long> count(Materialized<K,Long,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
Count the number of records in this stream by the grouped key. Records with null key or value are ignored. The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) provided by the given store name in materialized. Furthermore, updates to the store are sent downstream into a KTable changelog stream.

Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to the same key. The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of parallel running Kafka Streams instances, and the configuration parameters for cache size, and commit interval.

To query the local ReadOnlyKeyValueStore it must be obtained via KafkaStreams#store(...).

        
        + KafkaStreams streams = ... // counting words
        + String queryableStoreName = "storeName"; // the store name should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<Long>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        + ReadOnlyKeyValueStore<K, ValueAndTimestamp<Long>> localStore = streams.store(storeQueryParams);
        + K key = "some-word";
        + ValueAndTimestamp<Long> countForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + +

        + For failure and recovery the store (which always will be of type TimestampedKeyValueStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot contain characters other than ASCII + alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + provided store name defined in Materialized, and "-changelog" is a fixed suffix. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        materialized - an instance of Materialized used to materialize a state store. Cannot be null. + Note: the valueSerde will be automatically set to Serdes#Long() + if there is no valueSerde provided
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys and Long values that + represent the latest (rolling) count (i.e., number of records) for each key
        +
        +
        +
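        A hedged sketch of supplying an explicitly named, serde-configured store (store name and serdes are illustrative; assumes the usual imports such as org.apache.kafka.common.utils.Bytes and org.apache.kafka.common.serialization.Serdes):

        KTable<String, Long> counts = groupedStream.count(
            Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store")
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.Long()));   // valueSerde defaults to Serdes.Long() if omitted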
      • +
      • +
        +

        count

        +
        KTable<K,Long> count(Named named, + Materialized<K,Long,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Count the number of records in this stream by the grouped key. + Records with null key or value are ignored. + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + provided by the given store name in materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...). +

        
        + KafkaStreams streams = ... // counting words
        + String queryableStoreName = "storeName"; // the store name should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<Long>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        + ReadOnlyKeyValueStore<K, ValueAndTimestamp<Long>> localStore = streams.store(storeQueryParams);
        + K key = "some-word";
        + ValueAndTimestamp<Long> countForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + +

        + For failure and recovery the store (which always will be of type TimestampedKeyValueStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot contain characters other than ASCII + alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + provided store name defined in Materialized, and "-changelog" is a fixed suffix. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        named - a Named config used to name the processor in the topology
        +
        materialized - an instance of Materialized used to materialize a state store. Cannot be null. + Note: the valueSerde will be automatically set to Serdes#Long() + if there is no valueSerde provided
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys and Long values that + represent the latest (rolling) count (i.e., number of records) for each key
        +
        +
        +
      • +
      • +
        +

        reduce

        +
        KTable<K,V> reduce(Reducer<V> reducer)
        +
        Combine the values of records in this stream by the grouped key. + Records with null key or value are ignored. + Combining implies that the type of the aggregate result is the same as the type of the input value + (cf. aggregate(Initializer, Aggregator)). +

        + The specified Reducer is applied for each input record and computes a new aggregate using the current + aggregate and the record's value. + If there is no current aggregate the Reducer is not applied and the new aggregate will be the record's + value as-is. + Thus, reduce(Reducer) can be used to compute aggregate functions like sum, min, or max. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. + +

        + For failure and recovery the store (which always will be of type TimestampedKeyValueStore) will be backed by + an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        reducer - a Reducer that computes a new aggregate result. Cannot be null.
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that represent the + latest (rolling) aggregate for each key. If the reduce function returns null, it is then interpreted as + deletion for the key, and future messages of the same key coming from upstream operators + will be handled as newly initialized value.
        +
        +
        +
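        For illustration only, a sum over Integer values with the single-argument reduce, assuming a KGroupedStream<String, Integer> named groupedStream:

        KTable<String, Integer> sums = groupedStream.reduce((aggValue, newValue) -> aggValue + newValue);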
      • +
      • +
        +

        reduce

        +
        KTable<K,V> reduce(Reducer<V> reducer, + Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Combine the value of records in this stream by the grouped key. + Records with null key or value are ignored. + Combining implies that the type of the aggregate result is the same as the type of the input value + (cf. aggregate(Initializer, Aggregator, Materialized)). + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + provided by the given store name in materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Reducer is applied for each input record and computes a new aggregate using the current + aggregate (first argument) and the record's value (second argument): +

        
        + // At the example of a Reducer<Long>
        + new Reducer<Long>() {
        +   public Long apply(Long aggValue, Long currValue) {
        +     return aggValue + currValue;
        +   }
        + }
        + 
        +

        + If there is no current aggregate the Reducer is not applied and the new aggregate will be the record's + value as-is. + Thus, reduce(Reducer, Materialized) can be used to compute aggregate functions like sum, min, or + max. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...). +

        
        + KafkaStreams streams = ... // compute sum
        + String queryableStoreName = "storeName"; // the store name should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        + ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>> localStore = streams.store(storeQueryParams);
        + K key = "some-key";
        + ValueAndTimestamp<V> reduceForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + +

        + For failure and recovery the store (which always will be of type TimestampedKeyValueStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        reducer - a Reducer that computes a new aggregate result. Cannot be null.
        +
        materialized - an instance of Materialized used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that represent the + latest (rolling) aggregate for each key
        +
        +
        +
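        A minimal sketch combining the reducer with a queryable store (store name and serde are assumptions), for a KGroupedStream<String, Integer> named groupedStream:

        KTable<String, Integer> sums = groupedStream.reduce(
            (aggValue, newValue) -> aggValue + newValue,
            Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("sum-store")
                .withValueSerde(Serdes.Integer()));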
      • +
      • +
        +

        reduce

        +
        KTable<K,V> reduce(Reducer<V> reducer, + Named named, + Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Combine the value of records in this stream by the grouped key. + Records with null key or value are ignored. + Combining implies that the type of the aggregate result is the same as the type of the input value + (cf. aggregate(Initializer, Aggregator, Materialized)). + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + provided by the given store name in materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Reducer is applied for each input record and computes a new aggregate using the current + aggregate (first argument) and the record's value (second argument): +

        
        + // At the example of a Reducer<Long>
        + new Reducer<Long>() {
        +   public Long apply(Long aggValue, Long currValue) {
        +     return aggValue + currValue;
        +   }
        + }
        + 
        +

        + If there is no current aggregate the Reducer is not applied and the new aggregate will be the record's + value as-is. + Thus, reduce(Reducer, Materialized) can be used to compute aggregate functions like sum, min, or + max. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...). +

        
        + KafkaStreams streams = ... // compute sum
        + String queryableStoreName = "storeName"; // the store name should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        + ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>> localStore = streams.store(storeQueryParams);
        + K key = "some-key";
        + ValueAndTimestamp<V> reduceForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + +

        + For failure and recovery the store (which always will be of type TimestampedKeyValueStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        reducer - a Reducer that computes a new aggregate result. Cannot be null.
        +
        named - a Named config used to name the processor in the topology.
        +
        materialized - an instance of Materialized used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that represent the + latest (rolling) aggregate for each key. If the reduce function returns null, it is then interpreted as + deletion for the key, and future messages of the same key coming from upstream operators + will be handled as newly initialized value.
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        <VOut> KTable<K,VOut> aggregate(Initializer<VOut> initializer, + Aggregator<? super K,? super V,VOut> aggregator)
        +
        Aggregate the values of records in this stream by the grouped key. + Records with null key or value are ignored. + Aggregating is a generalization of combining via reduce(...) as it, for example, + allows the result to have a different type than the input values. +

        + The specified Initializer is applied once directly before the first input record is processed to + provide an initial intermediate aggregation result that is used to process the first record. + The specified Aggregator is applied for each input record and computes a new aggregate using the current + aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value. + Thus, aggregate(Initializer, Aggregator) can be used to compute aggregate functions like + count (cf. count()). +

        + The default value serde from config will be used for serializing the result. + If a different serde is required then you should use aggregate(Initializer, Aggregator, Materialized). +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. + +

        + For failure and recovery the store (which always will be of type TimestampedKeyValueStore) will be backed by + an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Type Parameters:
        +
        VOut - the value type of the resulting KTable
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result
        +
        aggregator - an Aggregator that computes a new aggregate result
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that represent the + latest (rolling) aggregate for each key. If the aggregate function returns null, it is then interpreted as + deletion for the key, and future messages of the same key coming from upstream operators + will be handled as newly initialized value.
        +
        +
        +
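        An illustrative sketch (names assumed) that concatenates String values per key with the two-argument aggregate; it relies on the default value serde in StreamsConfig being able to handle the String result:

        // Assuming a KGroupedStream<String, String> named groupedStream and Serdes.String() as the default serdes
        KTable<String, String> concatenated = groupedStream.aggregate(
            () -> "",                                                                   // Initializer: empty aggregate
            (key, value, aggregate) -> aggregate.isEmpty() ? value : aggregate + "," + value);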
      • +
      • +
        +

        aggregate

        +
        <VOut> KTable<K,VOut> aggregate(Initializer<VOut> initializer, + Aggregator<? super K,? super V,VOut> aggregator, + Materialized<K,VOut,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Aggregate the values of records in this stream by the grouped key. + Records with null key or value are ignored. + Aggregating is a generalization of combining via reduce(...) as it, for example, + allows the result to have a different type than the input values. + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + that can be queried by the given store name in materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied once directly before the first input record is processed to + provide an initial intermediate aggregation result that is used to process the first record. + The specified Aggregator is applied for each input record and computes a new aggregate using the current + aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value. + Thus, aggregate(Initializer, Aggregator, Materialized) can be used to compute aggregate functions like + count (cf. count()). +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // some aggregation on value type double
        + String queryableStoreName = "storeName"; // the store name should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<VR>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        + ReadOnlyKeyValueStore<K, ValueAndTimestamp<VR>> localStore = streams.store(storeQueryParams);
        + K key = "some-key";
        + ValueAndTimestamp<VR> aggForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + +

        + For failure and recovery the store (which always will be of type TimestampedKeyValueStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot contain characters other than ASCII + alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + provided store name defined in Materialized, and "-changelog" is a fixed suffix. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Type Parameters:
        +
        VOut - the value type of the resulting KTable
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result
        +
        aggregator - an Aggregator that computes a new aggregate result
        +
        materialized - an instance of Materialized used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that represent the + latest (rolling) aggregate for each key
        +
        +
        +
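        A hedged sketch that sums Integer values into a Long total, setting the value serde via Materialized (store name and types are assumptions), for a KGroupedStream<String, Integer> named groupedStream:

        KTable<String, Long> totals = groupedStream.aggregate(
            () -> 0L,                                             // initial aggregate
            (key, value, aggregate) -> aggregate + value,         // add each Integer value to the running Long total
            Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("totals-store")
                .withValueSerde(Serdes.Long()));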
      • +
      • +
        +

        aggregate

        +
        <VOut> KTable<K,VOut> aggregate(Initializer<VOut> initializer, + Aggregator<? super K,? super V,VOut> aggregator, + Named named, + Materialized<K,VOut,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Aggregate the values of records in this stream by the grouped key. + Records with null key or value are ignored. + Aggregating is a generalization of combining via reduce(...) as it, for example, + allows the result to have a different type than the input values. + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + that can be queried by the given store name in materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied once directly before the first input record is processed to + provide an initial intermediate aggregation result that is used to process the first record. + The specified Aggregator is applied for each input record and computes a new aggregate using the current + aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value. + Thus, aggregate(Initializer, Aggregator, Materialized) can be used to compute aggregate functions like + count (cf. count()). +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // some aggregation on value type double
        + String queryableStoreName = "storeName"; // the store name should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<VR>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        + ReadOnlyKeyValueStore<K, ValueAndTimestamp<VR>> localStore = streams.store(storeQueryParams);
        + K key = "some-key";
        + ValueAndTimestamp<VR> aggForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + +

        + For failure and recovery the store (which always will be of type TimestampedKeyValueStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot contain characters other than ASCII + alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + provided store name defined in Materialized, and "-changelog" is a fixed suffix. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Type Parameters:
        +
        VOut - the value type of the resulting KTable
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result
        +
        aggregator - an Aggregator that computes a new aggregate result
        +
        named - a Named config used to name the processor in the topology
        +
        materialized - an instance of Materialized used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that represent the + latest (rolling) aggregate for each key. If the aggregate function returns null, it is then interpreted as + deletion for the key, and future messages of the same key coming from upstream operators + will be handled as newly initialized value.
        +
        +
        +
      • +
      • +
        +

        windowedBy

        +
        <W extends Window> TimeWindowedKStream<K,V> windowedBy(Windows<W> windows)
        +
        Create a new TimeWindowedKStream instance that can be used to perform windowed aggregations.
        +
        +
        Type Parameters:
        +
        W - the window type
        +
        Parameters:
        +
        windows - the specification of the aggregation Windows
        +
        Returns:
        +
        an instance of TimeWindowedKStream
        +
        +
        +
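        For example (illustrative only, window size assumed), a five-minute tumbling-window count; note that the result keys become Windowed<K>:

        KTable<Windowed<String>, Long> windowedCounts = groupedStream
            .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(5)))
            .count();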
      • +
      • +
        +

        windowedBy

        +
        TimeWindowedKStream<K,V> windowedBy(SlidingWindows windows)
        +
        Create a new TimeWindowedKStream instance that can be used to perform sliding windowed aggregations.
        +
        +
        Parameters:
        +
        windows - the specification of the aggregation SlidingWindows
        +
        Returns:
        +
        an instance of TimeWindowedKStream
        +
        +
        +
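        A sketch of a sliding-window count (the time difference is chosen arbitrarily for illustration):

        KTable<Windowed<String>, Long> slidingCounts = groupedStream
            .windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(Duration.ofSeconds(30)))
            .count();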
      • +
      • +
        +

        windowedBy

        +
        SessionWindowedKStream<K,V> windowedBy(SessionWindows windows)
        +
        Create a new SessionWindowedKStream instance that can be used to perform session windowed aggregations.
        +
        +
        Parameters:
        +
        windows - the specification of the aggregation SessionWindows
        +
        Returns:
        +
        an instance of SessionWindowedKStream
        +
        +
        +
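        A sketch of a session-window count with an assumed five-minute inactivity gap:

        KTable<Windowed<String>, Long> sessionCounts = groupedStream
            .windowedBy(SessionWindows.ofInactivityGapWithNoGrace(Duration.ofMinutes(5)))
            .count();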
      • +
      • +
        +

        cogroup

        +
        <VOut> CogroupedKStream<K,VOut> cogroup(Aggregator<? super K,? super V,VOut> aggregator)
        +
        Create a new CogroupedKStream from this grouped KStream to allow cogrouping other + KGroupedStream to it. + CogroupedKStream is an abstraction of multiple grouped record streams of KeyValue pairs. + It is an intermediate representation after a grouping of KStreams, before the + aggregations are applied to the new partitions resulting in a KTable. +

        + The specified Aggregator is applied in the actual aggregation step for each input record and computes a new aggregate using the current aggregate (or for the very + first record per key using the initial intermediate aggregation result provided via the Initializer that + is passed into CogroupedKStream.aggregate(Initializer)) and the record's value.

        +
        +
        Type Parameters:
        +
        VOut - the type of the output values
        +
        Parameters:
        +
        aggregator - an Aggregator that computes a new aggregate result
        +
        Returns:
        +
        a CogroupedKStream
        +
        +
        +
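        A minimal co-grouping sketch, assuming two grouped streams groupedClicks and groupedViews, both of type KGroupedStream<String, Long>, merged into one Long aggregate (default serdes assumed):

        KTable<String, Long> combined = groupedClicks
            .cogroup((key, value, aggregate) -> aggregate + value)                   // aggregator for the first stream
            .cogroup(groupedViews, (key, value, aggregate) -> aggregate + value)     // add a second grouped stream
            .aggregate(() -> 0L);                                                    // shared initializer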
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/KGroupedTable.html b/static/41/javadoc/org/apache/kafka/streams/kstream/KGroupedTable.html new file mode 100644 index 000000000..4ed0e2e16 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/KGroupedTable.html @@ -0,0 +1,916 @@ + + + + +KGroupedTable (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface KGroupedTable<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - the (new) grouping key type of this re-grouped table
    +
    V - the (new) value type of this re-grouped table
    +
    +
    +
    public interface KGroupedTable<K,V>
    +
    KGroupedTable is an abstraction of a re-grouped changelog stream from a primary-keyed table, + on a different grouping key than the original primary key. + It is an intermediate representation of a KTable in order to apply an aggregation operation on the original + KTable records. + +

    A KGroupedTable is obtained from a KTable via groupBy(...).
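    As an illustration (all names assumed), a KGroupedTable is typically created by re-keying a KTable, e.g. grouping a user table by region:

    KTable<String, String> usersByRegion = ...;  // hypothetical table: userId -> region
    KGroupedTable<String, String> groupedByRegion =
        usersByRegion.groupBy((userId, region) -> KeyValue.pair(region, userId));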

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        count

        +
        KTable<K,Long> count(Materialized<K,Long,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Count the number of records of the original KTable that got mapped to + the same key into a new instance of KTable. + Records with null key are ignored. + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + that can be queried using the provided queryableStoreName. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // counting words
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<Long>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        + ReadOnlyKeyValueStore<K, ValueAndTimestamp<Long>> localStore = streams.store(storeQueryParams);
        + K key = "some-word";
        + ValueAndTimestamp<Long> countForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot contain characters other than ASCII + alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + provided store name defined in Materialized, and "-changelog" is a fixed suffix. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        materialized - the instance of Materialized used to materialize the state store. Cannot be null
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys and Long values that + represent the latest (rolling) count (i.e., number of records) for each key
        +
        +
        +
      • +
      • +
        +

        count

        +
        KTable<K,Long> count(Named named, + Materialized<K,Long,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Count the number of records of the original KTable that got mapped to + the same key into a new instance of KTable. + Records with null key are ignored. + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + that can be queried using the provided queryableStoreName. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // counting words
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<Long>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        + ReadOnlyKeyValueStore<K, ValueAndTimestamp<Long>> localStore = streams.store(storeQueryParams);
        + K key = "some-word";
        + ValueAndTimestamp<Long> countForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot contain characters other than ASCII + alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + provided store name defined in Materialized, and "-changelog" is a fixed suffix. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        named - the Named config used to name the processor in the topology
        +
        materialized - the instance of Materialized used to materialize the state store. Cannot be null
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys and Long values that + represent the latest (rolling) count (i.e., number of records) for each key
        +
        +
        +
      • +
      • +
        +

        count

        +
        KTable<K,Long> count()
        +
        Count the number of records of the original KTable that got mapped to + the same key into a new instance of KTable. + Records with null key are ignored. + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys and Long values that + represent the latest (rolling) count (i.e., number of records) for each key
        +
        +
        +
      • +
      • +
        +

        count

        +
        KTable<K,Long> count(Named named)
        +
        Count the number of records of the original KTable that got mapped to + the same key into a new instance of KTable. + Records with null key are ignored. + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        named - the Named config used to name the processor in the topology
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys and Long values that + represent the latest (rolling) count (i.e., number of records) for each key
        +
        +
        +
      • +
      • +
        +

        reduce

        +
        KTable<K,V> reduce(Reducer<V> adder, + Reducer<V> subtractor, + Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Combine the value of records of the original KTable that got mapped to the same key into a new instance of KTable. + Records with null key are ignored. + Combining implies that the type of the aggregate result is the same as the type of the input value + (cf. aggregate(Initializer, Aggregator, Aggregator, Materialized)). + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + that can be queried using the provided queryableStoreName. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + Each update to the original KTable results in a two step update of the result KTable. + The specified adder is applied for each update record and computes a new aggregate using the + current aggregate (first argument) and the record's value (second argument) by adding the new record to the + aggregate. + The specified subtractor is applied for each "replaced" record of the original KTable + and computes a new aggregate using the current aggregate (first argument) and the record's value (second + argument) by "removing" the "replaced" record from the aggregate. + If there is no current aggregate the Reducer is not applied and the new aggregate will be the record's + value as-is. + Thus, reduce(Reducer, Reducer, Materialized) can be used to compute aggregate functions like sum. + For sum, the adder and subtractor would work as follows: +

        
        + public class SumAdder implements Reducer<Integer> {
        +   public Integer apply(Integer currentAgg, Integer newValue) {
        +     return currentAgg + newValue;
        +   }
        + }
        +
        + public class SumSubtractor implements Reducer<Integer> {
        +   public Integer apply(Integer currentAgg, Integer oldValue) {
        +     return currentAgg - oldValue;
        +   }
        + }
        + 
        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // counting words
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        + ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>> localStore = streams.store(storeQueryParams);
        + K key = "some-word";
        + ValueAndTimestamp<V> reduceForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot contain characters other than ASCII + alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + provided store name defined in Materialized, and "-changelog" is a fixed suffix. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        adder - a Reducer that adds a new value to the aggregate result
        +
        subtractor - a Reducer that removes an old value from the aggregate result
        +
        materialized - the instance of Materialized used to materialize the state store. Cannot be null
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that represent the + latest (rolling) aggregate for each key
        +
        +
        +
      • +
      • +
        +

        reduce

        +
        KTable<K,V> reduce(Reducer<V> adder, + Reducer<V> subtractor, + Named named, + Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Combine the value of records of the original KTable that got mapped to the same key into a new instance of KTable. + Records with null key are ignored. + Combining implies that the type of the aggregate result is the same as the type of the input value + (cf. aggregate(Initializer, Aggregator, Aggregator, Materialized)). + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + that can be queried using the provided queryableStoreName. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + Each update to the original KTable results in a two step update of the result KTable. + The specified adder is applied for each update record and computes a new aggregate using the + current aggregate (first argument) and the record's value (second argument) by adding the new record to the + aggregate. + The specified subtractor is applied for each "replaced" record of the original KTable + and computes a new aggregate using the current aggregate (first argument) and the record's value (second + argument) by "removing" the "replaced" record from the aggregate. + If there is no current aggregate the Reducer is not applied and the new aggregate will be the record's + value as-is. + Thus, reduce(Reducer, Reducer, Named, Materialized) can be used to compute aggregate functions like sum. + For sum, the adder and subtractor would work as follows: +

        
        + public class SumAdder implements Reducer<Integer> {
        +   public Integer apply(Integer currentAgg, Integer newValue) {
        +     return currentAgg + newValue;
        +   }
        + }
        +
        + public class SumSubtractor implements Reducer<Integer> {
        +   public Integer apply(Integer currentAgg, Integer oldValue) {
        +     return currentAgg - oldValue;
        +   }
        + }
        + 
        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // counting words
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
        + ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>> localStore = streams.store(storeQueryParams);
        + K key = "some-word";
        + ValueAndTimestamp<V> reduceForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot contain characters other than ASCII + alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + provided store name defined in Materialized, and "-changelog" is a fixed suffix. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        adder - a Reducer that adds a new value to the aggregate result
        +
        subtractor - a Reducer that removes an old value from the aggregate result
        +
        named - a Named config used to name the processor in the topology
        +
        materialized - the instance of Materialized used to materialize the state store. Cannot be null
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that represent the + latest (rolling) aggregate for each key
        +
        +
        +
      • +
      • +
        +

        reduce

        +
        KTable<K,V> reduce(Reducer<V> adder, + Reducer<V> subtractor)
        +
        Combine the value of records of the original KTable that got mapped to the same key into a new instance of KTable. + Records with null key are ignored. + Combining implies that the type of the aggregate result is the same as the type of the input value + (cf. aggregate(Initializer, Aggregator, Aggregator)). + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + Each update to the original KTable results in a two step update of the result KTable. + The specified adder is applied for each update record and computes a new aggregate using the + current aggregate and the record's value by adding the new record to the aggregate. + The specified subtractor is applied for each "replaced" record of the original KTable + and computes a new aggregate using the current aggregate and the record's value by "removing" the "replaced" + record from the aggregate. + If there is no current aggregate the Reducer is not applied and the new aggregate will be the record's + value as-is. + Thus, reduce(Reducer, Reducer) can be used to compute aggregate functions like sum. + For sum, the adder and subtractor would work as follows: +

        
        + public class SumAdder implements Reducer<Integer> {
        +   public Integer apply(Integer currentAgg, Integer newValue) {
        +     return currentAgg + newValue;
        +   }
        + }
        +
        + public class SumSubtractor implements Reducer<Integer> {
        +   public Integer apply(Integer currentAgg, Integer oldValue) {
        +     return currentAgg - oldValue;
        +   }
        + }
        + 
        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. + + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        adder - a Reducer that adds a new value to the aggregate result
        +
        subtractor - a Reducer that removes an old value from the aggregate result
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that represent the + latest (rolling) aggregate for each key
        +
        +
        +
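        A compact lambda version of the adder/subtractor pair above (illustrative only, assuming a KGroupedTable<String, Integer> named groupedTable):

        KTable<String, Integer> sums = groupedTable.reduce(
            (currentAgg, newValue) -> currentAgg + newValue,   // adder: applied for the new value of an updated record
            (currentAgg, oldValue) -> currentAgg - oldValue);  // subtractor: applied for the replaced old value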
      • +
      • +
        +

        aggregate

        +
        <VR> KTable<K,VR> aggregate(Initializer<VR> initializer, + Aggregator<? super K,? super V,VR> adder, + Aggregator<? super K,? super V,VR> subtractor, + Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Aggregate the value of records of the original KTable that got mapped to the same key into a new instance of KTable. + Records with null key are ignored. + Aggregating is a generalization of combining via reduce(...) as it, + for example, allows the result to have a different type than the input values. + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + that can be queried using the provided queryableStoreName. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied once directly before the first input record is processed to + provide an initial intermediate aggregation result that is used to process the first record. + Each update to the original KTable results in a two step update of the result KTable. + The specified adder is applied for each update record and computes a new aggregate using the + current aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value by adding the new record to the aggregate. + The specified subtractor is applied for each "replaced" record of the original KTable + and computes a new aggregate using the current aggregate and the record's value by "removing" the "replaced" + record from the aggregate. + Thus, aggregate(Initializer, Aggregator, Aggregator, Materialized) can be used to compute aggregate functions + like sum. + For sum, the initializer, adder, and subtractor would work as follows: +

        
        + // in this example, LongSerde.class must be set as value serde in Materialized#withValueSerde
        + public class SumInitializer implements Initializer<Long> {
        +   public Long apply() {
        +     return 0L;
        +   }
        + }
        +
        + public class SumAdder implements Aggregator<String, Integer, Long> {
        +   public Long apply(String key, Integer newValue, Long aggregate) {
        +     return aggregate + newValue;
        +   }
        + }
        +
        + public class SumSubtractor implements Aggregator<String, Integer, Long> {
        +   public Long apply(String key, Integer oldValue, Long aggregate) {
        +     return aggregate - oldValue;
        +   }
        + }
        + 
        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
KafkaStreams streams = ... // counting words
StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<VR>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
ReadOnlyKeyValueStore<K, ValueAndTimestamp<VR>> localStore = streams.store(storeQueryParams);
K key = "some-word";
ValueAndTimestamp<VR> aggregateForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot contain characters other than ASCII alphanumerics, '.', '_' and '-'. The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is user-specified in StreamsConfig via parameter APPLICATION_ID_CONFIG, "storeName" is the provided store name defined in Materialized, and "-changelog" is a fixed suffix. You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Type Parameters:
        +
        VR - the value type of the aggregated KTable
        +
        Parameters:
        +
        initializer - an Initializer that provides an initial aggregate result value
        +
        adder - an Aggregator that adds a new record to the aggregate result
        +
subtractor - an Aggregator that removes an old record from the aggregate result
        +
        materialized - the instance of Materialized used to materialize the state store. Cannot be null
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that represent the + latest (rolling) aggregate for each key
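As an illustrative sketch, the sum classes above might be wired into this overload as follows, assuming a grouped table groupedTable with String keys and Integer values, a store named "sum-store", and the Long value serde noted in the comment above:

// assumption: KGroupedTable<String, Integer> groupedTable was obtained via a prior groupBy(...)
KTable<String, Long> sums = groupedTable.aggregate(
    new SumInitializer(),      // provides the initial aggregate 0L
    new SumAdder(),            // applied for every new/updated record
    new SumSubtractor(),       // applied for every replaced record
    Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("sum-store")
        .withValueSerde(Serdes.Long()));  // LongSerde as value serde, as required above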

        aggregate

        +
        <VR> KTable<K,VR> aggregate(Initializer<VR> initializer, + Aggregator<? super K,? super V,VR> adder, + Aggregator<? super K,? super V,VR> subtractor, + Named named, + Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Aggregate the value of records of the original KTable that got mapped to the same key into a new instance of KTable. + Records with null key are ignored. + Aggregating is a generalization of combining via reduce(...) as it, + for example, allows the result to have a different type than the input values. + The result is written into a local KeyValueStore (which is basically an ever-updating materialized view) + that can be queried using the provided queryableStoreName. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied once directly before the first input record is processed to + provide an initial intermediate aggregation result that is used to process the first record. + Each update to the original KTable results in a two step update of the result KTable. + The specified adder is applied for each update record and computes a new aggregate using the + current aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value by adding the new record to the aggregate. + The specified subtractor is applied for each "replaced" record of the original KTable + and computes a new aggregate using the current aggregate and the record's value by "removing" the "replaced" + record from the aggregate. + Thus, aggregate(Initializer, Aggregator, Aggregator, Materialized) can be used to compute aggregate functions + like sum. + For sum, the initializer, adder, and subtractor would work as follows: +

        
// in this example, LongSerde.class must be set as value serde in Materialized#withValueSerde
public class SumInitializer implements Initializer<Long> {
  public Long apply() {
    return 0L;
  }
}

public class SumAdder implements Aggregator<String, Integer, Long> {
  public Long apply(String key, Integer newValue, Long aggregate) {
    return aggregate + newValue;
  }
}

public class SumSubtractor implements Aggregator<String, Integer, Long> {
  public Long apply(String key, Integer oldValue, Long aggregate) {
    return aggregate - oldValue;
  }
}
        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
KafkaStreams streams = ... // counting words
StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<VR>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
ReadOnlyKeyValueStore<K, ValueAndTimestamp<VR>> localStore = streams.store(storeQueryParams);
K key = "some-word";
ValueAndTimestamp<VR> aggregateForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot contain characters other than ASCII alphanumerics, '.', '_' and '-'. The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is user-specified in StreamsConfig via parameter APPLICATION_ID_CONFIG, "storeName" is the provided store name defined in Materialized, and "-changelog" is a fixed suffix. You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Type Parameters:
        +
        VR - the value type of the aggregated KTable
        +
        Parameters:
        +
        initializer - an Initializer that provides an initial aggregate result value
        +
        adder - an Aggregator that adds a new record to the aggregate result
        +
subtractor - an Aggregator that removes an old record from the aggregate result
        +
        named - a Named config used to name the processor in the topology
        +
        materialized - the instance of Materialized used to materialize the state store. Cannot be null
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that represent the + latest (rolling) aggregate for each key

        aggregate

        +
        <VR> KTable<K,VR> aggregate(Initializer<VR> initializer, + Aggregator<? super K,? super V,VR> adder, + Aggregator<? super K,? super V,VR> subtractor)
        +
Aggregate the value of records of the original KTable that got mapped to the same key into a new instance of KTable using default serializers and deserializers. Records with null key are ignored. Aggregating is a generalization of combining via reduce(...) as it, for example, allows the result to have a different type than the input values. If the result value type does not match the default value serde you should use aggregate(Initializer, Aggregator, Aggregator, Materialized). The result is written into a local KeyValueStore (which is basically an ever-updating materialized view). Furthermore, updates to the store are sent downstream into a KTable changelog stream.

        + The specified Initializer is applied once directly before the first input record is processed to + provide an initial intermediate aggregation result that is used to process the first record. + Each update to the original KTable results in a two step update of the result KTable. + The specified adder is applied for each update record and computes a new aggregate using the + current aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value by adding the new record to the aggregate. + The specified subtractor is applied for each "replaced" record of the original KTable + and computes a new aggregate using the current aggregate and the record's value by "removing" the "replaced" + record from the aggregate. + Thus, aggregate(Initializer, Aggregator, Aggregator, String) can be used to compute aggregate functions + like sum. + For sum, the initializer, adder, and subtractor would work as follows: +

        
// in this example, LongSerde.class must be set as default value serde in StreamsConfig
public class SumInitializer implements Initializer<Long> {
  public Long apply() {
    return 0L;
  }
}

public class SumAdder implements Aggregator<String, Integer, Long> {
  public Long apply(String key, Integer newValue, Long aggregate) {
    return aggregate + newValue;
  }
}

public class SumSubtractor implements Aggregator<String, Integer, Long> {
  public Long apply(String key, Integer oldValue, Long aggregate) {
    return aggregate - oldValue;
  }
}
        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. + + You can retrieve all generated internal topic names via Topology.describe().
        +
        +
        Type Parameters:
        +
        VR - the value type of the aggregated KTable
        +
        Parameters:
        +
initializer - an Initializer that provides an initial aggregate result value
+
adder - an Aggregator that adds a new record to the aggregate result
+
subtractor - an Aggregator that removes an old record from the aggregate result
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that represent the + latest (rolling) aggregate for each key

        aggregate

        +
        <VR> KTable<K,VR> aggregate(Initializer<VR> initializer, + Aggregator<? super K,? super V,VR> adder, + Aggregator<? super K,? super V,VR> subtractor, + Named named)
        +
Aggregate the value of records of the original KTable that got mapped to the same key into a new instance of KTable using default serializers and deserializers. Records with null key are ignored. Aggregating is a generalization of combining via reduce(...) as it, for example, allows the result to have a different type than the input values. If the result value type does not match the default value serde you should use aggregate(Initializer, Aggregator, Aggregator, Materialized). The result is written into a local KeyValueStore (which is basically an ever-updating materialized view). Furthermore, updates to the store are sent downstream into a KTable changelog stream.

        + The specified Initializer is applied once directly before the first input record is processed to + provide an initial intermediate aggregation result that is used to process the first record. + Each update to the original KTable results in a two step update of the result KTable. + The specified adder is applied for each update record and computes a new aggregate using the + current aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value by adding the new record to the aggregate. + The specified subtractor is applied for each "replaced" record of the original KTable + and computes a new aggregate using the current aggregate and the record's value by "removing" the "replaced" + record from the aggregate. + Thus, aggregate(Initializer, Aggregator, Aggregator, String) can be used to compute aggregate functions + like sum. + For sum, the initializer, adder, and subtractor would work as follows: +

        
// in this example, LongSerde.class must be set as default value serde in StreamsConfig
public class SumInitializer implements Initializer<Long> {
  public Long apply() {
    return 0L;
  }
}

public class SumAdder implements Aggregator<String, Integer, Long> {
  public Long apply(String key, Integer newValue, Long aggregate) {
    return aggregate + newValue;
  }
}

public class SumSubtractor implements Aggregator<String, Integer, Long> {
  public Long apply(String key, Integer oldValue, Long aggregate) {
    return aggregate - oldValue;
  }
}
        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. + + You can retrieve all generated internal topic names via Topology.describe().
        +
        +
        Type Parameters:
        +
        VR - the value type of the aggregated KTable
        +
        Parameters:
        +
initializer - an Initializer that provides an initial aggregate result value
+
adder - an Aggregator that adds a new record to the aggregate result
+
subtractor - an Aggregator that removes an old record from the aggregate result
        +
        named - a Named config used to name the processor in the topology
        +
        Returns:
        +
        a KTable that contains "update" records with unmodified keys, and values that represent the + latest (rolling) aggregate for each key
diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/KStream.html b/static/41/javadoc/org/apache/kafka/streams/kstream/KStream.html new file mode 100644 index 000000000..8be35ecdb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/KStream.html @@ -0,0 +1,2443 @@ KStream (kafka 4.1.0 API)

    Interface KStream<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - the key type of this stream
    +
    V - the value type of this stream
    +
    +
    +
    public interface KStream<K,V>
    +
    KStream is an abstraction of a record stream of key-value pairs, i.e., each record is + an independent entity/event in the real world. + For example a user X might buy two items I1 and I2, and thus there might be two records <K:I1>, <K:I2> + in the stream. + +

    A KStream is either defined from one or multiple Kafka topics that + are consumed message by message or the result of a KStream transformation. + A KTable can also be directly converted into a KStream. + +

    A KStream can be transformed record by record, joined with another KStream, KTable, + GlobalKTable, or can be aggregated into a KTable. + A KStream can also be directly converted into a KTable. + Kafka Streams DSL can be mixed-and-matched with the Processor API (PAPI) (cf. Topology) via + process(...) and processValues(...).
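As an illustrative sketch, assuming the topic names "input-topic" and "output-topic" and String/Integer serdes, a minimal topology around a KStream might look like this:

StreamsBuilder builder = new StreamsBuilder();
// consume a record stream from the (assumed) input topic
KStream<String, String> source = builder.stream("input-topic",
        Consumed.with(Serdes.String(), Serdes.String()));
// stateless record-by-record transformation
KStream<String, Integer> lengths = source.mapValues(value -> value.length());
// write the result to the (assumed) output topic
lengths.to("output-topic", Produced.with(Serdes.String(), Serdes.Integer()));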


      Method Details

      +
        +
      • +
        +

        filter

        +
        KStream<K,V> filter(Predicate<? super K,? super V> predicate)
        +
        Create a new KStream that consists of all records of this stream which satisfy the given predicate. + All records that do not satisfy the predicate are dropped. + This is a stateless record-by-record operation (cf. processValues(FixedKeyProcessorSupplier, String...) + for stateful record processing or if you need access to the record's timestamp, headers, or other metadata).
        +
        +
        Parameters:
        +
        predicate - a filter Predicate that is applied to each record
        +
        Returns:
        +
        A KStream that contains only those records that satisfy the given predicate.
        +
        See Also:
        +
        + +
        +
        +
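As an illustrative sketch, assuming an upstream stream temperatures with non-null Integer values, the predicate might be supplied as a lambda:

KStream<String, Integer> temperatures = ...; // some upstream stream
// keep only readings at or above zero; all other records are dropped
KStream<String, Integer> nonNegative = temperatures.filter((key, value) -> value >= 0);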
        +
      • +
      • +
        +

        filter

        +
        KStream<K,V> filter(Predicate<? super K,? super V> predicate, + Named named)
        +
        See filter(Predicate). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        filterNot

        +
        KStream<K,V> filterNot(Predicate<? super K,? super V> predicate)
        +
Create a new KStream that consists of all records of this stream which do not satisfy the given predicate. All records that do satisfy the predicate are dropped. This is a stateless record-by-record operation (cf. processValues(FixedKeyProcessorSupplier, String...) for stateful record processing or if you need access to the record's timestamp, headers, or other metadata).
        +
        +
        Parameters:
        +
        predicate - a filter Predicate that is applied to each record
        +
        Returns:
        +
        A KStream that contains only those records that do not satisfy the given predicate.
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        filterNot

        +
        KStream<K,V> filterNot(Predicate<? super K,? super V> predicate, + Named named)
        +
        See filterNot(Predicate). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        selectKey

        +
        <KOut> KStream<KOut,V> selectKey(KeyValueMapper<? super K,? super V,? extends KOut> mapper)
        +
        Create a new KStream that consists of all records of this stream but with a modified key. + The provided KeyValueMapper is applied to each input record and computes a new key (possibly of a + different type) for it. + Thus, an input record <K,V> can be transformed into an output record <K':V>. + This is a stateless record-by-record operation (cf. process(ProcessorSupplier, String...) for + stateful record processing or if you need access to the record's timestamp, headers, or other metadata). + +

        For example, you can use this transformation to set a key for a key-less input record <null,V> + by extracting a key from the value within your KeyValueMapper. The example below computes the new key + as the length of the value string. +

        
KStream<Byte[], String> keyLessStream = builder.stream("key-less-topic");
KStream<Integer, String> keyedStream = keyLessStream.selectKey(new KeyValueMapper<Byte[], String, Integer>() {
    public Integer apply(Byte[] key, String value) {
        return value.length();
    }
});
        + Setting a new key might result in an internal data redistribution if a key-based operator (like an aggregation + or join) is applied to the result KStream.
        +
        +
        Type Parameters:
        +
        KOut - the new key type of the result KStream
        +
        Parameters:
        +
        mapper - a KeyValueMapper that computes a new key for each input record
        +
        Returns:
        +
        A KStream that contains records with new key (possibly of a different type) and unmodified value.
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        selectKey

        +
        <KOut> KStream<KOut,V> selectKey(KeyValueMapper<? super K,? super V,? extends KOut> mapper, + Named named)
        +
        See selectKey(KeyValueMapper). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        mapValues

        +
        <VOut> KStream<K,VOut> mapValues(ValueMapper<? super V,? extends VOut> mapper)
        +
        Create a new KStream that consists of all records of this stream but with a modified value. + The provided ValueMapper is applied to each input record value and computes a new value (possibly + of a different type) for it. + Thus, an input record <K,V> can be transformed into an output record <K:V'>. + If you need read access to the input record key, use mapValues(ValueMapperWithKey). + This is a stateless record-by-record operation (cf. + processValues(FixedKeyProcessorSupplier, String...) for stateful value processing or if you need access + to the record's timestamp, headers, or other metadata). + +

The example below counts the number of tokens of the value string.

        
KStream<String, String> inputStream = builder.stream("topic");
KStream<String, Integer> outputStream = inputStream.mapValues(new ValueMapper<String, Integer>() {
    public Integer apply(String value) {
        return value.split(" ").length;
    }
});
        + + Setting a new value preserves data co-location with respect to the key. + Thus, no internal data redistribution is required if a key-based operator (like an aggregation + or join) is applied to the result KStream (cf. map(KeyValueMapper)).
        +
        +
        Type Parameters:
        +
        VOut - the value type of the result stream
        +
        Parameters:
        +
        mapper - a ValueMapper that computes a new value for each input record
        +
        Returns:
        +
        A KStream that contains records with unmodified key and new values (possibly of a different type).
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        mapValues

        +
        <VOut> KStream<K,VOut> mapValues(ValueMapper<? super V,? extends VOut> mapper, + Named named)
        +
        See mapValues(ValueMapper). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        mapValues

        +
        <VOut> KStream<K,VOut> mapValues(ValueMapperWithKey<? super K,? super V,? extends VOut> mapper)
        +
        See mapValues(ValueMapper). + +

        Note that the key is read-only and must not be modified, as this can lead to corrupt partitioning and + incorrect results.

        +
        +
      • +
      • +
        +

        mapValues

        +
        <VOut> KStream<K,VOut> mapValues(ValueMapperWithKey<? super K,? super V,? extends VOut> mapper, + Named named)
        +
        See mapValues(ValueMapperWithKey). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        map

        +
        <KOut, +VOut> KStream<KOut,VOut> map(KeyValueMapper<? super K,? super V,? extends KeyValue<? extends KOut,? extends VOut>> mapper)
        +
        Create a new KStream that consists of a modified record for each record in this stream. + The provided KeyValueMapper is applied to each input record and computes a new output record + (possibly of a different key and/or value type) for it. + Thus, an input record <K,V> can be transformed into an output record <K':V'>. + This is a stateless record-by-record operation (cf. process(ProcessorSupplier, String...) for + stateful record processing or if you need access to the record's timestamp, headers, or other metadata). + +

The example below normalizes the String key to upper-case letters and counts the number of tokens of the value string.

        
KStream<String, String> inputStream = builder.stream("topic");
KStream<String, Integer> outputStream = inputStream.map(new KeyValueMapper<String, String, KeyValue<String, Integer>>() {
    public KeyValue<String, Integer> apply(String key, String value) {
        return new KeyValue<>(key.toUpperCase(), value.split(" ").length);
    }
});
        + The provided KeyValueMapper must return a KeyValue type and must not return null. + +

        Mapping records might result in an internal data redistribution if a key-based operator (like an + aggregation or join) is applied to the result KStream (cf. mapValues(ValueMapper)).

        +
        +
        Type Parameters:
        +
        KOut - the key type of the result stream
        +
        VOut - the value type of the result stream
        +
        Parameters:
        +
        mapper - a KeyValueMapper that computes a new KeyValue pair for each input record
        +
        Returns:
        +
        A KStream that contains records with new key and new value (possibly of different types).
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        map

        +
        <KOut, +VOut> KStream<KOut,VOut> map(KeyValueMapper<? super K,? super V,? extends KeyValue<? extends KOut,? extends VOut>> mapper, + Named named)
        +
        See map(KeyValueMapper). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        flatMap

        +
        <KOut, +VOut> KStream<KOut,VOut> flatMap(KeyValueMapper<? super K,? super V,? extends Iterable<? extends KeyValue<? extends KOut,? extends VOut>>> mapper)
        +
        Create a new KStream that consists of zero or more records for each record in this stream. + The provided KeyValueMapper is applied to each input record and computes zero or more output records + (possibly of a different key and/or value type) for it. + Thus, an input record <K,V> can be transformed into output records <K':V'>, <K':V'>, .... + This is a stateless record-by-record operation (cf. process(ProcessorSupplier, String...) for + stateful record processing or if you need access to the record's timestamp, headers, or other metadata). + +

The example below splits input records <null:String> containing sentences as values into their words and emits a record <word:1> for each word.

        
KStream<byte[], String> inputStream = builder.stream("topic");
KStream<String, Integer> outputStream = inputStream.flatMap(
    new KeyValueMapper<byte[], String, Iterable<KeyValue<String, Integer>>>() {
        public Iterable<KeyValue<String, Integer>> apply(byte[] key, String value) {
            String[] tokens = value.split(" ");
            List<KeyValue<String, Integer>> result = new ArrayList<>(tokens.length);

            for (String token : tokens) {
                result.add(new KeyValue<>(token, 1));
            }

            return result;
        }
    });
        + The provided KeyValueMapper must return an Iterable (e.g., any Collection + type) and the return value must not be null. + +

        Flat-mapping records might result in an internal data redistribution if a key-based operator (like an + aggregation or join) is applied to the result KStream. (cf. flatMapValues(ValueMapper))

        +
        +
        Type Parameters:
        +
        KOut - the key type of the result stream
        +
        VOut - the value type of the result stream
        +
        Parameters:
        +
mapper - a KeyValueMapper<K, V, Iterable<KeyValue<K', V'>>> that computes zero or more new KeyValue pairs for each input record
        +
        Returns:
        +
        A KStream that contains more or fewer records with new keys and values (possibly of different types).
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        flatMap

        +
        <KR, +VOut> KStream<KR,VOut> flatMap(KeyValueMapper<? super K,? super V,? extends Iterable<? extends KeyValue<? extends KR,? extends VOut>>> mapper, + Named named)
        +
        See flatMap(KeyValueMapper). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        flatMapValues

        +
        <VOut> KStream<K,VOut> flatMapValues(ValueMapper<? super V,? extends Iterable<? extends VOut>> mapper)
        +
        Create a new KStream that consists of zero or more records with modified value for each record + in this stream. + The provided ValueMapper is applied to each input record value and computes zero or more output values + (possibly of a different type) for it. + Thus, an input record <K,V> can be transformed into output records <K:V'>, <K:V'>, .... + If you need read access to the input record key, use flatMapValues(ValueMapperWithKey). + This is a stateless record-by-record operation (cf. processValues(FixedKeyProcessorSupplier, String...) + for stateful record processing or if you need access to the record's timestamp, headers, or other metadata). + +

        The example below splits input records <null:String> containing sentences as values into their words. +

        
KStream<byte[], String> inputStream = builder.stream("topic");
KStream<byte[], String> outputStream = inputStream.flatMapValues(new ValueMapper<String, Iterable<String>>() {
    public Iterable<String> apply(String value) {
        return Arrays.asList(value.split(" "));
    }
});
        + The provided ValueMapper must return an Iterable (e.g., any Collection type) + and the return value must not be null. + +

        Splitting a record into multiple records with the same key preserves data co-location with respect to the key. + Thus, no internal data redistribution is required if a key-based operator (like an aggregation or join) + is applied to the result KStream (cf. flatMap(KeyValueMapper)).

        +
        +
        Type Parameters:
        +
        VOut - the value type of the result stream
        +
        Parameters:
        +
        mapper - a ValueMapper<V, Iterable<V>> that computes zero or more new values + for each input record
        +
        Returns:
        +
        A KStream that contains more or fewer records with unmodified keys but new values (possibly of a different type).
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        flatMapValues

        +
        <VOut> KStream<K,VOut> flatMapValues(ValueMapper<? super V,? extends Iterable<? extends VOut>> mapper, + Named named)
        +
        See flatMapValues(ValueMapper). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        flatMapValues

        +
        <VOut> KStream<K,VOut> flatMapValues(ValueMapperWithKey<? super K,? super V,? extends Iterable<? extends VOut>> mapper)
        +
        See flatMapValues(ValueMapper). + +

        Note that the key is read-only and must not be modified, as this can lead to corrupt partitioning and + incorrect results.

        +
        +
      • +
      • +
        +

        flatMapValues

        +
        <VOut> KStream<K,VOut> flatMapValues(ValueMapperWithKey<? super K,? super V,? extends Iterable<? extends VOut>> mapper, + Named named)
        +
        See flatMapValues(ValueMapperWithKey). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        print

        +
        void print(Printed<K,V> printed)
        +
        Print the records of this KStream using the options provided by Printed. + Note that this is mainly for debugging/testing purposes, and it will try to flush on each record print. + It SHOULD NOT be used for production usage if performance requirements are concerned.
        +
        +
        Parameters:
        +
        printed - options for printing
        +
        +
        +
      • +
      • +
        +

        foreach

        +
        void foreach(ForeachAction<? super K,? super V> action)
        +
        Perform an action on each record of this KStream. + This is a stateless record-by-record operation (cf. process(ProcessorSupplier, String...) for + stateful record processing or if you need access to the record's timestamp, headers, or other metadata). + +

Foreach is a terminal operation that may trigger side effects (such as logging or statistics collection) and returns void (cf. peek(ForeachAction)).

        Note that this operation may execute multiple times for a single record in failure cases, + and it is not guarded by "exactly-once processing guarantees".

        +
        +
        Parameters:
        +
        action - an action to perform on each record
        +
        +
        +
      • +
      • +
        +

        foreach

        +
        void foreach(ForeachAction<? super K,? super V> action, + Named named)
        +
        See foreach(ForeachAction). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        peek

        +
        KStream<K,V> peek(ForeachAction<? super K,? super V> action)
        +
        Perform an action on each record of this KStream. + This is a stateless record-by-record operation (cf. process(ProcessorSupplier, String...) for + stateful record processing or if you need access to the record's timestamp, headers, or other metadata). + +

Peek is a non-terminal operation that may trigger side effects (such as logging or statistics collection) and returns an unchanged KStream (cf. foreach(ForeachAction)).

        Note that this operation may execute multiple times for a single record in failure cases, + and it is not guarded by "exactly-once processing guarantees".

        +
        +
        Parameters:
        +
        action - an action to perform on each record
        +
        Returns:
        +
        An unmodified KStream.
        +
        +
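As an illustrative sketch, assuming an upstream stream stream, peek might be used for debug logging while leaving the stream unchanged:

KStream<String, String> stream = ...; // some upstream stream
// log each record as a side effect; the returned stream is identical to the input
KStream<String, String> logged = stream.peek(
        (key, value) -> System.out.println("observed " + key + " -> " + value));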
        +
      • +
      • +
        +

        peek

        +
        KStream<K,V> peek(ForeachAction<? super K,? super V> action, + Named named)
        +
        See peek(ForeachAction). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        split

        +
        BranchedKStream<K,V> split()
        +
        Split this KStream into different branches. The returned BranchedKStream instance can be used + for routing the records to different branches depending on evaluation against the supplied predicates. + Records are evaluated against the predicates in the order they are provided with the first matching predicate + accepting the record. Branching is a stateless record-by-record operation. + See BranchedKStream for a detailed description and usage example. + +

        Splitting a KStream guarantees that each input record is sent to at most one result KStream. + There is no operator for broadcasting/multicasting records into multiple result KStream. + If you want to broadcast records, you can apply multiple downstream operators to the same KStream + instance: +

        
// Broadcasting: every record of `stream` is sent to all three operators for processing
KStream<...> stream1 = stream.map(...);
KStream<...> stream2 = stream.mapValues(...);
KStream<...> stream3 = stream.flatMap(...);

Multicasting can be achieved with broadcasting into multiple filter operations:

// Multicasting: every record of `stream` is sent to all three filters, and thus, may be part of
// multiple result streams, `stream1`, `stream2`, and/or `stream3`
KStream<...> stream1 = stream.filter(predicate1);
KStream<...> stream2 = stream.filter(predicate2);
KStream<...> stream3 = stream.filter(predicate3);
        +
        +
        Returns:
        +
        A BranchedKStream that provides methods for routing the records to different branches.
        +
        See Also:
        +
        + +
        +
        +
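As an illustrative sketch, assuming the split name "split-" and branch names "large" and "rest", branching might look like this (the keys of the returned map are the split name concatenated with the branch names):

Map<String, KStream<String, Integer>> branches = stream
        .split(Named.as("split-"))
        .branch((key, value) -> value > 100, Branched.as("large"))
        .defaultBranch(Branched.as("rest"));
KStream<String, Integer> large = branches.get("split-large"); // records with value > 100
KStream<String, Integer> rest = branches.get("split-rest");   // everything else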
        +
      • +
      • +
        +

        split

        +
        BranchedKStream<K,V> split(Named named)
        +
        See split(). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        merge

        +
        KStream<K,V> merge(KStream<K,V> otherStream)
        +
        Merge this KStream and the given KStream. + +

        There is no ordering guarantee between records from this KStream and records from + the provided KStream in the merged stream. + Relative order is preserved within each input stream though (i.e., records within one input + stream are processed in order).

        +
        +
        Parameters:
        +
        otherStream - a stream which is to be merged into this stream
        +
        Returns:
        +
        A merged stream containing all records from this and the provided KStream
        +
        See Also:
        +
        + +
        +
        +
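As an illustrative sketch, assuming two input streams clicksEurope and clicksAsia:

KStream<String, String> clicksEurope = ...; // first input stream
KStream<String, String> clicksAsia = ...;   // second input stream
// interleave both streams; order is preserved within each input, but not across them
KStream<String, String> allClicks = clicksEurope.merge(clicksAsia);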
        +
      • +
      • +
        +

        merge

        +
        KStream<K,V> merge(KStream<K,V> otherStream, + Named named)
        +
        See merge(KStream). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        repartition

        +
        KStream<K,V> repartition()
        +
        Materialize this stream to an auto-generated repartition topic and create a new KStream + from the auto-generated topic. + +

        The created topic is considered an internal topic and is meant to be used only by the current + Kafka Streams instance. + The topic will be named as "${applicationId}-<name>-repartition", + where "applicationId" is user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, + "<name>" is an internally generated name, and "-repartition" is a fixed suffix. + The number of partitions for the repartition topic is determined based on the upstream topics partition numbers. + Furthermore, the topic will be created with infinite retention time and data will be automatically purged + by Kafka Streams. + +

        You can retrieve all generated internal topic names via Topology.describe(). + To explicitly set key/value serdes, specify the number of used partitions or the partitioning strategy, + or to customize the name of the repartition topic, use repartition(Repartitioned).

        +
        +
        Returns:
        +
        A KStream that contains the exact same, but repartitioned records as this KStream.
        +
        +
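As an illustrative sketch of the Repartitioned variant, assuming the name "by-value", String serdes, and six partitions:

// after a key-changing operation, force a named repartition with an explicit partition count
KStream<String, String> repartitioned = stream
        .selectKey((key, value) -> value)
        .repartition(Repartitioned.<String, String>as("by-value")
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.String())
                .withNumberOfPartitions(6));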
        +
      • +
      • +
        +

        repartition

        +
        KStream<K,V> repartition(Repartitioned<K,V> repartitioned)
        + +
        +
      • +
      • +
        +

        to

        +
        void to(String topic)
        +
        Materialize this stream to a topic. + The topic should be manually created before it is used (i.e., before the Kafka Streams application is + started). + +

        To explicitly set key/value serdes or the partitioning strategy, use to(String, Produced).

        +
        +
        Parameters:
        +
        topic - the output topic name
        +
        See Also:
        +
        + +
        +
        +
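As an illustrative sketch, assuming a result stream counts, a pre-created topic "word-counts", and String/Long serdes:

KStream<String, Long> counts = ...; // some result stream
// the target topic is assumed to exist already
counts.to("word-counts", Produced.with(Serdes.String(), Serdes.Long()));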
        +
      • +
      • +
        +

        to

        +
        void to(String topic, + Produced<K,V> produced)
        + +
        +
      • +
      • +
        +

        to

        +
        void to(TopicNameExtractor<K,V> topicExtractor)
        +
        Materialize the record of this stream to different topics. + The provided TopicNameExtractor is applied to each input record to compute the output topic name. + All topics should be manually created before they are used (i.e., before the Kafka Streams application is started). + +

        To explicitly set key/value serdes or the partitioning strategy, use to(TopicNameExtractor, Produced).

        +
        +
        Parameters:
        +
        topicExtractor - the extractor to determine the name of the Kafka topic to write to for each record
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        to

        +
        void to(TopicNameExtractor<K,V> topicExtractor, + Produced<K,V> produced)
        + +
        +
      • +
      • +
        +

        toTable

        +
        KTable<K,V> toTable()
        +
        Convert this stream to a KTable. + The conversion is a logical operation and only changes the "interpretation" of the records, i.e., each record of + this stream is a "fact/event" and is re-interpreted as a "change/update-per-key" now + (cf. KStream vs KTable). The resulting KTable is essentially a changelog stream. + To "upsert" the records of this stream into a materialized KTable (i.e., into a state store), + use toTable(Materialized). + +

        Note that null keys are not supported by KTables and records with null key will be dropped. + +

        If a key changing operator was used before this operation (e.g., selectKey(KeyValueMapper), + map(KeyValueMapper), flatMap(KeyValueMapper) or process(ProcessorSupplier, String...)) + Kafka Streams will automatically repartition the data, i.e., it will create an internal repartitioning topic in + Kafka and write and re-read the data via this topic such that the resulting KTable is correctly + partitioned by its key. + +

        This internal repartitioning topic will be named "${applicationId}-<name>-repartition", + where "applicationId" is user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, + "<name>" is an internally generated name, and "-repartition" is a fixed suffix. + The number of partitions for the repartition topic is determined based on the upstream topics partition numbers. + Furthermore, the topic will be created with infinite retention time and data will be automatically purged + by Kafka Streams. + +

        Note: If the result KTable is materialized, it is not possible to apply + "source topic optimization", because + repartition topics are considered transient and don't allow to recover the result KTable in case of + a failure; hence, a dedicated changelog topic is required to guarantee fault-tolerance. + +

        You can retrieve all generated internal topic names via Topology.describe(). + To customize the name of the repartition topic, use toTable(Named). + For more control over the repartitioning, use repartition(Repartitioned) before toTable().

        +
        +
        Returns:
        +
        A KTable that contains the same records as this KStream.
        +
        +
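As an illustrative sketch of the toTable(Materialized) variant mentioned above, assuming a store named "latest-profiles" and String serdes:

KStream<String, String> profileUpdates = ...; // per-key update events
// re-interpret the stream as updates per key and materialize it into a queryable store
KTable<String, String> latestProfiles = profileUpdates.toTable(
        Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("latest-profiles")
            .withKeySerde(Serdes.String())
            .withValueSerde(Serdes.String()));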
        +
      • +
      • +
        +

        toTable

        +
        KTable<K,V> toTable(Named named)
        + +
        +
      • +
      • +
        +

        toTable

        +
        KTable<K,V> toTable(Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        + +
        +
      • +
      • +
        +

        toTable

        +
        KTable<K,V> toTable(Named named, + Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        + +
        +
      • +
      • +
        +

        groupByKey

        +
        KGroupedStream<K,V> groupByKey()
        +
        Group the records by their current key into a KGroupedStream while preserving the original values. + KGroupedStream can be further grouped with other streams to form a CogroupedKStream. + (Co-)Grouping a stream on the record key is required before a windowing or aggregation operator can be applied + to the data (cf. KGroupedStream). + By default, the current key is used as grouping key, but a new grouping key can be set via + groupBy(KeyValueMapper). + In either case, if the grouping key is null, the record will be dropped. + +

        If a key changing operator was used before this operation (e.g., selectKey(KeyValueMapper), + map(KeyValueMapper), flatMap(KeyValueMapper) or + process(ProcessorSupplier, String...)) Kafka Streams will automatically repartition the data, i.e., + it will create an internal repartitioning topic in Kafka and write and re-read the data via this topic such that + the resulting KGroupedStream is correctly partitioned by the grouping key, before the downstream + windowing/aggregation will be applied. + +

        This internal repartition topic will be named "${applicationId}-<name>-repartition", + where "applicationId" is user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, + "<name>" is an internally generated name, and "-repartition" is a fixed suffix. + The number of partitions for the repartition topic is determined based on the upstream topics partition numbers. + Furthermore, the topic will be created with infinite retention time and data will be automatically purged + by Kafka Streams. + +

        You can retrieve all generated internal topic names via Topology.describe(). + To explicitly set key/value serdes or to customize the name of the repartition topic, use groupByKey(Grouped). + For more control over the repartitioning, use repartition(Repartitioned) before groupByKey().

        +
        +
        Returns:
        +
        A KGroupedStream that contains the grouped records of the original KStream.
        +
        +
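As an illustrative sketch, assuming String serdes, grouping by the existing key and counting per key might look like this:

KStream<String, String> words = ...; // keyed record stream
// group on the current key (no repartition needed if the key was not changed upstream) and count
KTable<String, Long> countsPerKey = words
        .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .count();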
        +
      • +
      • +
        +

        groupByKey

        +
        KGroupedStream<K,V> groupByKey(Grouped<K,V> grouped)
        +
        See groupByKey(). + +

        Takes an additional Grouped parameter, that allows to explicitly set key/value serdes or to customize + the name of the potentially created internal repartition topic.

        +
        +
      • +
      • +
        +

        groupBy

        +
        <KOut> KGroupedStream<KOut,V> groupBy(KeyValueMapper<? super K,? super V,KOut> keySelector)
        +
        Group the records of this KStream on a new key (in contrast to groupByKey()). + This operation is semantically equivalent to selectKey(KeyValueMapper) followed by groupByKey(). + +

        Because a new key is selected, an internal repartitioning topic will be created in Kafka. + See groupByKey() for more details about auto-repartitioning.

        +
        +
        Type Parameters:
        +
        KOut - the new key type of the result KGroupedStream
        +
        Parameters:
        +
        keySelector - a KeyValueMapper that computes a new key for grouping
        +
        +
        +
      • +
      • +
        +

        groupBy

        +
        <KOut> KGroupedStream<KOut,V> groupBy(KeyValueMapper<? super K,? super V,KOut> keySelector, + Grouped<KOut,V> grouped)
        +
        See groupBy(KeyValueMapper). + +

        Takes an additional Grouped parameter, that allows to explicitly set key/value serdes or to customize + the name of the created internal repartition topic.

        +
        +
      • +
      • +
        +

        join

        +
        <VRight, +VOut> KStream<K,VOut> join(KStream<K,VRight> rightStream, + ValueJoiner<? super V,? super VRight,? extends VOut> joiner, + JoinWindows windows)
        +
Join records of this (left) stream with another (right) KStream's records using a windowed inner equi-join. The join is computed using the records' key as join attribute, i.e., leftRecord.key == rightRecord.key. Furthermore, two records are only joined if their timestamps are close to each other as defined by the given JoinWindows, i.e., the window defines an additional join predicate on the record timestamps.

        For each pair of records meeting both join predicates the provided ValueJoiner will be called to + compute a value (with arbitrary type) for the result record. + The key of the result record is the same as for both joining input records. + If you need read access to the join key, use join(KStream, ValueJoinerWithKey, JoinWindows). + If an input record's key or value is null the input record will be dropped, and no join computation + is triggered. + Similarly, so-called late records, i.e., records with a timestamp belonging to an already closed window (based + on stream-time progress, window size, and grace period), will be dropped. + +

Example (assuming all input records belong to the correct windows):

left        right       result
<K1:A>
<K2:B>      <K2:b>      <K2:ValueJoiner(B,b)>
            <K3:c>
Both KStreams (or to be more precise, their underlying source topics) need to have the same number of partitions. If this is not the case (and if no auto-repartitioning happens, see further below), you would need to call repartition(Repartitioned) (for at least one of the two KStreams) before doing the join and specify the matching number of partitions via the Repartitioned parameter to align the partition count for both inputs to each other. Furthermore, both KStreams need to be co-partitioned on the join key (i.e., use the same partitioner). Note: Kafka Streams cannot verify the used partitioner, so it is the user's responsibility to ensure that the same partitioner is used for both inputs for the join.

        If a key changing operator was used before this operation on either input stream + (e.g., selectKey(KeyValueMapper), map(KeyValueMapper), flatMap(KeyValueMapper) or + process(ProcessorSupplier, String...)) Kafka Streams will automatically repartition the data of the + corresponding input stream, i.e., it will create an internal repartitioning topic in Kafka and write and re-read + the data via this topic such that data is correctly partitioned by the join key. + +

        The repartitioning topic(s) will be named "${applicationId}-<name>-repartition", + where "applicationId" is user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, + "<name>" is an internally generated name, and "-repartition" is a fixed suffix. + The number of partitions for the repartition topic(s) is determined based on the partition numbers of both + upstream topics, and Kafka Streams will automatically align the number of partitions if required for + co-partitioning. + Furthermore, the topic(s) will be created with infinite retention time and data will be automatically purged + by Kafka Streams. + +

        Both of the joined KStreams will be materialized in local state stores. + For failure and recovery each store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-<storename>-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, + "storeName" is an internally generated name, and "-changelog" is a fixed suffix. + +

You can retrieve all generated internal topic names via Topology.describe(). To explicitly set key/value serdes, to customize the names of the repartition and changelog topic, or to customize the used state store, use join(KStream, ValueJoiner, JoinWindows, StreamJoined). For more control over the repartitioning, use repartition(Repartitioned) on either input before join().

        +
        +
        Type Parameters:
        +
        VRight - the value type of the right stream
        +
        VOut - the value type of the result stream
        +
        Parameters:
        +
        rightStream - the KStream to be joined with this stream
        +
        joiner - a ValueJoiner that computes the join result for a pair of matching records
        +
        windows - the specification of the JoinWindows
        +
        Returns:
        +
        A KStream that contains join-records, one for each matched record-pair, with the corresponding + key and a value computed by the given ValueJoiner.
        +
        See Also:
        +
        + +
        +
        +
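As an illustrative sketch of a windowed inner join with explicit serdes, assuming an orders stream, a co-partitioned customers stream, and a five-minute window:

KStream<String, Double> orders = ...;    // left input
KStream<String, String> customers = ...; // right input, co-partitioned with orders
// join records of the same key whose timestamps differ by at most 5 minutes
KStream<String, String> enriched = orders.join(
        customers,
        (orderValue, customerName) -> customerName + " ordered " + orderValue,
        JoinWindows.ofTimeDifferenceWithNoGrace(Duration.ofMinutes(5)),
        StreamJoined.with(Serdes.String(), Serdes.Double(), Serdes.String()));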
        +
      • +
      • +
        +

        join

        +
        <VRight, +VOut> KStream<K,VOut> join(KStream<K,VRight> rightStream, + ValueJoinerWithKey<? super K,? super V,? super VRight,? extends VOut> joiner, + JoinWindows windows)
        +
        See join(KStream, ValueJoiner, JoinWindows). + +

        Note that the key is read-only and must not be modified, as this can lead to corrupt partitioning and + incorrect results.

        +
        +
      • +
      • +
        +

        join

        +
        <VRight, +VOut> KStream<K,VOut> join(KStream<K,VRight> rightStream, + ValueJoiner<? super V,? super VRight,? extends VOut> joiner, + JoinWindows windows, + StreamJoined<K,V,VRight> streamJoined)
        + +
        +
      • +
      • +
        +

        join

        +
        <VRight, +VOut> KStream<K,VOut> join(KStream<K,VRight> rightStream, + ValueJoinerWithKey<? super K,? super V,? super VRight,? extends VOut> joiner, + JoinWindows windows, + StreamJoined<K,V,VRight> streamJoined)
        +
        See join(KStream, ValueJoiner, JoinWindows). + +

        Note that the key is read-only and must not be modified, as this can lead to corrupt partitioning and + incorrect results.

        +
        +
      • +
      • +
        +

        leftJoin

        +
        <VRight, +VOut> KStream<K,VOut> leftJoin(KStream<K,VRight> rightStream, + ValueJoiner<? super V,? super VRight,? extends VOut> joiner, + JoinWindows windows)
        +
Join records of this (left) stream with another (right) KStream's records using a windowed left equi-join. In contrast to an inner join, all records from this stream will produce at least one output record (more details below). The join is computed using the records' key as join attribute, i.e., leftRecord.key == rightRecord.key. Furthermore, two records are only joined if their timestamps are close to each other as defined by the given JoinWindows, i.e., the window defines an additional join predicate on the record timestamps.

        For each pair of records meeting both join predicates the provided ValueJoiner will be called to + compute a value (with arbitrary type) for the result record. + Furthermore, for each input record of this KStream that does not have any join-partner in the right + stream (i.e., no record with the same key within the join interval), ValueJoiner will be called with a + null value for the right stream. + +

        Note: By default, non-joining records from this stream are buffered until their join window closes, and + corresponding left-join results for these records are emitted with some delay. + If you want to get left-join results without any delay, you can use JoinWindows#of(Duration) [deprecated] instead. + However, such an "eager" left-join result could be a spurious result, because the same record may find actual + join partners later, producing additional inner-join results. + +

The key of the result record is the same as for both joining input records, or the left input record's key for a left-join result. If you need read access to the join key, use leftJoin(KStream, ValueJoinerWithKey, JoinWindows). If a left input record's value is null the input record will be dropped, and no join computation is triggered. Note that for left input records, null keys are supported (in contrast to inner join), resulting in a left-join result. If a right input record's key or value is null the input record will be dropped, and no join computation is triggered. Input records of either side that are so-called late records, i.e., records with a timestamp belonging to an already closed window (based on stream-time progress, window size, and grace period), will be dropped.

Example (assuming all input records belong to the correct windows, not taking actual emit/window-close time for left-join results, or eager/spurious results into account):

    left     | right    | result
    ---------+----------+--------------------------
    <K1:A>   |          | <K1:ValueJoiner(A,null)>
    <K2:B>   | <K2:b>   | <K2:ValueJoiner(B,b)>
             | <K3:c>   |

For more details about co-partitioning requirements, (auto-)repartitioning, and more see join(KStream, ValueJoiner, JoinWindows).

        +
        +
        Returns:
        +
        A KStream that contains join-records, one for each matched record-pair plus one for each + non-matching record of this KStream, with the corresponding key and a value computed by the + given ValueJoiner.
        +
        See Also:
        +
        + +
        +
        +
        +
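A corresponding left-join sketch, reusing builder, clicks, and views from the inner-join snippet above (illustrative, not from the original Javadoc):

    // Unmatched left-side records eventually produce <key, ValueJoiner(leftValue, null)>
    // once their join window closes.
    KStream<String, String> leftJoined = clicks.leftJoin(
        views,
        (clickValue, viewValue) -> clickValue + "/" + (viewValue == null ? "no-view" : viewValue),
        JoinWindows.ofTimeDifferenceWithNoGrace(Duration.ofMinutes(5)),
        StreamJoined.with(Serdes.String(), Serdes.String(), Serdes.String()));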
      • +
      • +
        +

        leftJoin

        +
        <VRight, +VOut> KStream<K,VOut> leftJoin(KStream<K,VRight> rightStream, + ValueJoinerWithKey<? super K,? super V,? super VRight,? extends VOut> joiner, + JoinWindows windows)
        +
        See leftJoin(KStream, ValueJoiner, JoinWindows). + +

        Note that the key is read-only and must not be modified, as this can lead to corrupt partitioning and + incorrect results.

        +
        +
      • +
      • +
        +

        leftJoin

        +
        <VRight, +VOut> KStream<K,VOut> leftJoin(KStream<K,VRight> rightStream, + ValueJoiner<? super V,? super VRight,? extends VOut> joiner, + JoinWindows windows, + StreamJoined<K,V,VRight> streamJoined)
        + +
        +
      • +
      • +
        +

        leftJoin

        +
        <VRight, +VOut> KStream<K,VOut> leftJoin(KStream<K,VRight> rightStream, + ValueJoinerWithKey<? super K,? super V,? super VRight,? extends VOut> joiner, + JoinWindows windows, + StreamJoined<K,V,VRight> streamJoined)
        +
        See leftJoin(KStream, ValueJoiner, JoinWindows). + +

        Note that the key is read-only and must not be modified, as this can lead to corrupt partitioning and + incorrect results.

        +
        +
      • +
      • +
        +

        outerJoin

        +
        <VRight, +VOut> KStream<K,VOut> outerJoin(KStream<K,VRight> otherStream, + ValueJoiner<? super V,? super VRight,? extends VOut> joiner, + JoinWindows windows)
        +
Join records of this (left) stream with another (right) KStream's records using a windowed outer equi-join. In contrast to an inner join or left join, all records from both streams will produce at least one output record (more details below). The join is computed using the records' key as join attribute, i.e., leftRecord.key == rightRecord.key. Furthermore, two records are only joined if their timestamps are close to each other as defined by the given JoinWindows, i.e., the window defines an additional join predicate on the record timestamps.

        For each pair of records meeting both join predicates the provided ValueJoiner will be called to + compute a value (with arbitrary type) for the result record. + Furthermore, for each input record of either KStream that does not have any join-partner in the other + stream (i.e., no record with the same key within the join interval), ValueJoiner will be called with a + null value for right/left stream, respectively. + +

        Note: By default, non-joining records from either stream are buffered until their join window closes, and + corresponding outer-join results for these records are emitted with some delay. + If you want to get outer-join results without any delay, you can use JoinWindows#of(Duration) [deprecated] instead. + However, such an "eager" outer-join result could be a spurious result, because the same record may find actual + join partners later, producing additional inner-join results. + +

The key of the result record is the same as for both joining input records, or the left/right input record's key for an outer-join result, respectively. If you need read access to the join key, use outerJoin(KStream, ValueJoinerWithKey, JoinWindows). If an input record's value is null the input record will be dropped, and no join computation is triggered. Note that input records with null keys are supported (in contrast to inner join), resulting in left/right join results. Input records of either side that are so-called late records, i.e., records with a timestamp belonging to an already closed window (based on stream-time progress, window size, and grace period), will be dropped.

Example (assuming all input records belong to the correct windows, not taking actual emit/window-close time for outer-join results, or eager/spurious results into account):

    left     | right    | result
    ---------+----------+--------------------------
    <K1:A>   |          | <K1:ValueJoiner(A,null)>
    <K2:B>   | <K2:b>   | <K2:ValueJoiner(B,b)>
             | <K3:c>   | <K3:ValueJoiner(null,c)>

For more details about co-partitioning requirements, (auto-)repartitioning, and more see join(KStream, ValueJoiner, JoinWindows).

        +
        +
        Returns:
        +
        A KStream that contains join-records, one for each matched record-pair plus one for each + non-matching record of either input KStream, with the corresponding key and a value computed + by the given ValueJoiner.
        +
        See Also:
        +
        + +
        +
        +
        +
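An outer-join sketch under the same assumptions (reusing clicks and views from the earlier snippets); note the joiner must tolerate null on either side, though never both at once:

    KStream<String, String> outerJoined = clicks.outerJoin(
        views,
        (clickValue, viewValue) ->
            (clickValue == null ? "-" : clickValue) + "/" + (viewValue == null ? "-" : viewValue),
        JoinWindows.ofTimeDifferenceWithNoGrace(Duration.ofMinutes(5)),
        StreamJoined.with(Serdes.String(), Serdes.String(), Serdes.String()));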
      • +
      • +
        +

        outerJoin

        +
        <VRight, +VOut> KStream<K,VOut> outerJoin(KStream<K,VRight> otherStream, + ValueJoinerWithKey<? super K,? super V,? super VRight,? extends VOut> joiner, + JoinWindows windows)
        +
        See outerJoin(KStream, ValueJoiner, JoinWindows). + +

        Note that the key is read-only and must not be modified, as this can lead to corrupt partitioning.

        +
        +
      • +
      • +
        +

        outerJoin

        +
        <VRight, +VOut> KStream<K,VOut> outerJoin(KStream<K,VRight> otherStream, + ValueJoiner<? super V,? super VRight,? extends VOut> joiner, + JoinWindows windows, + StreamJoined<K,V,VRight> streamJoined)
        + +
        +
      • +
      • +
        +

        outerJoin

        +
        <VRight, +VOut> KStream<K,VOut> outerJoin(KStream<K,VRight> otherStream, + ValueJoinerWithKey<? super K,? super V,? super VRight,? extends VOut> joiner, + JoinWindows windows, + StreamJoined<K,V,VRight> streamJoined)
        +
        See outerJoin(KStream, ValueJoiner, JoinWindows). + +

        Note that the key is read-only and must not be modified, as this can lead to corrupt partitioning.

        +
        +
      • +
      • +
        +

        join

        +
        <TableValue, +VOut> KStream<K,VOut> join(KTable<K,TableValue> table, + ValueJoiner<? super V,? super TableValue,? extends VOut> joiner)
        +
        Join records of this stream with KTable's records using non-windowed inner equi-join. + The join is a primary key table lookup join with join attribute streamRecord.key == tableRecord.key. + "Table lookup join" means, that results are only computed if KStream records are processed. + This is done by performing a lookup for matching records into the internal KTable state. + In contrast, processing KTable input records will only update the internal KTable state and + will not produce any result records. + +

        For each KStream record that finds a joining record in the KTable the provided + ValueJoiner will be called to compute a value (with arbitrary type) for the result record. + The key of the result record is the same as for both joining input records. + If you need read access to the join key, use join(KTable, ValueJoinerWithKey). + If a KStream input record's key or value is null the input record will be dropped, and no join + computation is triggered. + If a KTable input record's key is null the input record will be dropped, and the table state + won't be updated. + KTable input records with null values are considered deletes (so-called tombstone) for the table. + +

Example:

    KStream   | KTable   | state    | result
    ----------+----------+----------+------------------------
    <K1:A>    |          |          |
              | <K1:b>   | <K1:b>   |
    <K1:C>    |          | <K1:b>   | <K1:ValueJoiner(C,b)>

By default, KStream records are processed by performing a lookup for matching records in the current (i.e., processing time) internal KTable state. This default implementation does not handle out-of-order records in either input of the join well. See join(KTable, ValueJoiner, Joined) on how to configure a stream-table join to handle out-of-order data.

        KStream and KTable (or to be more precise, their underlying source topics) need to have the + same number of partitions (cf. join(GlobalKTable, KeyValueMapper, ValueJoiner)). + If this is not the case (and if no auto-repartitioning happens for the KStream, see further below), + you would need to call repartition(Repartitioned) for this KStream before doing the join, + specifying the same number of partitions via Repartitioned parameter as the given KTable. + Furthermore, KStream and KTable need to be co-partitioned on the join key + (i.e., use the same partitioner). + Note: Kafka Streams cannot verify the used partitioner, so it is the user's responsibility to ensure + that the same partitioner is used for both inputs of the join. + +

        If a key changing operator was used on this KStream before this operation + (e.g., selectKey(KeyValueMapper), map(KeyValueMapper), flatMap(KeyValueMapper) or + process(ProcessorSupplier, String...)) Kafka Streams will automatically repartition the data of this + KStream, i.e., it will create an internal repartitioning topic in Kafka and write and re-read + the data via this topic such that data is correctly partitioned by the KTable's key. + +

        The repartitioning topic will be named "${applicationId}-<name>-repartition", + where "applicationId" is user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, + "<name>" is an internally generated name, and "-repartition" is a fixed suffix. + The number of partitions for the repartition topic is determined based on number of partitions of the + KTable. + Furthermore, the topic(s) will be created with infinite retention time and data will be automatically purged + by Kafka Streams. + +

        You can retrieve all generated internal topic names via Topology.describe(). + To explicitly set key/value serdes or to customize the names of the repartition topic, + use join(KTable, ValueJoiner, Joined). + For more control over the repartitioning, use repartition(Repartitioned) before join().

        +
        +
        Type Parameters:
        +
        TableValue - the value type of the table
        +
        VOut - the value type of the result stream
        +
        Parameters:
        +
        table - the KTable to be joined with this stream
        +
        joiner - a ValueJoiner that computes the join result for a pair of matching records
        +
        Returns:
        +
        A KStream that contains join-records, one for each matched stream record, with the corresponding + key and a value computed by the given ValueJoiner.
        +
        See Also:
        +
        + +
        +
        +
        +
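A minimal stream-table join sketch, reusing builder and clicks from the earlier snippets (imports analogous to the first sketch); the "user-profiles" topic and the enrichment logic are illustrative assumptions:

    // Table lookup join: only stream records trigger output; table records merely update table state.
    KTable<String, String> userProfiles =
        builder.table("user-profiles", Consumed.with(Serdes.String(), Serdes.String()));

    KStream<String, String> enriched = clicks.join(
        userProfiles,
        (clickValue, profile) -> clickValue + " by " + profile);   // ValueJoiner(streamValue, tableValue)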
      • +
      • +
        +

        join

        +
        <TableValue, +VOut> KStream<K,VOut> join(KTable<K,TableValue> table, + ValueJoinerWithKey<? super K,? super V,? super TableValue,? extends VOut> joiner)
        +
        See join(KTable, ValueJoiner). + +

        Note that the key is read-only and must not be modified, as this can lead to corrupt partitioning and + incorrect results.

        +
        +
      • +
      • +
        +

        join

        +
        <TableValue, +VOut> KStream<K,VOut> join(KTable<K,TableValue> table, + ValueJoiner<? super V,? super TableValue,? extends VOut> joiner, + Joined<K,V,TableValue> joined)
        +
        Join records of this stream with KTable's records using non-windowed inner equi-join. + In contrast to join(KTable, ValueJoiner), but only if the used KTable is backed by a + VersionedKeyValueStore, the additional + Joined parameter allows to specify a join grace-period, to handle out-of-order data gracefully. + +

        For details about stream-table semantics, including co-partitioning requirements, (auto-)repartitioning, + and more see join(KTable, ValueJoiner). + If you specify a grace-period to handle out-of-order data, see further details below. + +

To handle out-of-order records, the input KTable must use a VersionedKeyValueStore (specified via a Materialized parameter when the KTable is created), and a join grace-period must be specified. For this case, KStream records are buffered until the end of the grace period and the KTable lookup is performed with some delay. Given that the KTable state is versioned, the lookup can use "event time", allowing out-of-order KStream records to join to the right (older) version of a KTable record with the same key. Also, KTable out-of-order updates are handled correctly by the versioned state store. Note that using a join grace-period introduces the notion of late records, i.e., records with a timestamp smaller than the defined grace-period allows; these late records will be dropped, and no join computation is triggered. Using a versioned state store for the KTable also implies that the defined history retention provides a cut-off point, and late records will be dropped, not updating the KTable state.

        If a join grace-period is specified, the KStream will be materialized in a local state store. + For failure and recovery this store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-<storename>-changelog", + where "applicationId" is user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, + "storeName" is an internally generated name, and "-changelog" is a fixed suffix. + +

        You can retrieve all generated internal topic names via Topology.describe(). + To customize the name of the changelog topic, use Joined input parameter.

        +
        +
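A sketch of the grace-period variant, assuming a Kafka Streams version where the table can be materialized as a versioned store and Joined#withGracePeriod is available; the store and topic names and the one-hour history retention are made-up values, and builder/clicks come from the earlier sketches:

    // The table must be backed by a versioned store for the grace period to take effect.
    KTable<String, String> versionedProfiles = builder.table(
        "user-profiles",
        Consumed.with(Serdes.String(), Serdes.String()),
        Materialized.as(Stores.persistentVersionedKeyValueStore("profiles-versioned", Duration.ofHours(1))));

    KStream<String, String> enrichedWithGrace = clicks.join(
        versionedProfiles,
        (clickValue, profile) -> clickValue + " by " + profile,
        Joined.<String, String, String>with(Serdes.String(), Serdes.String(), Serdes.String())
              .withGracePeriod(Duration.ofMinutes(10)));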
      • +
      • +
        +

        join

        +
        <TableValue, +VOut> KStream<K,VOut> join(KTable<K,TableValue> table, + ValueJoinerWithKey<? super K,? super V,? super TableValue,? extends VOut> joiner, + Joined<K,V,TableValue> joined)
        +
        See join(KTable, ValueJoiner, Joined). + +

        Note that the key is read-only and must not be modified, as this can lead to corrupt partitioning and + incorrect results.

        +
        +
      • +
      • +
        +

        leftJoin

        +
        <VTable, +VOut> KStream<K,VOut> leftJoin(KTable<K,VTable> table, + ValueJoiner<? super V,? super VTable,? extends VOut> joiner)
        +
        Join records of this stream with KTable's records using non-windowed left equi-join. + In contrast to an inner join, all records from this stream will produce an + output record (more details below). + The join is a primary key table lookup join with join attribute streamRecord.key == tableRecord.key. + "Table lookup join" means, that results are only computed if KStream records are processed. + This is done by performing a lookup for matching records into the internal KTable state. + In contrast, processing KTable input records will only update the internal KTable state and + will not produce any result records. + +

        For each KStream record, regardless if it finds a joining record in the KTable, the provided + ValueJoiner will be called to compute a value (with arbitrary type) for the result record. + If no KTable record with matching key was found during the lookup, ValueJoiner will be called + with a null value for the table record. + The key of the result record is the same as for both joining input records, + or the KStreams input record's key for a left-join result. + If you need read access to the join key, use leftJoin(KTable, ValueJoinerWithKey). + If a KStream input record's value is null the input record will be dropped, and no join + computation is triggered. + Note, that null keys for KStream input records are supported (in contrast to + inner join) resulting in a left join result. + If a KTable input record's key is null the input record will be dropped, and the table state + won't be updated. + KTable input records with null values are considered deletes (so-called tombstone) for the table. + +

Example:

    KStream   | KTable   | state    | result
    ----------+----------+----------+---------------------------
    <K1:A>    |          |          | <K1:ValueJoiner(A,null)>
              | <K1:b>   | <K1:b>   |
    <K1:C>    |          | <K1:b>   | <K1:ValueJoiner(C,b)>

By default, KStream records are processed by performing a lookup for matching records in the current (i.e., processing time) internal KTable state. This default implementation does not handle out-of-order records in either input of the join well. See leftJoin(KTable, ValueJoiner, Joined) on how to configure a stream-table join to handle out-of-order data.

        For more details, about co-partitioning requirements, (auto-)repartitioning, and more see + join(KStream, ValueJoiner, JoinWindows).

        +
        +
        Returns:
        +
        A KStream that contains join-records, one for each matched stream record plus one for each + non-matching stream record, with the corresponding key and a value computed by the given ValueJoiner.
        +
        See Also:
        +
        + +
        +
        +
        +
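A left-join variant of the stream-table sketch above (reusing clicks and userProfiles; illustrative only):

    // Every stream record produces output; profile is null when no matching table record exists.
    KStream<String, String> leftEnriched = clicks.leftJoin(
        userProfiles,
        (clickValue, profile) -> clickValue + " by " + (profile == null ? "unknown" : profile));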
      • +
      • +
        +

        leftJoin

        +
        <VTable, +VOut> KStream<K,VOut> leftJoin(KTable<K,VTable> table, + ValueJoinerWithKey<? super K,? super V,? super VTable,? extends VOut> joiner)
        +
        See leftJoin(KTable, ValueJoiner). + +

        Note that the key is read-only and must not be modified, as this can lead to corrupt partitioning and + incorrect results.

        +
        +
      • +
      • +
        +

        leftJoin

        +
        <VTable, +VOut> KStream<K,VOut> leftJoin(KTable<K,VTable> table, + ValueJoiner<? super V,? super VTable,? extends VOut> joiner, + Joined<K,V,VTable> joined)
        +
        Join records of this stream with KTable's records using non-windowed left equi-join. + In contrast to leftJoin(KTable, ValueJoiner), but only if the used KTable is backed by a + VersionedKeyValueStore, the additional + Joined parameter allows to specify a join grace-period, to handle out-of-order data gracefully. + +

        For details about left-stream-table-join semantics see leftJoin(KTable, ValueJoiner). + For co-partitioning requirements, (auto-)repartitioning, and more see join(KTable, ValueJoiner). + If you specify a grace-period to handle out-of-order data, see join(KTable, ValueJoiner, Joined).

        +
        +
      • +
      • +
        +

        leftJoin

        +
        <VTable, +VOut> KStream<K,VOut> leftJoin(KTable<K,VTable> table, + ValueJoinerWithKey<? super K,? super V,? super VTable,? extends VOut> joiner, + Joined<K,V,VTable> joined)
        +
        See leftJoin(KTable, ValueJoiner, Joined). + +

        Note that the key is read-only and must not be modified, as this can lead to corrupt partitioning and + incorrect results.

        +
        +
      • +
      • +
        +

        join

        +
        <GlobalKey, +GlobalValue, +VOut> KStream<K,VOut> join(GlobalKTable<GlobalKey,GlobalValue> globalTable, + KeyValueMapper<? super K,? super V,? extends GlobalKey> keySelector, + ValueJoiner<? super V,? super GlobalValue,? extends VOut> joiner)
        +
        Join records of this stream with GlobalKTable's records using non-windowed inner equi-join. + The join is a primary key table lookup join with join attribute + keyValueMapper.map(streamRecord) == tableRecord.key. + "Table lookup join" means, that results are only computed if KStream records are processed. + This is done by performing a lookup for matching records in the current (i.e., processing time) + internal GlobalKTable state. + In contrast, processing GlobalKTable input records will only update the internal GlobalKTable + state and will not produce any result records. + +

        For each KStream record that finds a joining record in the GlobalKTable the provided + ValueJoiner will be called to compute a value (with arbitrary type) for the result record. + The key of the result record is the same as the stream record's key. + If you need read access to the KStream key, use join(GlobalKTable, KeyValueMapper, ValueJoinerWithKey). + If a KStream input record's value is null or if the provided keySelector + returns null, the input record will be dropped, and no join computation is triggered. + If a GlobalKTable input record's key is null the input record will be dropped, and the table + state won't be updated. + GlobalKTable input records with null values are considered deletes (so-called tombstone) for + the table. + +

Example, using the first value attribute as join key:

    KStream       | GlobalKTable | state    | result
    --------------+--------------+----------+------------------------------
    <K1:(GK1,A)>  |              |          |
                  | <GK1:b>      | <GK1:b>  |
    <K1:(GK1,C)>  |              | <GK1:b>  | <K1:ValueJoiner((GK1,C),b)>

In contrast to join(KTable, ValueJoiner), there is no co-partitioning requirement between this KStream and the GlobalKTable. Also note that there are no ordering guarantees between the updates on the left and the right side of this join, since updates to the GlobalKTable are in no way synchronized. Therefore, the result of the join is inherently non-deterministic.

        +
        +
        Type Parameters:
        +
        GlobalKey - the key type of the global table
        +
        GlobalValue - the value type of the global table
        +
        VOut - the value type of the result stream
        +
        Parameters:
        +
        globalTable - the GlobalKTable to be joined with this stream
        +
        keySelector - a KeyValueMapper that computes the join key for stream input records
        +
        joiner - a ValueJoiner that computes the join result for a pair of matching records
        +
        Returns:
        +
        A KStream that contains join-records, one for each matched stream record, with the corresponding + key and a value computed by the given ValueJoiner.
        +
        See Also:
        +
        + +
        +
        +
        +
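A stream-global-table join sketch, reusing builder and clicks; the "products" topic and the assumption that the click value's first comma-separated field is the lookup key are purely illustrative:

    GlobalKTable<String, String> products =
        builder.globalTable("products", Consumed.with(Serdes.String(), Serdes.String()));

    // The KeyValueMapper derives the global-table lookup key from the stream record.
    KStream<String, String> withProduct = clicks.join(
        products,
        (clickKey, clickValue) -> clickValue.split(",")[0],
        (clickValue, productValue) -> clickValue + " -> " + productValue);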
      • +
      • +
        +

        join

        +
        <GlobalKey, +GlobalValue, +VOut> KStream<K,VOut> join(GlobalKTable<GlobalKey,GlobalValue> globalTable, + KeyValueMapper<? super K,? super V,? extends GlobalKey> keySelector, + ValueJoinerWithKey<? super K,? super V,? super GlobalValue,? extends VOut> joiner)
        +
        See join(GlobalKTable, KeyValueMapper, ValueJoiner). + +

        Note that the KStream key is read-only and must not be modified, as this can lead to corrupt + partitioning and incorrect results.

        +
        +
      • +
      • +
        +

        join

        +
        <GlobalKey, +GlobalValue, +VOut> KStream<K,VOut> join(GlobalKTable<GlobalKey,GlobalValue> globalTable, + KeyValueMapper<? super K,? super V,? extends GlobalKey> keySelector, + ValueJoiner<? super V,? super GlobalValue,? extends VOut> joiner, + Named named)
        +
        See join(GlobalKTable, KeyValueMapper, ValueJoiner). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        join

        +
        <GlobalKey, +GlobalValue, +VOut> KStream<K,VOut> join(GlobalKTable<GlobalKey,GlobalValue> globalTable, + KeyValueMapper<? super K,? super V,? extends GlobalKey> keySelector, + ValueJoinerWithKey<? super K,? super V,? super GlobalValue,? extends VOut> joiner, + Named named)
        +
        See join(GlobalKTable, KeyValueMapper, ValueJoinerWithKey). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        leftJoin

        +
        <GlobalKey, +GlobalValue, +VOut> KStream<K,VOut> leftJoin(GlobalKTable<GlobalKey,GlobalValue> globalTable, + KeyValueMapper<? super K,? super V,? extends GlobalKey> keySelector, + ValueJoiner<? super V,? super GlobalValue,? extends VOut> joiner)
        +
        Join records of this stream with GlobalKTable's records using non-windowed left equi-join. + In contrast to an inner join, all records from this + stream will produce an output record (more details below). + The join is a primary key table lookup join with join attribute + keyValueMapper.map(streamRecord) == tableRecord.key. + "Table lookup join" means, that results are only computed if KStream records are processed. + This is done by performing a lookup for matching records in the current (i.e., processing time) + internal GlobalKTable state. + In contrast, processing GlobalKTable input records will only update the internal GlobalKTable + state and will not produce any result records. + +

        For each KStream record, regardless if it finds a joining record in the GlobalKTable, the + provided ValueJoiner will be called to compute a value (with arbitrary type) for the result record. + If no GlobalKTable record with matching key was found during the lookup, ValueJoiner will be + called with a null value for the global table record. + The key of the result record is the same as for both joining input records, + or the KStreams input record's key for a left-join result. + If you need read access to the KStream key, use + leftJoin(GlobalKTable, KeyValueMapper, ValueJoinerWithKey). + If a KStream input record's value is null or if the provided keySelector + returns null, the input record will be dropped, and no join computation is triggered. + Note, that null keys for KStream input records are supported (in contrast to + inner join) resulting in a left join result. + If a GlobalKTable input record's key is null the input record will be dropped, and the table + state won't be updated. + GlobalKTable input records with null values are considered deletes (so-called tombstone) for + the table. + +

Example, using the first value attribute as join key:

    KStream       | GlobalKTable | state    | result
    --------------+--------------+----------+----------------------------------
    <K1:(GK1,A)>  |              |          | <K1:ValueJoiner((GK1,A),null)>
                  | <GK1:b>      | <GK1:b>  |
    <K1:(GK1,C)>  |              | <GK1:b>  | <K1:ValueJoiner((GK1,C),b)>

In contrast to leftJoin(KTable, ValueJoiner), there is no co-partitioning requirement between this KStream and the GlobalKTable. Also note that there are no ordering guarantees between the updates on the left and the right side of this join, since updates to the GlobalKTable are in no way synchronized. Therefore, the result of the join is inherently non-deterministic.

        +
        +
        Type Parameters:
        +
        GlobalKey - the key type of the global table
        +
        GlobalValue - the value type of the global table
        +
        VOut - the value type of the result stream
        +
        Parameters:
        +
        globalTable - the GlobalKTable to be joined with this stream
        +
        keySelector - a KeyValueMapper that computes the join key for stream input records
        +
        joiner - a ValueJoiner that computes the join result for a pair of matching records
        +
        Returns:
        +
        A KStream that contains join-records, one for each matched stream record plus one for each + non-matching stream record, with the corresponding key and a value computed by the given ValueJoiner.
        +
        See Also:
        +
        + +
        +
        +
        +
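The left variant under the same assumptions (reusing clicks and products from the previous sketch):

    // Stream records without a matching global-table record are joined against null.
    KStream<String, String> withProductOrUnknown = clicks.leftJoin(
        products,
        (clickKey, clickValue) -> clickValue.split(",")[0],
        (clickValue, productValue) -> clickValue + " -> " + (productValue == null ? "unknown" : productValue));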
      • +
      • +
        +

        leftJoin

        +
        <GlobalKey, +GlobalValue, +VOut> KStream<K,VOut> leftJoin(GlobalKTable<GlobalKey,GlobalValue> globalTable, + KeyValueMapper<? super K,? super V,? extends GlobalKey> keySelector, + ValueJoinerWithKey<? super K,? super V,? super GlobalValue,? extends VOut> joiner)
        +
        See leftJoin(GlobalKTable, KeyValueMapper, ValueJoiner). + +

        Note that the key is read-only and must not be modified, as this can lead to corrupt partitioning and + incorrect results.

        +
        +
      • +
      • +
        +

        leftJoin

        +
        <GlobalKey, +GlobalValue, +VOut> KStream<K,VOut> leftJoin(GlobalKTable<GlobalKey,GlobalValue> globalTable, + KeyValueMapper<? super K,? super V,? extends GlobalKey> keySelector, + ValueJoiner<? super V,? super GlobalValue,? extends VOut> joiner, + Named named)
        +
        See leftJoin(GlobalKTable, KeyValueMapper, ValueJoiner). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        leftJoin

        +
        <GlobalKey, +GlobalValue, +VOut> KStream<K,VOut> leftJoin(GlobalKTable<GlobalKey,GlobalValue> globalTable, + KeyValueMapper<? super K,? super V,? extends GlobalKey> keySelector, + ValueJoinerWithKey<? super K,? super V,? super GlobalValue,? extends VOut> joiner, + Named named)
        +
        See leftJoin(GlobalKTable, KeyValueMapper, ValueJoinerWithKey). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        process

        +
        <KOut, +VOut> KStream<KOut,VOut> process(ProcessorSupplier<? super K,? super V,? extends KOut,? extends VOut> processorSupplier, + String... stateStoreNames)
        +
        Process all records in this stream, one record at a time, by applying a Processor (provided by the given + ProcessorSupplier) to each input record. + The Processor can emit any number of result records via ProcessorContext.forward(Record) + (possibly of a different key and/or value type). + +

By default, the processor is stateless (similar to flatMap(KeyValueMapper, Named), however, it also has access to the record's timestamp and headers), but previously added state stores can be connected by providing their names as additional parameters, making the processor stateful. There are two different state stores which can be added to the underlying Topology:

        + + If the processorSupplier provides state stores via ConnectedStoreProvider.stores(), the + corresponding StoreBuilders will be added to the topology and connected to this processor + automatically, without the need to provide the store names as parameter to this method. + Additionally, even if a processor is stateless, it can still access all + global state stores (read-only). + There is no need to connect global stores to processors. + +

        All state stores which are connected to a processor and all global stores, can be accessed via + context.getStateStore(String) + using the context provided via + Processor#init(): + +

        
public class MyProcessor implements Processor<String, Integer, String, Integer> {
    private ProcessorContext<String, Integer> context;
    private KeyValueStore<String, String> store;

    @Override
    public void init(final ProcessorContext<String, Integer> context) {
        this.context = context;
        this.store = context.getStateStore("myStore");
    }

    @Override
    public void process(final Record<String, Integer> record) {
        // can access this.context and this.store
    }
}
        + + Furthermore, the provided ProcessorContext gives access to topology, runtime, and + record metadata, and allows to schedule punctuations and to + request offset commits. + +

        In contrast to grouping/aggregation and joins, even if the processor is stateful and an upstream operation + was key changing, no auto-repartition is triggered. + If repartitioning is required, a call to repartition() should be performed before process(). + At the same time, this method is considered a key changing operation by itself, and might result in an internal + data redistribution if a key-based operator (like an aggregation or join) is applied to the result + KStream (cf. processValues(FixedKeyProcessorSupplier, String...)).

        +
        +
        Parameters:
        +
        processorSupplier - the supplier used to obtain Processor instances
        +
        stateStoreNames - the names of state stores that the processor should be able to access
        +
        +
        +
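A sketch of wiring the MyProcessor example above into a topology with a manually added store; the store builder, serdes, and topic name are illustrative assumptions (builder as in the earlier sketches, Stores from org.apache.kafka.streams.state):

    // Register the store and connect it to the processor by name.
    builder.addStateStore(
        Stores.keyValueStoreBuilder(
            Stores.persistentKeyValueStore("myStore"),
            Serdes.String(),
            Serdes.String()));

    KStream<String, Integer> numbers =
        builder.stream("numbers", Consumed.with(Serdes.String(), Serdes.Integer()));
    KStream<String, Integer> processed = numbers.process(MyProcessor::new, "myStore");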
      • +
      • +
        +

        process

        +
        <KOut, +VOut> KStream<KOut,VOut> process(ProcessorSupplier<? super K,? super V,? extends KOut,? extends VOut> processorSupplier, + Named named, + String... stateStoreNames)
        +
        See process(ProcessorSupplier, String...). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      • +
        +

        processValues

        +
        <VOut> KStream<K,VOut> processValues(FixedKeyProcessorSupplier<? super K,? super V,? extends VOut> processorSupplier, + String... stateStoreNames)
        +
        Process all records in this stream, one record at a time, by applying a FixedKeyProcessor (provided by + the given FixedKeyProcessorSupplier) to each input record. + This method is similar to process(ProcessorSupplier, String...), however the key of the input + Record cannot be modified. + +

        Because the key cannot be modified, this method is not a key changing operation and preserves data + co-location with respect to the key (cf. flatMapValues(ValueMapper)). + Thus, no internal data redistribution is required if a key-based operator (like an aggregation or join) + is applied to the result KStream. + +

        However, because the key cannot be modified, some restrictions apply to a FixedKeyProcessor compared + to a Processor: for example, forwarding result records from a Punctuator is not possible.

        +
        +
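A minimal processValues() sketch (illustrative, reusing the clicks stream from earlier); it maps each String value to its length while leaving the key untouched:

    KStream<String, Integer> valueLengths = clicks.processValues(
        () -> new FixedKeyProcessor<String, String, Integer>() {
            private FixedKeyProcessorContext<String, Integer> context;

            @Override
            public void init(final FixedKeyProcessorContext<String, Integer> context) {
                this.context = context;
            }

            @Override
            public void process(final FixedKeyRecord<String, String> record) {
                // Only the value can be replaced; the key stays fixed.
                context.forward(record.withValue(record.value().length()));
            }
        });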
      • +
      • +
        +

        processValues

        +
        <VOut> KStream<K,VOut> processValues(FixedKeyProcessorSupplier<? super K,? super V,? extends VOut> processorSupplier, + Named named, + String... stateStoreNames)
        +
        See processValues(FixedKeyProcessorSupplier, String...). + +

        Takes an additional Named parameter that is used to name the processor in the topology.

        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/KTable.html b/static/41/javadoc/org/apache/kafka/streams/kstream/KTable.html new file mode 100644 index 000000000..6b6b2f152 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/KTable.html @@ -0,0 +1,3522 @@ + + + + +KTable (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface KTable<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - the key type of this table
    +
    V - the value type of this table
    +
    +
    +
    public interface KTable<K,V>
    +
    KTable is an abstraction of a changelog stream from a primary-keyed table. + Each record in this changelog stream is an update on the primary-keyed table with the record key as the primary key. + Primary-keys in a table cannot be null, and thus, null-key key-value pairs are not + supported, and corresponding records will be dropped. + KTables follow Kafka "tombstone" semantics, and null-value key-value pairs are + interpreted and processed as deletes for the corresponding key. + +

    A KTable is either defined from a single Kafka topic that is + consumed message by message or the result of a KTable transformation. + A (windowed) aggregation of one or multiple KStreams also yields a KTable. + A KStream can also be directly converted into a KTable. + +

    A KTable can be transformed record by record, joined with another KTable + (or KStream, as input to a stream-table join), or + can be re-grouped and aggregated into a new KTable. + A KTable can also be directly converted into a KStream. + Kafka Streams DSL can be mixed-and-matched with the Processor API (PAPI) (cf. Topology) via + transformValues(...). + +

    Some KTables have an internal state store which can be accessed from "outside" using + the Interactive Queries (IQ) API (see KafkaStreams#store(...) and + KafkaStreams#query(...) [new API; evolving] for details). + For example: +

    
KTable table = ...
...
KafkaStreams streams = ...;
streams.start();
...
String queryableStoreName = table.queryableStoreName(); // returns null if KTable is not queryable

StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>>> storeQueryParams =
  StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>> view = streams.store(storeQueryParams);

// query the value for a key
ValueAndTimestamp value = view.get(key);
    + + Note that a KTable is partitioned, and thus not all keys can be queried locally. + See the Apache Kafka Streams + documentation + for more details.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        filter

        +
        KTable<K,V> filter(Predicate<? super K,? super V> predicate)
        +
        Create a new KTable that consists of all records of this KTable which satisfy the given + predicate, with default serializers, deserializers, and state store. + All records that do not satisfy the predicate are dropped. + For each KTable update, the filter is evaluated based on the current update + record and then an update record is produced for the result KTable. + This is a stateless record-by-record operation. +

        + Note that filter for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided filter predicate is not evaluated but the tombstone record is forwarded + directly if required (i.e., if there is anything to be deleted). + Furthermore, for each record that gets dropped (i.e., does not satisfy the given predicate) a tombstone record + is forwarded.

        +
        +
        Parameters:
        +
        predicate - a filter Predicate that is applied to each record
        +
        Returns:
        +
        a KTable that contains only those records that satisfy the given predicate
        +
        See Also:
        +
        + +
        +
        +
        +
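A minimal filter() sketch (illustrative; the "profiles" topic and the predicate are assumptions, not from the original Javadoc):

    StreamsBuilder builder = new StreamsBuilder();
    KTable<String, String> profiles =
        builder.table("profiles", Consumed.with(Serdes.String(), Serdes.String()));

    // Tombstones bypass the predicate; records failing the predicate become tombstones downstream.
    KTable<String, String> nonEmptyProfiles = profiles.filter((key, value) -> !value.isEmpty());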
      • +
      • +
        +

        filter

        +
        KTable<K,V> filter(Predicate<? super K,? super V> predicate, + Named named)
        +
        Create a new KTable that consists of all records of this KTable which satisfy the given + predicate, with default serializers, deserializers, and state store. + All records that do not satisfy the predicate are dropped. + For each KTable update, the filter is evaluated based on the current update + record and then an update record is produced for the result KTable. + This is a stateless record-by-record operation. +

        + Note that filter for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided filter predicate is not evaluated but the tombstone record is forwarded + directly if required (i.e., if there is anything to be deleted). + Furthermore, for each record that gets dropped (i.e., does not satisfy the given predicate) a tombstone record + is forwarded.

        +
        +
        Parameters:
        +
        predicate - a filter Predicate that is applied to each record
        +
        named - a Named config used to name the processor in the topology
        +
        Returns:
        +
        a KTable that contains only those records that satisfy the given predicate
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        filter

        +
        KTable<K,V> filter(Predicate<? super K,? super V> predicate, + Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Create a new KTable that consists of all records of this KTable which satisfy the given + predicate, with the key serde, value serde, and the underlying + materialized state storage configured in the Materialized instance. + All records that do not satisfy the predicate are dropped. + For each KTable update, the filter is evaluated based on the current update + record and then an update record is produced for the result KTable. + This is a stateless record-by-record operation. +

        + Note that filter for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided filter predicate is not evaluated but the tombstone record is forwarded + directly if required (i.e., if there is anything to be deleted). + Furthermore, for each record that gets dropped (i.e., does not satisfy the given predicate) a tombstone record + is forwarded. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
KafkaStreams streams = ... // filtering words
StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>> localStore = streams.store(storeQueryParams);
K key = "some-word";
ValueAndTimestamp<V> valueForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + The store name to query with is specified by Materialized.as(String) or Materialized.as(KeyValueBytesStoreSupplier). +

        +
        +
        Parameters:
        +
        predicate - a filter Predicate that is applied to each record
        +
        materialized - a Materialized that describes how the StateStore for the resulting KTable + should be materialized. Cannot be null
        +
        Returns:
        +
        a KTable that contains only those records that satisfy the given predicate
        +
        See Also:
        +
        + +
        +
        +
        +
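A sketch of the materialized variant, building on the profiles table from the previous sketch; the store name "non-empty-profiles" is a made-up example (Bytes from org.apache.kafka.common.utils, KeyValueStore from org.apache.kafka.streams.state):

    KTable<String, String> queryableProfiles = profiles.filter(
        (key, value) -> !value.isEmpty(),
        Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("non-empty-profiles")
                    .withKeySerde(Serdes.String())
                    .withValueSerde(Serdes.String()));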
      • +
      • +
        +

        filter

        +
        KTable<K,V> filter(Predicate<? super K,? super V> predicate, + Named named, + Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Create a new KTable that consists of all records of this KTable which satisfy the given + predicate, with the key serde, value serde, and the underlying + materialized state storage configured in the Materialized instance. + All records that do not satisfy the predicate are dropped. + For each KTable update, the filter is evaluated based on the current update + record and then an update record is produced for the result KTable. + This is a stateless record-by-record operation. +

        + Note that filter for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided filter predicate is not evaluated but the tombstone record is forwarded + directly if required (i.e., if there is anything to be deleted). + Furthermore, for each record that gets dropped (i.e., does not satisfy the given predicate) a tombstone record + is forwarded. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
KafkaStreams streams = ... // filtering words
StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>> localStore = streams.store(storeQueryParams);
K key = "some-word";
ValueAndTimestamp<V> valueForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + The store name to query with is specified by Materialized.as(String) or Materialized.as(KeyValueBytesStoreSupplier). +

        +
        +
        Parameters:
        +
        predicate - a filter Predicate that is applied to each record
        +
        named - a Named config used to name the processor in the topology
        +
        materialized - a Materialized that describes how the StateStore for the resulting KTable + should be materialized. Cannot be null
        +
        Returns:
        +
        a KTable that contains only those records that satisfy the given predicate
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        filterNot

        +
        KTable<K,V> filterNot(Predicate<? super K,? super V> predicate)
        +
Create a new KTable that consists of all records of this KTable which do not satisfy the given predicate, with default serializers, deserializers, and state store. All records that do satisfy the predicate are dropped. For each KTable update, the filter is evaluated based on the current update record and then an update record is produced for the result KTable. This is a stateless record-by-record operation.

        + Note that filterNot for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided filter predicate is not evaluated but the tombstone record is forwarded + directly if required (i.e., if there is anything to be deleted). + Furthermore, for each record that gets dropped (i.e., does satisfy the given predicate) a tombstone record is + forwarded.

        +
        +
        Parameters:
        +
        predicate - a filter Predicate that is applied to each record
        +
        Returns:
        +
        a KTable that contains only those records that do not satisfy the given predicate
        +
        See Also:
        +
        + +
        +
        +
        +
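A filterNot() sketch under the same assumptions (reusing the profiles table from the filter sketch above; the key prefix is illustrative):

    // filterNot drops the records that DO satisfy the predicate -- the inverse of filter.
    KTable<String, String> withoutTestUsers =
        profiles.filterNot((key, value) -> key.startsWith("test-"));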
      • +
      • +
        +

        filterNot

        +
        KTable<K,V> filterNot(Predicate<? super K,? super V> predicate, + Named named)
        +
Create a new KTable that consists of all records of this KTable which do not satisfy the given predicate, with default serializers, deserializers, and state store. All records that do satisfy the predicate are dropped. For each KTable update, the filter is evaluated based on the current update record and then an update record is produced for the result KTable. This is a stateless record-by-record operation.

        + Note that filterNot for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided filter predicate is not evaluated but the tombstone record is forwarded + directly if required (i.e., if there is anything to be deleted). + Furthermore, for each record that gets dropped (i.e., does satisfy the given predicate) a tombstone record is + forwarded.

        +
        +
        Parameters:
        +
        predicate - a filter Predicate that is applied to each record
        +
        named - a Named config used to name the processor in the topology
        +
        Returns:
        +
        a KTable that contains only those records that do not satisfy the given predicate
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        filterNot

        +
        KTable<K,V> filterNot(Predicate<? super K,? super V> predicate, + Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
Create a new KTable that consists of all records of this KTable which do not satisfy the given predicate, with the key serde, value serde, and the underlying materialized state storage configured in the Materialized instance. All records that do satisfy the predicate are dropped. For each KTable update, the filter is evaluated based on the current update record and then an update record is produced for the result KTable. This is a stateless record-by-record operation.

        + Note that filterNot for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided filter predicate is not evaluated but the tombstone record is forwarded + directly if required (i.e., if there is anything to be deleted). + Furthermore, for each record that gets dropped (i.e., does satisfy the given predicate) a tombstone record is + forwarded. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
KafkaStreams streams = ... // filtering words
StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>> localStore = streams.store(storeQueryParams);
K key = "some-word";
ValueAndTimestamp<V> valueForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + The store name to query with is specified by Materialized.as(String) or Materialized.as(KeyValueBytesStoreSupplier). +

        +
        +
        Parameters:
        +
        predicate - a filter Predicate that is applied to each record
        +
        materialized - a Materialized that describes how the StateStore for the resulting KTable + should be materialized. Cannot be null
        +
        Returns:
        +
        a KTable that contains only those records that do not satisfy the given predicate
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        filterNot

        +
        KTable<K,V> filterNot(Predicate<? super K,? super V> predicate, + Named named, + Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
Create a new KTable that consists of all records of this KTable which do not satisfy the given predicate, with the key serde, value serde, and the underlying materialized state storage configured in the Materialized instance. All records that do satisfy the predicate are dropped. For each KTable update, the filter is evaluated based on the current update record and then an update record is produced for the result KTable. This is a stateless record-by-record operation.

        + Note that filterNot for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided filter predicate is not evaluated but the tombstone record is forwarded + directly if required (i.e., if there is anything to be deleted). + Furthermore, for each record that gets dropped (i.e., does satisfy the given predicate) a tombstone record is + forwarded. +

        + To query the local ReadOnlyKeyValueStore it must be obtained via + KafkaStreams#store(...): +

        
KafkaStreams streams = ... // filtering words
StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedKeyValueStore());
ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>> localStore = streams.store(storeQueryParams);
K key = "some-word";
ValueAndTimestamp<V> valueForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + The store name to query with is specified by Materialized.as(String) or Materialized.as(KeyValueBytesStoreSupplier). +

        +
        +
        Parameters:
        +
        predicate - a filter Predicate that is applied to each record
        +
        named - a Named config used to name the processor in the topology
        +
        materialized - a Materialized that describes how the StateStore for the resulting KTable + should be materialized. Cannot be null
        +
        Returns:
        +
        a KTable that contains only those records that do not satisfy the given predicate
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        mapValues

        +
        <VR> KTable<K,VR> mapValues(ValueMapper<? super V,? extends VR> mapper)
        +
        Create a new KTable by transforming the value of each record in this KTable into a new value + (with possibly a new type) in the new KTable, with default serializers, deserializers, and state store. + For each KTable update the provided ValueMapper is applied to the value of the updated record and + computes a new value for it, resulting in an updated record for the result KTable. + Thus, an input record <K,V> can be transformed into an output record <K:V'>. + This is a stateless record-by-record operation. +

The example below counts the number of tokens in the value string.

        
KTable<String, String> inputTable = builder.table("topic");
KTable<String, Integer> outputTable = inputTable.mapValues(value -> value.split(" ").length);
        +

        + This operation preserves data co-location with respect to the key. + Thus, no internal data redistribution is required if a key based operator (like a join) is applied to + the result KTable. +

        + Note that mapValues for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided value-mapper is not evaluated but the tombstone record is forwarded directly to + delete the corresponding record in the result KTable.

        +
        +
        Type Parameters:
        +
        VR - the value type of the result KTable
        +
        Parameters:
        +
        mapper - a ValueMapper that computes a new output value
        +
        Returns:
        +
        a KTable that contains records with unmodified keys and new values (possibly of different type)
        +
        +
        +
      • +
      • +
        +

        mapValues

        +
        <VR> KTable<K,VR> mapValues(ValueMapper<? super V,? extends VR> mapper, + Named named)
        +
        Create a new KTable by transforming the value of each record in this KTable into a new value + (with possibly a new type) in the new KTable, with default serializers, deserializers, and state store. + For each KTable update the provided ValueMapper is applied to the value of the updated record and + computes a new value for it, resulting in an updated record for the result KTable. + Thus, an input record <K,V> can be transformed into an output record <K:V'>. + This is a stateless record-by-record operation. +

        + The example below counts the number of tokens in the value string.

        
        + KTable<String, String> inputTable = builder.table("topic");
        + KTable<String, Integer> outputTable = inputTable.mapValues(value -> value.split(" ").length, Named.as("countTokenValue"));
        + 
        +

        + This operation preserves data co-location with respect to the key. + Thus, no internal data redistribution is required if a key based operator (like a join) is applied to + the result KTable. +

        + Note that mapValues for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided value-mapper is not evaluated but the tombstone record is forwarded directly to + delete the corresponding record in the result KTable.

        +
        +
        Type Parameters:
        +
        VR - the value type of the result KTable
        +
        Parameters:
        +
        mapper - a ValueMapper that computes a new output value
        +
        named - a Named config used to name the processor in the topology
        +
        Returns:
        +
        a KTable that contains records with unmodified keys and new values (possibly of different type)
        +
        +
        +
      • +
      • +
        +

        mapValues

        +
        <VR> KTable<K,VR> mapValues(ValueMapperWithKey<? super K,? super V,? extends VR> mapper)
        +
        Create a new KTable by transforming the value of each record in this KTable into a new value + (with possibly a new type) in the new KTable, with default serializers, deserializers, and state store. + For each KTable update the provided ValueMapperWithKey is applied to the value of the update + record and computes a new value for it, resulting in an updated record for the result KTable. + Thus, an input record <K,V> can be transformed into an output record <K:V'>. + This is a stateless record-by-record operation. +

        + The example below counts the number of tokens in the value and key strings.

        
        + KTable<String, String> inputTable = builder.table("topic");
        + KTable<String, Integer> outputTable =
        +  inputTable.mapValues((readOnlyKey, value) -> readOnlyKey.split(" ").length + value.split(" ").length);
        + 
        +

        + Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. + This operation preserves data co-location with respect to the key. + Thus, no internal data redistribution is required if a key based operator (like a join) is applied to + the result KTable. +

        + Note that mapValues for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided value-mapper is not evaluated but the tombstone record is forwarded directly to + delete the corresponding record in the result KTable.

        +
        +
        Type Parameters:
        +
        VR - the value type of the result KTable
        +
        Parameters:
        +
        mapper - a ValueMapperWithKey that computes a new output value
        +
        Returns:
        +
        a KTable that contains records with unmodified keys and new values (possibly of different type)
        +
        +
        +
      • +
      • +
        +

        mapValues

        +
        <VR> KTable<K,VR> mapValues(ValueMapperWithKey<? super K,? super V,? extends VR> mapper, + Named named)
        +
        Create a new KTable by transforming the value of each record in this KTable into a new value + (with possibly a new type) in the new KTable, with default serializers, deserializers, and state store. + For each KTable update the provided ValueMapperWithKey is applied to the value of the update + record and computes a new value for it, resulting in an updated record for the result KTable. + Thus, an input record <K,V> can be transformed into an output record <K:V'>. + This is a stateless record-by-record operation. +

        + The example below counts the number of tokens in the value and key strings.

        
        + KTable<String, String> inputTable = builder.table("topic");
        + KTable<String, Integer> outputTable =
        +  inputTable.mapValues((readOnlyKey, value) -> readOnlyKey.split(" ").length + value.split(" ").length, Named.as("countTokenValueAndKey"));
        + 
        +

        + Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. + This operation preserves data co-location with respect to the key. + Thus, no internal data redistribution is required if a key based operator (like a join) is applied to + the result KTable. +

        + Note that mapValues for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided value-mapper is not evaluated but the tombstone record is forwarded directly to + delete the corresponding record in the result KTable.

        +
        +
        Type Parameters:
        +
        VR - the value type of the result KTable
        +
        Parameters:
        +
        mapper - a ValueMapperWithKey that computes a new output value
        +
        named - a Named config used to name the processor in the topology
        +
        Returns:
        +
        a KTable that contains records with unmodified keys and new values (possibly of different type)
        +
        +
        +
      • +
      • +
        +

        mapValues

        +
        <VR> KTable<K,VR> mapValues(ValueMapper<? super V,? extends VR> mapper, + Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Create a new KTable by transforming the value of each record in this KTable into a new value + (with possibly a new type) in the new KTable, with the key serde, value serde, + and the underlying materialized state storage configured in the Materialized + instance. + For each KTable update the provided ValueMapper is applied to the value of the updated record and + computes a new value for it, resulting in an updated record for the result KTable. + Thus, an input record <K,V> can be transformed into an output record <K:V'>. + This is a stateless record-by-record operation. +

        + The example below counts the number of tokens in the value string.

        
        + KTable<String, String> inputTable = builder.table("topic");
        + KTable<String, Integer> outputTable = inputTable.mapValues(new ValueMapper<String, Integer>() {
        +     public Integer apply(String value) {
        +         return value.split(" ").length;
        +     }
        + }, Materialized.as("outputTable"));
        + 
        +

        + To query the local KeyValueStore representing outputTable above it must be obtained via + KafkaStreams#store(...): + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + The store name to query with is specified by Materialized.as(String) or Materialized.as(KeyValueBytesStoreSupplier). +

        + This operation preserves data co-location with respect to the key. + Thus, no internal data redistribution is required if a key based operator (like a join) is applied to + the result KTable. +

        + Note that mapValues for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided value-mapper is not evaluated but the tombstone record is forwarded directly to + delete the corresponding record in the result KTable.

        +
        +
        Type Parameters:
        +
        VR - the value type of the result KTable
        +
        Parameters:
        +
        mapper - a ValueMapper that computes a new output value
        +
        materialized - a Materialized that describes how the StateStore for the resulting KTable + should be materialized. Cannot be null
        +
        Returns:
        +
        a KTable that contains records with unmodified keys and new values (possibly of different type)
        +
        +
        +
      • +
      • +
        +

        mapValues

        +
        <VR> KTable<K,VR> mapValues(ValueMapper<? super V,? extends VR> mapper, + Named named, + Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Create a new KTable by transforming the value of each record in this KTable into a new value + (with possibly a new type) in the new KTable, with the key serde, value serde, + and the underlying materialized state storage configured in the Materialized + instance. + For each KTable update the provided ValueMapper is applied to the value of the updated record and + computes a new value for it, resulting in an updated record for the result KTable. + Thus, an input record <K,V> can be transformed into an output record <K:V'>. + This is a stateless record-by-record operation. +

        + The example below counts the number of tokens in the value string.

        
        + KTable<String, String> inputTable = builder.table("topic");
        + KTable<String, Integer> outputTable = inputTable.mapValues(new ValueMapper<String, Integer>() {
        +     public Integer apply(String value) {
        +         return value.split(" ").length;
        +     }
        + }, Named.as("countTokenValue"), Materialized.as("outputTable"));
        + 
        +

        + To query the local KeyValueStore representing outputTable above it must be obtained via + KafkaStreams#store(...): + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + The store name to query with is specified by Materialized.as(String) or Materialized.as(KeyValueBytesStoreSupplier). +

        + This operation preserves data co-location with respect to the key. + Thus, no internal data redistribution is required if a key based operator (like a join) is applied to + the result KTable. +

        + Note that mapValues for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided value-mapper is not evaluated but the tombstone record is forwarded directly to + delete the corresponding record in the result KTable.

        +
        +
        Type Parameters:
        +
        VR - the value type of the result KTable
        +
        Parameters:
        +
        mapper - a ValueMapper that computes a new output value
        +
        named - a Named config used to name the processor in the topology
        +
        materialized - a Materialized that describes how the StateStore for the resulting KTable + should be materialized. Cannot be null
        +
        Returns:
        +
        a KTable that contains records with unmodified keys and new values (possibly of different type)
        +
        +
        +
      • +
      • +
        +

        mapValues

        +
        <VR> KTable<K,VR> mapValues(ValueMapperWithKey<? super K,? super V,? extends VR> mapper, + Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Create a new KTable by transforming the value of each record in this KTable into a new value + (with possibly a new type) in the new KTable, with the key serde, value serde, + and the underlying materialized state storage configured in the Materialized + instance. + For each KTable update the provided ValueMapperWithKey is applied to the value of the update + record and computes a new value for it, resulting in an updated record for the result KTable. + Thus, an input record <K,V> can be transformed into an output record <K:V'>. + This is a stateless record-by-record operation. +

        + The example below counts the number of tokens in the value and key strings.

        
        + KTable<String, String> inputTable = builder.table("topic");
        + KTable<String, Integer> outputTable = inputTable.mapValues(new ValueMapperWithKey<String, String, Integer>() {
        +     public Integer apply(String readOnlyKey, String value) {
        +         return readOnlyKey.split(" ").length + value.split(" ").length;
        +     }
        + }, Materialized.as("outputTable"));
        + 
        +

        + To query the local KeyValueStore representing outputTable above it must be obtained via KafkaStreams#store(...): For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to query the value of the key on a parallel running instance of your Kafka Streams application. The store name to query with is specified by Materialized.as(String) or Materialized.as(KeyValueBytesStoreSupplier).

        + Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. + This operation preserves data co-location with respect to the key. + Thus, no internal data redistribution is required if a key based operator (like a join) is applied to + the result KTable. +

        + Note that mapValues for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided value-mapper is not evaluated but the tombstone record is forwarded directly to + delete the corresponding record in the result KTable.

        +
        +
        Type Parameters:
        +
        VR - the value type of the result KTable
        +
        Parameters:
        +
        mapper - a ValueMapperWithKey that computes a new output value
        +
        materialized - a Materialized that describes how the StateStore for the resulting KTable + should be materialized. Cannot be null
        +
        Returns:
        +
        a KTable that contains records with unmodified keys and new values (possibly of different type)
        +
        +
        +
      • +
      • +
        +

        mapValues

        +
        <VR> KTable<K,VR> mapValues(ValueMapperWithKey<? super K,? super V,? extends VR> mapper, + Named named, + Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Create a new KTable by transforming the value of each record in this KTable into a new value + (with possibly a new type) in the new KTable, with the key serde, value serde, + and the underlying materialized state storage configured in the Materialized + instance. + For each KTable update the provided ValueMapperWithKey is applied to the value of the update + record and computes a new value for it, resulting in an updated record for the result KTable. + Thus, an input record <K,V> can be transformed into an output record <K:V'>. + This is a stateless record-by-record operation. +

        + The example below counts the number of tokens in the value and key strings.

        
        + KTable<String, String> inputTable = builder.table("topic");
        + KTable<String, Integer> outputTable = inputTable.mapValues(new ValueMapperWithKey<String, String, Integer>() {
        +     public Integer apply(String readOnlyKey, String value) {
        +         return readOnlyKey.split(" ").length + value.split(" ").length;
        +     }
        + }, Named.as("countTokenValueAndKey"), Materialized.as("outputTable"));
        + 
        +

        + To query the local KeyValueStore representing outputTable above it must be obtained via + KafkaStreams#store(...): + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. + The store name to query with is specified by Materialized.as(String) or Materialized.as(KeyValueBytesStoreSupplier). +

        + Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. + This operation preserves data co-location with respect to the key. + Thus, no internal data redistribution is required if a key based operator (like a join) is applied to + the result KTable. +

        + Note that mapValues for a changelog stream works differently than record stream filters, because records with null values (so-called tombstone records) + have delete semantics. + Thus, for tombstones the provided value-mapper is not evaluated but the tombstone record is forwarded directly to + delete the corresponding record in the result KTable.

        +
        +
        Type Parameters:
        +
        VR - the value type of the result KTable
        +
        Parameters:
        +
        mapper - a ValueMapperWithKey that computes a new output value
        +
        named - a Named config used to name the processor in the topology
        +
        materialized - a Materialized that describes how the StateStore for the resulting KTable + should be materialized. Cannot be null
        +
        Returns:
        +
        a KTable that contains records with unmodified keys and new values (possibly of different type)
        +
        +
        +
      • +
      • +
        +

        toStream

        +
        KStream<K,V> toStream()
        +
        Convert this changelog stream to a KStream. +

        + Note that this is a logical operation and only changes the "interpretation" of the stream, i.e., each record of + this changelog stream is no longer treated as an updated record (cf. KStream vs KTable).
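        + For example, the update stream of a table can be piped to an output topic (topic names below are placeholders):
        + KTable<String, Long> wordCounts = builder.table("word-counts-topic");
        + // every update to the table becomes a plain record in the resulting stream
        + KStream<String, Long> updates = wordCounts.toStream();
        + updates.to("word-counts-changelog-topic");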

        +
        +
        Returns:
        +
        a KStream that contains the same records as this KTable
        +
        +
        +
      • +
      • +
        +

        toStream

        +
        KStream<K,V> toStream(Named named)
        +
        Convert this changelog stream to a KStream. +

        + Note that this is a logical operation and only changes the "interpretation" of the stream, i.e., each record of + this changelog stream is no longer treated as an updated record (cf. KStream vs KTable).

        +
        +
        Parameters:
        +
        named - a Named config used to name the processor in the topology
        +
        Returns:
        +
        a KStream that contains the same records as this KTable
        +
        +
        +
      • +
      • +
        +

        toStream

        +
        <KR> KStream<KR,V> toStream(KeyValueMapper<? super K,? super V,? extends KR> mapper)
        +
        Convert this changelog stream to a KStream using the given KeyValueMapper to select the new key. +

        + For example, you can compute the new key as the length of the value string. +

        
        + KTable<String, String> table = builder.table("topic");
        + KStream<Integer, String> keyedStream = table.toStream(new KeyValueMapper<String, String, Integer>() {
        +     public Integer apply(String key, String value) {
        +         return value.length();
        +     }
        + });
        + 
        + Setting a new key might result in an internal data redistribution if a key based operator (like an aggregation or + join) is applied to the result KStream. +

        + This operation is equivalent to calling + table.toStream().selectKey(KeyValueMapper). +

        + Note that toStream() is a logical operation and only changes the "interpretation" of the stream, i.e., + each record of this changelog stream is no longer treated as an updated record (cf. KStream vs KTable).

        +
        +
        Type Parameters:
        +
        KR - the new key type of the result stream
        +
        Parameters:
        +
        mapper - a KeyValueMapper that computes a new key for each record
        +
        Returns:
        +
        a KStream that contains the same records as this KTable
        +
        +
        +
      • +
      • +
        +

        toStream

        +
        <KR> KStream<KR,V> toStream(KeyValueMapper<? super K,? super V,? extends KR> mapper, + Named named)
        +
        Convert this changelog stream to a KStream using the given KeyValueMapper to select the new key. +

        + For example, you can compute the new key as the length of the value string. +

        
        + KTable<String, String> table = builder.table("topic");
        + KStream<Integer, String> keyedStream = table.toStream(new KeyValueMapper<String, String, Integer>() {
        +     public Integer apply(String key, String value) {
        +         return value.length();
        +     }
        + });
        + 
        + Setting a new key might result in an internal data redistribution if a key based operator (like an aggregation or + join) is applied to the result KStream. +

        + This operation is equivalent to calling + table.toStream().selectKey(KeyValueMapper). +

        + Note that toStream() is a logical operation and only changes the "interpretation" of the stream, i.e., + each record of this changelog stream is no longer treated as an updated record (cf. KStream vs KTable).

        +
        +
        Type Parameters:
        +
        KR - the new key type of the result stream
        +
        Parameters:
        +
        mapper - a KeyValueMapper that computes a new key for each record
        +
        named - a Named config used to name the processor in the topology
        +
        Returns:
        +
        a KStream that contains the same records as this KTable
        +
        +
        +
      • +
      • +
        +

        suppress

        +
        KTable<K,V> suppress(Suppressed<? super K> suppressed)
        +
        Suppress some updates from this changelog stream, determined by the supplied Suppressed configuration. + This controls what updates downstream table and stream operations will receive. +

        + Note that suppress() cannot be applied to + versioned KTables.
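        + As an illustrative sketch (the input topic and the five-minute limit are arbitrary choices), intermediate updates can be held back so that only the latest value per key is emitted once the time limit has passed:
        + KTable<String, Long> counts = builder.table("counts-topic");
        + // buffer updates per key and forward only the latest one after 5 minutes of waiting
        + KTable<String, Long> throttled = counts.suppress(
        +     Suppressed.untilTimeLimit(Duration.ofMinutes(5), Suppressed.BufferConfig.unbounded()));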

        +
        +
        Parameters:
        +
        suppressed - Configuration object determining what, if any, updates to suppress
        +
        Returns:
        +
        A new KTable with the desired suppression characteristics.
        +
        +
        +
      • +
      • +
        +

        transformValues

        +
        <VR> KTable<K,VR> transformValues(ValueTransformerWithKeySupplier<? super K,? super V,? extends VR> transformerSupplier, + String... stateStoreNames)
        +
        Create a new KTable by transforming the value of each record in this KTable into a new value + (with possibly a new type), with default serializers, deserializers, and state store. + A ValueTransformerWithKey (provided by the given ValueTransformerWithKeySupplier) is applied to each input + record value and computes a new value for it. + Thus, an input record <K,V> can be transformed into an output record <K:V'>. + This is similar to mapValues(ValueMapperWithKey), but more flexible, allowing access to additional state-stores, + and access to the ProcessorContext. + Furthermore, via Punctuator.punctuate(long) the processing progress can be observed and additional + periodic actions can be performed. +

        + If the downstream topology uses aggregation functions, (e.g. KGroupedTable.reduce(org.apache.kafka.streams.kstream.Reducer<V>, org.apache.kafka.streams.kstream.Reducer<V>, org.apache.kafka.streams.kstream.Materialized<K, V, org.apache.kafka.streams.state.KeyValueStore<org.apache.kafka.common.utils.Bytes, byte[]>>), KGroupedTable.aggregate(org.apache.kafka.streams.kstream.Initializer<VR>, org.apache.kafka.streams.kstream.Aggregator<? super K, ? super V, VR>, org.apache.kafka.streams.kstream.Aggregator<? super K, ? super V, VR>, org.apache.kafka.streams.kstream.Materialized<K, VR, org.apache.kafka.streams.state.KeyValueStore<org.apache.kafka.common.utils.Bytes, byte[]>>), etc), + care must be taken when dealing with state, (either held in state-stores or transformer instances), to ensure correct aggregate results. + In contrast, if the resulting KTable is materialized, (cf. transformValues(ValueTransformerWithKeySupplier, Materialized, String...)), + such concerns are handled for you. +

        + In order to assign a state, the state must be created and registered beforehand: +

        
        + // create store
        + StoreBuilder<KeyValueStore<String,String>> keyValueStoreBuilder =
        +         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
        +                 Serdes.String(),
        +                 Serdes.String());
        + // register store
        + builder.addStateStore(keyValueStoreBuilder);
        +
        + KTable outputTable = inputTable.transformValues(new ValueTransformerWithKeySupplier() { ... }, "myValueTransformState");
        + 
        +

        + Within the ValueTransformerWithKey, the state is obtained via the + ProcessorContext. + To trigger periodic actions via punctuate(), + a schedule must be registered. +

        
        + new ValueTransformerWithKeySupplier() {
        +     ValueTransformerWithKey get() {
        +         return new ValueTransformerWithKey() {
        +             private KeyValueStore<String, String> state;
        +
        +             void init(ProcessorContext context) {
        +                 this.state = (KeyValueStore<String, String>)context.getStateStore("myValueTransformState");
        +                 context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..)); // punctuate each 1000ms, can access this.state
        +             }
        +
        +             NewValueType transform(K readOnlyKey, V value) {
        +                 // can access this.state and use read-only key
        +                 return new NewValueType(readOnlyKey); // or null
        +             }
        +
        +             void close() {
        +                 // can access this.state
        +             }
        +         }
        +     }
        + }
        + 
        +

        + Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. + Setting a new value preserves data co-location with respect to the key.

        +
        +
        Type Parameters:
        +
        VR - the value type of the result table
        +
        Parameters:
        +
        transformerSupplier - an instance of ValueTransformerWithKeySupplier that generates a + ValueTransformerWithKey. + At least one transformer instance will be created per streaming task. + Transformers do not need to be thread-safe.
        +
        stateStoreNames - the names of the state stores used by the processor
        +
        Returns:
        +
        a KTable that contains records with unmodified key and new values (possibly of different type)
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        transformValues

        +
        <VR> KTable<K,VR> transformValues(ValueTransformerWithKeySupplier<? super K,? super V,? extends VR> transformerSupplier, + Named named, + String... stateStoreNames)
        +
        Create a new KTable by transforming the value of each record in this KTable into a new value + (with possibly a new type), with default serializers, deserializers, and state store. + A ValueTransformerWithKey (provided by the given ValueTransformerWithKeySupplier) is applied to each input + record value and computes a new value for it. + Thus, an input record <K,V> can be transformed into an output record <K:V'>. + This is similar to mapValues(ValueMapperWithKey), but more flexible, allowing access to additional state-stores, + and access to the ProcessorContext. + Furthermore, via Punctuator.punctuate(long) the processing progress can be observed and additional + periodic actions can be performed. +

        + If the downstream topology uses aggregation functions, (e.g. KGroupedTable.reduce(org.apache.kafka.streams.kstream.Reducer<V>, org.apache.kafka.streams.kstream.Reducer<V>, org.apache.kafka.streams.kstream.Materialized<K, V, org.apache.kafka.streams.state.KeyValueStore<org.apache.kafka.common.utils.Bytes, byte[]>>), KGroupedTable.aggregate(org.apache.kafka.streams.kstream.Initializer<VR>, org.apache.kafka.streams.kstream.Aggregator<? super K, ? super V, VR>, org.apache.kafka.streams.kstream.Aggregator<? super K, ? super V, VR>, org.apache.kafka.streams.kstream.Materialized<K, VR, org.apache.kafka.streams.state.KeyValueStore<org.apache.kafka.common.utils.Bytes, byte[]>>), etc), + care must be taken when dealing with state, (either held in state-stores or transformer instances), to ensure correct aggregate results. + In contrast, if the resulting KTable is materialized, (cf. transformValues(ValueTransformerWithKeySupplier, Materialized, String...)), + such concerns are handled for you. +

        + In order to assign a state, the state must be created and registered beforehand: +

        
        + // create store
        + StoreBuilder<KeyValueStore<String,String>> keyValueStoreBuilder =
        +         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
        +                 Serdes.String(),
        +                 Serdes.String());
        + // register store
        + builder.addStateStore(keyValueStoreBuilder);
        +
        + KTable outputTable = inputTable.transformValues(new ValueTransformerWithKeySupplier() { ... }, "myValueTransformState");
        + 
        +

        + Within the ValueTransformerWithKey, the state is obtained via the + ProcessorContext. + To trigger periodic actions via punctuate(), + a schedule must be registered. +

        
        + new ValueTransformerWithKeySupplier() {
        +     ValueTransformerWithKey get() {
        +         return new ValueTransformerWithKey() {
        +             private KeyValueStore<String, String> state;
        +
        +             void init(ProcessorContext context) {
        +                 this.state = (KeyValueStore<String, String>)context.getStateStore("myValueTransformState");
        +                 context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..)); // punctuate each 1000ms, can access this.state
        +             }
        +
        +             NewValueType transform(K readOnlyKey, V value) {
        +                 // can access this.state and use read-only key
        +                 return new NewValueType(readOnlyKey); // or null
        +             }
        +
        +             void close() {
        +                 // can access this.state
        +             }
        +         }
        +     }
        + }
        + 
        +

        + Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. + Setting a new value preserves data co-location with respect to the key.

        +
        +
        Type Parameters:
        +
        VR - the value type of the result table
        +
        Parameters:
        +
        transformerSupplier - an instance of ValueTransformerWithKeySupplier that generates a + ValueTransformerWithKey. + At least one transformer instance will be created per streaming task. + Transformers do not need to be thread-safe.
        +
        named - a Named config used to name the processor in the topology
        +
        stateStoreNames - the names of the state stores used by the processor
        +
        Returns:
        +
        a KTable that contains records with unmodified key and new values (possibly of different type)
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        transformValues

        +
        <VR> KTable<K,VR> transformValues(ValueTransformerWithKeySupplier<? super K,? super V,? extends VR> transformerSupplier, + Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized, + String... stateStoreNames)
        +
        Create a new KTable by transforming the value of each record in this KTable into a new value + (with possibly a new type), with the key serde, value serde, and the underlying + materialized state storage configured in the Materialized instance. + A ValueTransformerWithKey (provided by the given ValueTransformerWithKeySupplier) is applied to each input + record value and computes a new value for it. + This is similar to mapValues(ValueMapperWithKey), but more flexible, allowing stateful, rather than stateless, + record-by-record operation, access to additional state-stores, and access to the ProcessorContext. + Furthermore, via Punctuator.punctuate(long) the processing progress can be observed and additional + periodic actions can be performed. + The resulting KTable is materialized into another state store (additional to the provided state store names) + as specified by the user via Materialized parameter, and is queryable through its given name. +

        + In order to assign a state, the state must be created and registered beforehand: +

        
        + // create store
        + StoreBuilder<KeyValueStore<String,String>> keyValueStoreBuilder =
        +         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
        +                 Serdes.String(),
        +                 Serdes.String());
        + // register store
        + builder.addStateStore(keyValueStoreBuilder);
        +
        + KTable outputTable = inputTable.transformValues(
        +     new ValueTransformerWithKeySupplier() { ... },
        +     Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("outputTable")
        +                                 .withKeySerde(Serdes.String())
        +                                 .withValueSerde(Serdes.String()),
        +     "myValueTransformState");
        + 
        +

        + Within the ValueTransformerWithKey, the state is obtained via the + ProcessorContext. + To trigger periodic actions via punctuate(), + a schedule must be registered. +

        
        + new ValueTransformerWithKeySupplier() {
        +     ValueTransformerWithKey get() {
        +         return new ValueTransformerWithKey() {
        +             private KeyValueStore<String, String> state;
        +
        +             void init(ProcessorContext context) {
        +                 this.state = (KeyValueStore<String, String>)context.getStateStore("myValueTransformState");
        +                 context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..)); // punctuate each 1000ms, can access this.state
        +             }
        +
        +             NewValueType transform(K readOnlyKey, V value) {
        +                 // can access this.state and use read-only key
        +                 return new NewValueType(readOnlyKey); // or null
        +             }
        +
        +             void close() {
        +                 // can access this.state
        +             }
        +         }
        +     }
        + }
        + 
        +

        + Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. + Setting a new value preserves data co-location with respect to the key.

        +
        +
        Type Parameters:
        +
        VR - the value type of the result table
        +
        Parameters:
        +
        transformerSupplier - an instance of ValueTransformerWithKeySupplier that generates a + ValueTransformerWithKey. + At least one transformer instance will be created per streaming task. + Transformers do not need to be thread-safe.
        +
        materialized - an instance of Materialized used to describe how the state store of the + resulting table should be materialized. + Cannot be null
        +
        stateStoreNames - the names of the state stores used by the processor
        +
        Returns:
        +
        a KTable that contains records with unmodified key and new values (possibly of different type)
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        transformValues

        +
        <VR> KTable<K,VR> transformValues(ValueTransformerWithKeySupplier<? super K,? super V,? extends VR> transformerSupplier, + Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized, + Named named, + String... stateStoreNames)
        +
        Create a new KTable by transforming the value of each record in this KTable into a new value + (with possibly a new type), with the key serde, value serde, and the underlying + materialized state storage configured in the Materialized instance. + A ValueTransformerWithKey (provided by the given ValueTransformerWithKeySupplier) is applied to each input + record value and computes a new value for it. + This is similar to mapValues(ValueMapperWithKey), but more flexible, allowing stateful, rather than stateless, + record-by-record operation, access to additional state-stores, and access to the ProcessorContext. + Furthermore, via Punctuator.punctuate(long) the processing progress can be observed and additional + periodic actions can be performed. + The resulting KTable is materialized into another state store (additional to the provided state store names) + as specified by the user via Materialized parameter, and is queryable through its given name. +

        + In order to assign a state, the state must be created and registered beforehand: +

        
        + // create store
        + StoreBuilder<KeyValueStore<String,String>> keyValueStoreBuilder =
        +         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
        +                 Serdes.String(),
        +                 Serdes.String());
        + // register store
        + builder.addStateStore(keyValueStoreBuilder);
        +
        + KTable outputTable = inputTable.transformValues(
        +     new ValueTransformerWithKeySupplier() { ... },
        +     Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("outputTable")
        +                                 .withKeySerde(Serdes.String())
        +                                 .withValueSerde(Serdes.String()),
        +     "myValueTransformState");
        + 
        +

        + Within the ValueTransformerWithKey, the state is obtained via the + ProcessorContext. + To trigger periodic actions via punctuate(), + a schedule must be registered. +

        
        + new ValueTransformerWithKeySupplier() {
        +     ValueTransformerWithKey get() {
        +         return new ValueTransformerWithKey() {
        +             private KeyValueStore<String, String> state;
        +
        +             void init(ProcessorContext context) {
        +                 this.state = (KeyValueStore<String, String>)context.getStateStore("myValueTransformState");
        +                 context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..)); // punctuate each 1000ms, can access this.state
        +             }
        +
        +             NewValueType transform(K readOnlyKey, V value) {
        +                 // can access this.state and use read-only key
        +                 return new NewValueType(readOnlyKey); // or null
        +             }
        +
        +             void close() {
        +                 // can access this.state
        +             }
        +         }
        +     }
        + }
        + 
        +

        + Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. + Setting a new value preserves data co-location with respect to the key.

        +
        +
        Type Parameters:
        +
        VR - the value type of the result table
        +
        Parameters:
        +
        transformerSupplier - an instance of ValueTransformerWithKeySupplier that generates a + ValueTransformerWithKey. + At least one transformer instance will be created per streaming task. + Transformers do not need to be thread-safe.
        +
        materialized - an instance of Materialized used to describe how the state store of the + resulting table should be materialized. + Cannot be null
        +
        named - a Named config used to name the processor in the topology
        +
        stateStoreNames - the names of the state stores used by the processor
        +
        Returns:
        +
        a KTable that contains records with unmodified key and new values (possibly of different type)
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        groupBy

        +
        <KR, +VR> KGroupedTable<KR,VR> groupBy(KeyValueMapper<? super K,? super V,? extends KeyValue<? extends KR,? extends VR>> selector)
        +
        Re-groups the records of this KTable using the provided KeyValueMapper and default serializers and deserializers. Each KeyValue pair of this KTable is mapped to a new KeyValue pair by applying the provided KeyValueMapper. Re-grouping a KTable is required before an aggregation operator can be applied to the data (cf. KGroupedTable). The KeyValueMapper selects a new key and value (which should both have unmodified types). If the new record key is null the record will not be included in the resulting KGroupedTable.

        + Because a new key is selected, an internal repartitioning topic will be created in Kafka. + This topic will be named "${applicationId}-<name>-repartition", where "applicationId" is user-specified in + StreamsConfig via parameter APPLICATION_ID_CONFIG, "<name>" is + an internally generated name, and "-repartition" is a fixed suffix. + +

        You can retrieve all generated internal topic names via Topology.describe(). + +

        + All data of this KTable will be redistributed through the repartitioning topic by writing all update + records to and rereading all updated records from it, such that the resulting KGroupedTable is partitioned + on the new key. +

        + If the key or value type is changed, it is recommended to use groupBy(KeyValueMapper, Grouped) + instead.

        +
        +
        Type Parameters:
        +
        KR - the key type of the result KGroupedTable
        +
        VR - the value type of the result KGroupedTable
        +
        Parameters:
        +
        selector - a KeyValueMapper that computes a new grouping key and value to be aggregated
        +
        Returns:
        +
        a KGroupedTable that contains the re-grouped records of the original KTable
        +
        +
        +
      • +
      • +
        +

        groupBy

        +
        <KR, +VR> KGroupedTable<KR,VR> groupBy(KeyValueMapper<? super K,? super V,? extends KeyValue<? extends KR,? extends VR>> selector, + Grouped<KR,VR> grouped)
        +
        Re-groups the records of this KTable using the provided KeyValueMapper and Serdes as specified by Grouped. Each KeyValue pair of this KTable is mapped to a new KeyValue pair by applying the provided KeyValueMapper. Re-grouping a KTable is required before an aggregation operator can be applied to the data (cf. KGroupedTable). The KeyValueMapper selects a new key and value (where both could be the same type or a new type). If the new record key is null the record will not be included in the resulting KGroupedTable.

        + Because a new key is selected, an internal repartitioning topic will be created in Kafka. + This topic will be named "${applicationId}-<name>-repartition", where "applicationId" is user-specified in + StreamsConfig via parameter APPLICATION_ID_CONFIG, "<name>" is + either provided via Grouped.as(String) or an internally generated name. + +

        + You can retrieve all generated internal topic names via Topology.describe(). + +

        + All data of this KTable will be redistributed through the repartitioning topic by writing all update + records to and rereading all updated records from it, such that the resulting KGroupedTable is partitioned + on the new key.
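        + For example (all names are placeholders), when the selected value type differs from the input value type, the new Serdes and the repartition-topic name can be supplied via Grouped:
        + KTable<String, String> userRegions = builder.table("user-region-topic");
        + KGroupedTable<String, Integer> byRegion = userRegions.groupBy(
        +     (user, region) -> KeyValue.pair(region, user.length()),              // the value type changes to Integer
        +     Grouped.with("group-by-region", Serdes.String(), Serdes.Integer())); // so Serdes are given explicitly
        + KTable<String, Long> usersPerRegion = byRegion.count();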

        +
        +
        Type Parameters:
        +
        KR - the key type of the result KGroupedTable
        +
        VR - the value type of the result KGroupedTable
        +
        Parameters:
        +
        selector - a KeyValueMapper that computes a new grouping key and value to be aggregated
        +
        grouped - the Grouped instance used to specify Serdes + and the name for a repartition topic if repartitioning is required.
        +
        Returns:
        +
        a KGroupedTable that contains the re-grouped records of the original KTable
        +
        +
        +
      • +
      • +
        +

        join

        +
        <VO, +VR> KTable<K,VR> join(KTable<K,VO> other, + ValueJoiner<? super V,? super VO,? extends VR> joiner)
        +
        Join records of this KTable with another KTable's records using non-windowed inner equi join, + with default serializers, deserializers, and state store. + The join is a primary key join with join attribute thisKTable.key == otherKTable.key. + The result is an ever updating KTable that represents the current (i.e., processing time) result + of the join. +

        + The join is computed by (1) updating the internal state of one KTable and (2) performing a lookup for a + matching record in the current (i.e., processing time) internal state of the other KTable. + This happens in a symmetric way, i.e., for each update of either this or the other input + KTable the result gets updated. +

        + For each KTable record that finds a corresponding record in the other KTable the provided + ValueJoiner will be called to compute a value (with arbitrary type) for the result record. + The key of the result record is the same as for both joining input records. +

        + Note that records with null values (so-called tombstone records) have delete semantics. + Thus, for input tombstones the provided value-joiner is not called but a tombstone record is forwarded + directly to delete a record in the result KTable if required (i.e., if there is anything to be deleted). +

        + Input records with null key will be dropped and no join computation is performed. +

        + Example:

        | thisKTable | thisState | otherKTable | otherState | result updated record |
        | --- | --- | --- | --- | --- |
        | <K1:A> | <K1:A> |  |  |  |
        |  | <K1:A> | <K1:b> | <K1:b> | <K1:ValueJoiner(A,b)> |
        | <K1:C> | <K1:C> |  | <K1:b> | <K1:ValueJoiner(C,b)> |
        |  | <K1:C> | <K1:null> |  | <K1:null> |
        + Both input streams (or to be more precise, their underlying source topics) need to have the same number of + partitions.
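        + A minimal sketch (topic names are placeholders) that joins two tables on their common primary key:
        + KTable<String, String> left = builder.table("left-topic");
        + KTable<String, String> right = builder.table("right-topic");
        + // the joiner is only called for keys that are present in both tables
        + KTable<String, String> joined = left.join(right, (leftValue, rightValue) -> leftValue + "," + rightValue);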

        +
        +
        Type Parameters:
        +
        VO - the value type of the other KTable
        +
        VR - the value type of the result KTable
        +
        Parameters:
        +
        other - the other KTable to be joined with this KTable
        +
        joiner - a ValueJoiner that computes the join result for a pair of matching records
        +
        Returns:
        +
        a KTable that contains join-records for each key and values computed by the given + ValueJoiner, one for each matched record-pair with the same key
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        join

        +
        <VO, +VR> KTable<K,VR> join(KTable<K,VO> other, + ValueJoiner<? super V,? super VO,? extends VR> joiner, + Named named)
        +
        Join records of this KTable with another KTable's records using non-windowed inner equi join, + with default serializers, deserializers, and state store. + The join is a primary key join with join attribute thisKTable.key == otherKTable.key. + The result is an ever updating KTable that represents the current (i.e., processing time) result + of the join. +

        + The join is computed by (1) updating the internal state of one KTable and (2) performing a lookup for a + matching record in the current (i.e., processing time) internal state of the other KTable. + This happens in a symmetric way, i.e., for each update of either this or the other input + KTable the result gets updated. +

        + For each KTable record that finds a corresponding record in the other KTable the provided + ValueJoiner will be called to compute a value (with arbitrary type) for the result record. + The key of the result record is the same as for both joining input records. +

        + Note that records with null values (so-called tombstone records) have delete semantics. + Thus, for input tombstones the provided value-joiner is not called but a tombstone record is forwarded + directly to delete a record in the result KTable if required (i.e., if there is anything to be deleted). +

        + Input records with null key will be dropped and no join computation is performed. +

        + Example:

        | thisKTable | thisState | otherKTable | otherState | result updated record |
        | --- | --- | --- | --- | --- |
        | <K1:A> | <K1:A> |  |  |  |
        |  | <K1:A> | <K1:b> | <K1:b> | <K1:ValueJoiner(A,b)> |
        | <K1:C> | <K1:C> |  | <K1:b> | <K1:ValueJoiner(C,b)> |
        |  | <K1:C> | <K1:null> |  | <K1:null> |
        + Both input streams (or to be more precise, their underlying source topics) need to have the same number of + partitions.

        +
        +
        Type Parameters:
        +
        VO - the value type of the other KTable
        +
        VR - the value type of the result KTable
        +
        Parameters:
        +
        other - the other KTable to be joined with this KTable
        +
        joiner - a ValueJoiner that computes the join result for a pair of matching records
        +
        named - a Named config used to name the processor in the topology
        +
        Returns:
        +
        a KTable that contains join-records for each key and values computed by the given + ValueJoiner, one for each matched record-pair with the same key
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        join

        +
        <VO, +VR> KTable<K,VR> join(KTable<K,VO> other, + ValueJoiner<? super V,? super VO,? extends VR> joiner, + Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Join records of this KTable with another KTable's records using non-windowed inner equi join, + with the Materialized instance for configuration of the key serde, + the result table's value serde, and state store. + The join is a primary key join with join attribute thisKTable.key == otherKTable.key. + The result is an ever updating KTable that represents the current (i.e., processing time) result + of the join. +

        + The join is computed by (1) updating the internal state of one KTable and (2) performing a lookup for a + matching record in the current (i.e., processing time) internal state of the other KTable. + This happens in a symmetric way, i.e., for each update of either this or the other input + KTable the result gets updated. +

        + For each KTable record that finds a corresponding record in the other KTable the provided + ValueJoiner will be called to compute a value (with arbitrary type) for the result record. + The key of the result record is the same as for both joining input records. +

        + Note that records with null values (so-called tombstone records) have delete semantics. + Thus, for input tombstones the provided value-joiner is not called but a tombstone record is forwarded + directly to delete a record in the result KTable if required (i.e., if there is anything to be deleted). +

        + Input records with null key will be dropped and no join computation is performed. +

        + Example:

        | thisKTable | thisState | otherKTable | otherState | result updated record |
        | --- | --- | --- | --- | --- |
        | <K1:A> | <K1:A> |  |  |  |
        |  | <K1:A> | <K1:b> | <K1:b> | <K1:ValueJoiner(A,b)> |
        | <K1:C> | <K1:C> |  | <K1:b> | <K1:ValueJoiner(C,b)> |
        |  | <K1:C> | <K1:null> |  | <K1:null> |
        + Both input streams (or to be more precise, their underlying source topics) need to have the same number of + partitions.

        +
        +
        Type Parameters:
        +
        VO - the value type of the other KTable
        +
        VR - the value type of the result KTable
        +
        Parameters:
        +
        other - the other KTable to be joined with this KTable
        +
        joiner - a ValueJoiner that computes the join result for a pair of matching records
        +
        materialized - an instance of Materialized used to describe how the state store should be materialized. + Cannot be null
        +
        Returns:
        +
        a KTable that contains join-records for each key and values computed by the given + ValueJoiner, one for each matched record-pair with the same key
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        join

        +
        <VO, +VR> KTable<K,VR> join(KTable<K,VO> other, + ValueJoiner<? super V,? super VO,? extends VR> joiner, + Named named, + Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Join records of this KTable with another KTable's records using non-windowed inner equi join, + with the Materialized instance for configuration of the key serde, + the result table's value serde, and state store. + The join is a primary key join with join attribute thisKTable.key == otherKTable.key. + The result is an ever updating KTable that represents the current (i.e., processing time) result + of the join. +

        + The join is computed by (1) updating the internal state of one KTable and (2) performing a lookup for a + matching record in the current (i.e., processing time) internal state of the other KTable. + This happens in a symmetric way, i.e., for each update of either this or the other input + KTable the result gets updated. +

        + For each KTable record that finds a corresponding record in the other KTable the provided + ValueJoiner will be called to compute a value (with arbitrary type) for the result record. + The key of the result record is the same as for both joining input records. +

        + Note that records with null values (so-called tombstone records) have delete semantics. + Thus, for input tombstones the provided value-joiner is not called but a tombstone record is forwarded + directly to delete a record in the result KTable if required (i.e., if there is anything to be deleted). +

        + Input records with null key will be dropped and no join computation is performed. +

        + Example:

        | thisKTable | thisState | otherKTable | otherState | result updated record |
        | --- | --- | --- | --- | --- |
        | <K1:A> | <K1:A> |  |  |  |
        |  | <K1:A> | <K1:b> | <K1:b> | <K1:ValueJoiner(A,b)> |
        | <K1:C> | <K1:C> |  | <K1:b> | <K1:ValueJoiner(C,b)> |
        |  | <K1:C> | <K1:null> |  | <K1:null> |
        + Both input streams (or to be more precise, their underlying source topics) need to have the same number of + partitions.

        +
        +
        Type Parameters:
        +
        VO - the value type of the other KTable
        +
        VR - the value type of the result KTable
        +
        Parameters:
        +
        other - the other KTable to be joined with this KTable
        +
        joiner - a ValueJoiner that computes the join result for a pair of matching records
        +
        named - a Named config used to name the processor in the topology
        +
        materialized - an instance of Materialized used to describe how the state store should be materialized. + Cannot be null
        +
        Returns:
        +
        a KTable that contains join-records for each key and values computed by the given + ValueJoiner, one for each matched record-pair with the same key
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +
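A minimal usage sketch of the primary-key inner join overload above; the topic names, serdes, processor name, and store name are illustrative assumptions, not part of the API docs:

 import org.apache.kafka.common.serialization.Serdes;
 import org.apache.kafka.common.utils.Bytes;
 import org.apache.kafka.streams.StreamsBuilder;
 import org.apache.kafka.streams.kstream.Consumed;
 import org.apache.kafka.streams.kstream.KTable;
 import org.apache.kafka.streams.kstream.Materialized;
 import org.apache.kafka.streams.kstream.Named;
 import org.apache.kafka.streams.kstream.Produced;
 import org.apache.kafka.streams.state.KeyValueStore;

 public class KTableInnerJoinExample {
     public static void main(String[] args) {
         StreamsBuilder builder = new StreamsBuilder();

         // Two hypothetical input tables; their source topics must be co-partitioned.
         KTable<String, String> left = builder.table("left-topic",
                 Consumed.with(Serdes.String(), Serdes.String()));
         KTable<String, String> right = builder.table("right-topic",
                 Consumed.with(Serdes.String(), Serdes.String()));

         // Primary-key inner join: the joiner runs only for keys present in both tables.
         KTable<String, String> joined = left.join(
                 right,
                 (leftValue, rightValue) -> leftValue + "/" + rightValue,
                 Named.as("left-right-inner-join"),
                 Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("inner-join-store")
                         .withKeySerde(Serdes.String())
                         .withValueSerde(Serdes.String()));

         joined.toStream().to("joined-topic", Produced.with(Serdes.String(), Serdes.String()));
         // builder.build() would then be passed to a KafkaStreams instance as usual.
     }
 }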

leftJoin

<VO, VR> KTable<K,VR> leftJoin(KTable<K,VO> other,
                               ValueJoiner<? super V,? super VO,? extends VR> joiner)

Join records of this KTable (left input) with another KTable's (right input) records using non-windowed left equi join, with default serializers, deserializers, and state store. The join is a primary key join with join attribute thisKTable.key == otherKTable.key. In contrast to inner-join, all records from left KTable will produce an output record (cf. below). The result is an ever updating KTable that represents the current (i.e., processing time) result of the join.

The join is computed by (1) updating the internal state of one KTable and (2) performing a lookup for a matching record in the current (i.e., processing time) internal state of the other KTable. This happens in a symmetric way, i.e., for each update of either this or the other input KTable the result gets updated.

For each KTable record that finds a corresponding record in the other KTable's state the provided ValueJoiner will be called to compute a value (with arbitrary type) for the result record. Additionally, for each record of left KTable that does not find a corresponding record in the right KTable's state the provided ValueJoiner will be called with rightValue = null to compute a value (with arbitrary type) for the result record. The key of the result record is the same as for both joining input records.

Note that records with null values (so-called tombstone records) have delete semantics. For example, for left input tombstones the provided value-joiner is not called but a tombstone record is forwarded directly to delete a record in the result KTable if required (i.e., if there is anything to be deleted).

Input records with null key will be dropped and no join computation is performed.

Example:

thisKTable | thisState | otherKTable | otherState | result updated record
<K1:A>     | <K1:A>    |             |            | <K1:ValueJoiner(A,null)>
           | <K1:A>    | <K1:b>      | <K1:b>     | <K1:ValueJoiner(A,b)>
<K1:null>  |           |             | <K1:b>     | <K1:null>
           |           | <K1:null>   |            |

Both input streams (or to be more precise, their underlying source topics) need to have the same number of partitions.

Type Parameters:
VO - the value type of the other KTable
VR - the value type of the result KTable

Parameters:
other - the other KTable to be joined with this KTable
joiner - a ValueJoiner that computes the join result for a pair of matching records

Returns:
a KTable that contains join-records for each key and values computed by the given ValueJoiner, one for each matched record-pair with the same key plus one for each non-matching record of left KTable
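A compact sketch of the left join above; the only practical difference from the inner join is that the joiner must tolerate a null right-hand value (the placeholder string is an illustrative choice):

 import org.apache.kafka.streams.kstream.KTable;

 public class KTableLeftJoinExample {
     // Left join: every record of the left table produces output; rightValue may be null.
     static KTable<String, String> leftJoin(KTable<String, String> left, KTable<String, String> right) {
         return left.leftJoin(
                 right,
                 (leftValue, rightValue) -> rightValue == null
                         ? leftValue + "/<missing>"
                         : leftValue + "/" + rightValue);
     }
 }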

leftJoin

<VO, VR> KTable<K,VR> leftJoin(KTable<K,VO> other,
                               ValueJoiner<? super V,? super VO,? extends VR> joiner,
                               Named named)

Join records of this KTable (left input) with another KTable's (right input) records using non-windowed left equi join, with default serializers, deserializers, and state store. Join semantics, tombstone and null-key handling, the example, and the co-partitioning requirement are the same as for leftJoin(KTable, ValueJoiner) above; in addition the processor is named via the Named parameter.

Type Parameters:
VO - the value type of the other KTable
VR - the value type of the result KTable

Parameters:
other - the other KTable to be joined with this KTable
joiner - a ValueJoiner that computes the join result for a pair of matching records
named - a Named config used to name the processor in the topology

Returns:
a KTable that contains join-records for each key and values computed by the given ValueJoiner, one for each matched record-pair with the same key plus one for each non-matching record of left KTable

leftJoin

<VO, VR> KTable<K,VR> leftJoin(KTable<K,VO> other,
                               ValueJoiner<? super V,? super VO,? extends VR> joiner,
                               Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

Join records of this KTable (left input) with another KTable's (right input) records using non-windowed left equi join, with the Materialized instance for configuration of the key serde, the result table's value serde, and state store. Join semantics, tombstone and null-key handling, the example, and the co-partitioning requirement are the same as for leftJoin(KTable, ValueJoiner) above.

Type Parameters:
VO - the value type of the other KTable
VR - the value type of the result KTable

Parameters:
other - the other KTable to be joined with this KTable
joiner - a ValueJoiner that computes the join result for a pair of matching records
materialized - an instance of Materialized used to describe how the state store should be materialized. Cannot be null

Returns:
a KTable that contains join-records for each key and values computed by the given ValueJoiner, one for each matched record-pair with the same key plus one for each non-matching record of left KTable

leftJoin

<VO, VR> KTable<K,VR> leftJoin(KTable<K,VO> other,
                               ValueJoiner<? super V,? super VO,? extends VR> joiner,
                               Named named,
                               Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

Join records of this KTable (left input) with another KTable's (right input) records using non-windowed left equi join, with the Materialized instance for configuration of the key serde, the result table's value serde, and state store. Join semantics, tombstone and null-key handling, the example, and the co-partitioning requirement are the same as for leftJoin(KTable, ValueJoiner) above; in addition the processor is named via the Named parameter.

Type Parameters:
VO - the value type of the other KTable
VR - the value type of the result KTable

Parameters:
other - the other KTable to be joined with this KTable
joiner - a ValueJoiner that computes the join result for a pair of matching records
named - a Named config used to name the processor in the topology
materialized - an instance of Materialized used to describe how the state store should be materialized. Cannot be null

Returns:
a KTable that contains join-records for each key and values computed by the given ValueJoiner, one for each matched record-pair with the same key plus one for each non-matching record of left KTable

outerJoin

<VO, VR> KTable<K,VR> outerJoin(KTable<K,VO> other,
                                ValueJoiner<? super V,? super VO,? extends VR> joiner)

Join records of this KTable (left input) with another KTable's (right input) records using non-windowed outer equi join, with default serializers, deserializers, and state store. The join is a primary key join with join attribute thisKTable.key == otherKTable.key. In contrast to inner-join or left-join, all records from both input KTables will produce an output record (cf. below). The result is an ever updating KTable that represents the current (i.e., processing time) result of the join.

The join is computed by (1) updating the internal state of one KTable and (2) performing a lookup for a matching record in the current (i.e., processing time) internal state of the other KTable. This happens in a symmetric way, i.e., for each update of either this or the other input KTable the result gets updated.

For each KTable record that finds a corresponding record in the other KTable's state the provided ValueJoiner will be called to compute a value (with arbitrary type) for the result record. Additionally, for each record that does not find a corresponding record in the corresponding other KTable's state the provided ValueJoiner will be called with null value for the corresponding other value to compute a value (with arbitrary type) for the result record. The key of the result record is the same as for both joining input records.

Note that records with null values (so-called tombstone records) have delete semantics. Thus, for input tombstones the provided value-joiner is not called but a tombstone record is forwarded directly to delete a record in the result KTable if required (i.e., if there is anything to be deleted).

Input records with null key will be dropped and no join computation is performed.

Example:

thisKTable | thisState | otherKTable | otherState | result updated record
<K1:A>     | <K1:A>    |             |            | <K1:ValueJoiner(A,null)>
           | <K1:A>    | <K1:b>      | <K1:b>     | <K1:ValueJoiner(A,b)>
<K1:null>  |           |             | <K1:b>     | <K1:ValueJoiner(null,b)>
           |           | <K1:null>   |            | <K1:null>

Both input streams (or to be more precise, their underlying source topics) need to have the same number of partitions.

Type Parameters:
VO - the value type of the other KTable
VR - the value type of the result KTable

Parameters:
other - the other KTable to be joined with this KTable
joiner - a ValueJoiner that computes the join result for a pair of matching records

Returns:
a KTable that contains join-records for each key and values computed by the given ValueJoiner, one for each matched record-pair with the same key plus one for each non-matching record of both KTables
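A compact sketch of the outer join above; either input value may be null in the joiner (but not both), and the placeholder strings are illustrative:

 import org.apache.kafka.streams.kstream.KTable;

 public class KTableOuterJoinExample {
     // Outer join: updates on either side produce output; either value may be null.
     static KTable<String, String> outerJoin(KTable<String, String> left, KTable<String, String> right) {
         return left.outerJoin(
                 right,
                 (leftValue, rightValue) ->
                         (leftValue == null ? "<none>" : leftValue)
                         + "/"
                         + (rightValue == null ? "<none>" : rightValue));
     }
 }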

outerJoin

<VO, VR> KTable<K,VR> outerJoin(KTable<K,VO> other,
                                ValueJoiner<? super V,? super VO,? extends VR> joiner,
                                Named named)

Join records of this KTable (left input) with another KTable's (right input) records using non-windowed outer equi join, with default serializers, deserializers, and state store. Join semantics, tombstone and null-key handling, the example, and the co-partitioning requirement are the same as for outerJoin(KTable, ValueJoiner) above; in addition the processor is named via the Named parameter.

Type Parameters:
VO - the value type of the other KTable
VR - the value type of the result KTable

Parameters:
other - the other KTable to be joined with this KTable
joiner - a ValueJoiner that computes the join result for a pair of matching records
named - a Named config used to name the processor in the topology

Returns:
a KTable that contains join-records for each key and values computed by the given ValueJoiner, one for each matched record-pair with the same key plus one for each non-matching record of both KTables

outerJoin

<VO, VR> KTable<K,VR> outerJoin(KTable<K,VO> other,
                                ValueJoiner<? super V,? super VO,? extends VR> joiner,
                                Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

Join records of this KTable (left input) with another KTable's (right input) records using non-windowed outer equi join, with the Materialized instance for configuration of the key serde, the result table's value serde, and state store. Join semantics, tombstone and null-key handling, the example, and the co-partitioning requirement are the same as for outerJoin(KTable, ValueJoiner) above.

Type Parameters:
VO - the value type of the other KTable
VR - the value type of the result KTable

Parameters:
other - the other KTable to be joined with this KTable
joiner - a ValueJoiner that computes the join result for a pair of matching records
materialized - an instance of Materialized used to describe how the state store should be materialized. Cannot be null

Returns:
a KTable that contains join-records for each key and values computed by the given ValueJoiner, one for each matched record-pair with the same key plus one for each non-matching record of both KTables

outerJoin

<VO, VR> KTable<K,VR> outerJoin(KTable<K,VO> other,
                                ValueJoiner<? super V,? super VO,? extends VR> joiner,
                                Named named,
                                Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

Join records of this KTable (left input) with another KTable's (right input) records using non-windowed outer equi join, with the Materialized instance for configuration of the key serde, the result table's value serde, and state store. Join semantics, tombstone and null-key handling, the example, and the co-partitioning requirement are the same as for outerJoin(KTable, ValueJoiner) above; in addition the processor is named via the Named parameter.

Type Parameters:
VO - the value type of the other KTable
VR - the value type of the result KTable

Parameters:
other - the other KTable to be joined with this KTable
joiner - a ValueJoiner that computes the join result for a pair of matching records
named - a Named config used to name the processor in the topology
materialized - an instance of Materialized used to describe how the state store should be materialized. Cannot be null

Returns:
a KTable that contains join-records for each key and values computed by the given ValueJoiner, one for each matched record-pair with the same key plus one for each non-matching record of both KTables

join

<VR, KO, VO> KTable<K,VR> join(KTable<KO,VO> other,
                               Function<? super V,? extends KO> foreignKeyExtractor,
                               ValueJoiner<? super V,? super VO,? extends VR> joiner)

Join records of this KTable with another KTable using non-windowed inner join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor.

Type Parameters:
VR - the value type of the result KTable
KO - the key type of the other KTable
VO - the value type of the other KTable

Parameters:
other - the other KTable to be joined with this KTable. Keyed by KO.
foreignKeyExtractor - a Function that extracts the key (KO) from this table's value (V). If the result is null, the update is ignored as invalid.
joiner - a ValueJoiner that computes the join result for a pair of matching records

Returns:
a KTable that contains the result of joining this table with other
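A minimal sketch of the foreign-key inner join above, assuming an orders table whose value is the customer id and a customers table keyed by customer id (both layouts are illustrative):

 import java.util.function.Function;
 import org.apache.kafka.streams.kstream.KTable;

 public class ForeignKeyJoinExample {
     // orders:    orderId    -> customerId (the value is the foreign key, for simplicity)
     // customers: customerId -> customerName
     static KTable<String, String> enrich(KTable<String, String> orders,
                                          KTable<String, String> customers) {
         // The extractor pulls the other table's key out of this table's value;
         // a null result would be ignored as invalid.
         Function<String, String> foreignKeyExtractor = customerId -> customerId;
         return orders.join(
                 customers,
                 foreignKeyExtractor,
                 (customerId, customerName) -> customerId + ":" + customerName);
     }
 }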

join

<VR, KO, VO> KTable<K,VR> join(KTable<KO,VO> other,
                               BiFunction<? super K,? super V,? extends KO> foreignKeyExtractor,
                               ValueJoiner<? super V,? super VO,? extends VR> joiner)

Join records of this KTable with another KTable using non-windowed inner join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor.

Type Parameters:
VR - the value type of the result KTable
KO - the key type of the other KTable
VO - the value type of the other KTable

Parameters:
other - the other KTable to be joined with this KTable. Keyed by KO.
foreignKeyExtractor - a BiFunction that extracts the key (KO) from this table's key and value (K, V). If the result is null, the update is ignored as invalid.
joiner - a ValueJoiner that computes the join result for a pair of matching records

Returns:
a KTable that contains the result of joining this table with other

join

<VR, KO, VO> KTable<K,VR> join(KTable<KO,VO> other,
                               Function<? super V,? extends KO> foreignKeyExtractor,
                               ValueJoiner<? super V,? super VO,? extends VR> joiner,
                               TableJoined<K,KO> tableJoined)

Join records of this KTable with another KTable using non-windowed inner join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor. Type parameters and the other, foreignKeyExtractor, and joiner parameters are as for join(KTable, Function, ValueJoiner) above.

Parameters:
tableJoined - a TableJoined used to configure partitioners and names of internal topics and stores

Returns:
a KTable that contains the result of joining this table with other

join

<VR, KO, VO> KTable<K,VR> join(KTable<KO,VO> other,
                               BiFunction<? super K,? super V,? extends KO> foreignKeyExtractor,
                               ValueJoiner<? super V,? super VO,? extends VR> joiner,
                               TableJoined<K,KO> tableJoined)

Join records of this KTable with another KTable using non-windowed inner join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor. Type parameters and the other, foreignKeyExtractor, and joiner parameters are as for join(KTable, BiFunction, ValueJoiner) above.

Parameters:
tableJoined - a TableJoined used to configure partitioners and names of internal topics and stores

Returns:
a KTable that contains the result of joining this table with other
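A brief sketch of the TableJoined variants above; the base name is illustrative, and the orders/customers layout mirrors the earlier foreign-key sketch:

 import java.util.function.Function;
 import org.apache.kafka.streams.kstream.KTable;
 import org.apache.kafka.streams.kstream.TableJoined;

 public class ForeignKeyJoinNamingExample {
     static KTable<String, String> enrich(KTable<String, String> orders,
                                          KTable<String, String> customers) {
         Function<String, String> foreignKeyExtractor = customerId -> customerId;
         // TableJoined supplies a base name for the join's internal topics, stores, and processors.
         return orders.join(
                 customers,
                 foreignKeyExtractor,
                 (customerId, customerName) -> customerId + ":" + customerName,
                 TableJoined.as("orders-to-customers"));
     }
 }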

join

<VR, KO, VO> KTable<K,VR> join(KTable<KO,VO> other,
                               Function<? super V,? extends KO> foreignKeyExtractor,
                               ValueJoiner<? super V,? super VO,? extends VR> joiner,
                               Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

Join records of this KTable with another KTable using non-windowed inner join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor. Type parameters and the other, foreignKeyExtractor, and joiner parameters are as for join(KTable, Function, ValueJoiner) above.

Parameters:
materialized - a Materialized that describes how the StateStore for the resulting KTable should be materialized. Cannot be null

Returns:
a KTable that contains the result of joining this table with other

join

<VR, KO, VO> KTable<K,VR> join(KTable<KO,VO> other,
                               BiFunction<? super K,? super V,? extends KO> foreignKeyExtractor,
                               ValueJoiner<? super V,? super VO,? extends VR> joiner,
                               Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

Join records of this KTable with another KTable using non-windowed inner join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor. Type parameters and the other, foreignKeyExtractor, and joiner parameters are as for join(KTable, BiFunction, ValueJoiner) above.

Parameters:
materialized - a Materialized that describes how the StateStore for the resulting KTable should be materialized. Cannot be null

Returns:
a KTable that contains the result of joining this table with other

join

<VR, KO, VO> KTable<K,VR> join(KTable<KO,VO> other,
                               Function<? super V,? extends KO> foreignKeyExtractor,
                               ValueJoiner<? super V,? super VO,? extends VR> joiner,
                               TableJoined<K,KO> tableJoined,
                               Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

Join records of this KTable with another KTable using non-windowed inner join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor. Type parameters and the other, foreignKeyExtractor, and joiner parameters are as for join(KTable, Function, ValueJoiner) above.

Parameters:
tableJoined - a TableJoined used to configure partitioners and names of internal topics and stores
materialized - a Materialized that describes how the StateStore for the resulting KTable should be materialized. Cannot be null

Returns:
a KTable that contains the result of joining this table with other

join

<VR, KO, VO> KTable<K,VR> join(KTable<KO,VO> other,
                               BiFunction<? super K,? super V,? extends KO> foreignKeyExtractor,
                               ValueJoiner<? super V,? super VO,? extends VR> joiner,
                               TableJoined<K,KO> tableJoined,
                               Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

Join records of this KTable with another KTable using non-windowed inner join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor. Type parameters and the other, foreignKeyExtractor, and joiner parameters are as for join(KTable, BiFunction, ValueJoiner) above.

Parameters:
tableJoined - a TableJoined used to configure partitioners and names of internal topics and stores
materialized - a Materialized that describes how the StateStore for the resulting KTable should be materialized. Cannot be null

Returns:
a KTable that contains the result of joining this table with other

leftJoin

<VR, KO, VO> KTable<K,VR> leftJoin(KTable<KO,VO> other,
                                   Function<? super V,? extends KO> foreignKeyExtractor,
                                   ValueJoiner<? super V,? super VO,? extends VR> joiner)

Join records of this KTable with another KTable using non-windowed left join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor.

Type Parameters:
VR - the value type of the result KTable
KO - the key type of the other KTable
VO - the value type of the other KTable

Parameters:
other - the other KTable to be joined with this KTable. Keyed by KO.
foreignKeyExtractor - a Function that extracts the key (KO) from this table's value (V). If the extracted key is null, the right-hand side of the result will be null.
joiner - a ValueJoiner that computes the join result for a pair of matching records

Returns:
a KTable that contains the result of joining this table with other
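A minimal sketch of the foreign-key left join above, reusing the illustrative orders/customers layout; the joiner must tolerate a null customer value:

 import java.util.function.Function;
 import org.apache.kafka.streams.kstream.KTable;

 public class ForeignKeyLeftJoinExample {
     // Left variant: every order produces output, even before its customer record arrives.
     static KTable<String, String> enrich(KTable<String, String> orders,
                                          KTable<String, String> customers) {
         Function<String, String> foreignKeyExtractor = customerId -> customerId;
         return orders.leftJoin(
                 customers,
                 foreignKeyExtractor,
                 (customerId, customerName) ->
                         customerId + ":" + (customerName == null ? "<unknown>" : customerName));
     }
 }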

leftJoin

<VR, KO, VO> KTable<K,VR> leftJoin(KTable<KO,VO> other,
                                   BiFunction<? super K,? super V,? extends KO> foreignKeyExtractor,
                                   ValueJoiner<? super V,? super VO,? extends VR> joiner)

Join records of this KTable with another KTable using non-windowed left join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor.

Type Parameters:
VR - the value type of the result KTable
KO - the key type of the other KTable
VO - the value type of the other KTable

Parameters:
other - the other KTable to be joined with this KTable. Keyed by KO.
foreignKeyExtractor - a BiFunction that extracts the key (KO) from this table's key and value (K, V). If the extracted key is null, the right-hand side of the result will be null.
joiner - a ValueJoiner that computes the join result for a pair of matching records

Returns:
a KTable that contains the result of joining this table with other

leftJoin

<VR, KO, VO> KTable<K,VR> leftJoin(KTable<KO,VO> other,
                                   Function<? super V,? extends KO> foreignKeyExtractor,
                                   ValueJoiner<? super V,? super VO,? extends VR> joiner,
                                   TableJoined<K,KO> tableJoined)

Join records of this KTable with another KTable using non-windowed left join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor. Type parameters and the other, foreignKeyExtractor, and joiner parameters are as for leftJoin(KTable, Function, ValueJoiner) above.

Parameters:
tableJoined - a TableJoined used to configure partitioners and names of internal topics and stores

Returns:
a KTable that contains the result of joining this table with other

leftJoin

<VR, KO, VO> KTable<K,VR> leftJoin(KTable<KO,VO> other,
                                   BiFunction<? super K,? super V,? extends KO> foreignKeyExtractor,
                                   ValueJoiner<? super V,? super VO,? extends VR> joiner,
                                   TableJoined<K,KO> tableJoined)

Join records of this KTable with another KTable using non-windowed left join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor. Type parameters and the other, foreignKeyExtractor, and joiner parameters are as for leftJoin(KTable, BiFunction, ValueJoiner) above.

Parameters:
tableJoined - a TableJoined used to configure partitioners and names of internal topics and stores

Returns:
a KTable that contains the result of joining this table with other

leftJoin

<VR, KO, VO> KTable<K,VR> leftJoin(KTable<KO,VO> other,
                                   Function<? super V,? extends KO> foreignKeyExtractor,
                                   ValueJoiner<? super V,? super VO,? extends VR> joiner,
                                   Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

Join records of this KTable with another KTable using non-windowed left join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor. Type parameters and the other, foreignKeyExtractor, and joiner parameters are as for leftJoin(KTable, Function, ValueJoiner) above.

Parameters:
materialized - a Materialized that describes how the StateStore for the resulting KTable should be materialized. Cannot be null

Returns:
a KTable that contains the result of joining this table with other

leftJoin

<VR, KO, VO> KTable<K,VR> leftJoin(KTable<KO,VO> other,
                                   BiFunction<? super K,? super V,? extends KO> foreignKeyExtractor,
                                   ValueJoiner<? super V,? super VO,? extends VR> joiner,
                                   Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

Join records of this KTable with another KTable using non-windowed left join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor. Type parameters and the other, foreignKeyExtractor, and joiner parameters are as for leftJoin(KTable, BiFunction, ValueJoiner) above.

Parameters:
materialized - a Materialized that describes how the StateStore for the resulting KTable should be materialized. Cannot be null

Returns:
a KTable that contains the result of joining this table with other

leftJoin

<VR, KO, VO> KTable<K,VR> leftJoin(KTable<KO,VO> other,
                                   Function<? super V,? extends KO> foreignKeyExtractor,
                                   ValueJoiner<? super V,? super VO,? extends VR> joiner,
                                   TableJoined<K,KO> tableJoined,
                                   Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

Join records of this KTable with another KTable using non-windowed left join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor. Type parameters and the other, foreignKeyExtractor, and joiner parameters are as for leftJoin(KTable, Function, ValueJoiner) above.

Parameters:
tableJoined - a TableJoined used to configure partitioners and names of internal topics and stores
materialized - a Materialized that describes how the StateStore for the resulting KTable should be materialized. Cannot be null

Returns:
a KTable that contains the result of joining this table with other

leftJoin

<VR, KO, VO> KTable<K,VR> leftJoin(KTable<KO,VO> other,
                                   BiFunction<? super K,? super V,? extends KO> foreignKeyExtractor,
                                   ValueJoiner<? super V,? super VO,? extends VR> joiner,
                                   TableJoined<K,KO> tableJoined,
                                   Materialized<K,VR,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)

Join records of this KTable with another KTable using non-windowed left join, using the TableJoined instance for optional configurations including partitioners when the tables being joined use non-default partitioning, and also the base name for components of the join.

This is a foreign key join, where the joining key is determined by the foreignKeyExtractor. Type parameters and the other, foreignKeyExtractor, and joiner parameters are as for leftJoin(KTable, BiFunction, ValueJoiner) above.

Parameters:
tableJoined - a TableJoined used to configure partitioners and names of internal topics and stores
materialized - a Materialized that describes how the StateStore for the resulting KTable should be materialized. Cannot be null

Returns:
a KTable that contains the result of joining this table with other

queryableStoreName

String queryableStoreName()

Get the name of the local state store that can be used to query this KTable.

Returns:
the underlying state store name, or null if this KTable cannot be queried.
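A sketch of how queryableStoreName() is commonly combined with Interactive Queries; the running KafkaStreams instance and the String key/value types are assumptions of this example:

 import org.apache.kafka.streams.KafkaStreams;
 import org.apache.kafka.streams.StoreQueryParameters;
 import org.apache.kafka.streams.kstream.KTable;
 import org.apache.kafka.streams.state.QueryableStoreTypes;
 import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

 public class QueryableStoreExample {
     // Look up the current value for a key via Interactive Queries, if the table is materialized.
     static String lookup(KafkaStreams streams, KTable<String, String> table, String key) {
         String storeName = table.queryableStoreName();
         if (storeName == null) {
             return null; // the KTable was not materialized with a queryable store name
         }
         ReadOnlyKeyValueStore<String, String> store = streams.store(
                 StoreQueryParameters.fromNameAndType(storeName, QueryableStoreTypes.keyValueStore()));
         return store.get(key);
     }
 }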
diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/KeyValueMapper.html b/static/41/javadoc/org/apache/kafka/streams/kstream/KeyValueMapper.html
new file mode 100644
index 000000000..c19960eee
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/streams/kstream/KeyValueMapper.html
@@ -0,0 +1,174 @@

KeyValueMapper (kafka 4.1.0 API)

Interface KeyValueMapper<K,V,VR>

Type Parameters:
K - key type
V - value type
VR - mapped value type

Functional Interface:
This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.

@FunctionalInterface
public interface KeyValueMapper<K,V,VR>

The KeyValueMapper interface for mapping a key-value pair to a new value of arbitrary type. For example, it can be used to

- map from an input KeyValue pair to an output KeyValue pair with different key and/or value type (for this case output type VR == KeyValue<NewKeyType,NewValueType>)
- map from an input record to a new key (with arbitrary key type as specified by VR)

This is a stateless record-by-record operation, i.e., apply(Object, Object) is invoked individually for each record of a stream (cf. api.Processor for stateful record transformation). KeyValueMapper is a generalization of ValueMapper.

Method Summary

Modifier and Type | Method                | Description
VR                | apply(K key, V value) | Map a record with the given key and value to a new value.

      Method Details

apply

VR apply(K key, V value)

Map a record with the given key and value to a new value.

Parameters:
key - the key of the record
value - the value of the record

Returns:
the new value
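Two small illustrative KeyValueMapper lambdas matching the two use cases listed above; the constant names and transformations are arbitrary choices:

 import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.kstream.KeyValueMapper;

 public class KeyValueMapperExample {
     // Case 1: map to a new KeyValue pair (VR == KeyValue<..., ...>), e.g. for KStream#map().
     static final KeyValueMapper<String, String, KeyValue<String, Integer>> TO_LENGTH =
             (key, value) -> KeyValue.pair(key.toUpperCase(), value.length());

     // Case 2: map to a new key only (arbitrary VR), e.g. for KStream#selectKey().
     static final KeyValueMapper<String, String, Integer> VALUE_HASH =
             (key, value) -> value.hashCode();
 }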
diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Materialized.StoreType.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Materialized.StoreType.html
new file mode 100644
index 000000000..06036061c
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Materialized.StoreType.html
@@ -0,0 +1,280 @@

Materialized.StoreType (kafka 4.1.0 API)

Enum Class Materialized.StoreType

java.lang.Object
  java.lang.Enum<Materialized.StoreType>
    org.apache.kafka.streams.kstream.Materialized.StoreType

All Implemented Interfaces:
Serializable, Comparable<Materialized.StoreType>, Constable, Configurable, DslStoreSuppliers

Enclosing class:
Materialized<K,V,S extends StateStore>

public static enum Materialized.StoreType
extends Enum<Materialized.StoreType>
implements DslStoreSuppliers
diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Materialized.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Materialized.html
new file mode 100644
index 000000000..dab5524ef
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Materialized.html
@@ -0,0 +1,549 @@

Materialized (kafka 4.1.0 API)

Class Materialized<K,V,S extends StateStore>

java.lang.Object
  org.apache.kafka.streams.kstream.Materialized<K,V,S>

Type Parameters:
K - type of record key
V - type of record value
S - type of state store (note: state stores always have key/value types <Bytes,byte[]>)

public class Materialized<K,V,S extends StateStore>
extends Object

Used to describe how a StateStore should be materialized. You can either provide a custom StateStore backend through one of the provided methods accepting a supplier or use the default RocksDB backends by providing just a store name.

For example, you can read a topic as KTable and force a state store materialization to access the content via the Interactive Queries API:

 StreamsBuilder builder = new StreamsBuilder();
 KTable<Integer, Integer> table = builder.table(
   "topicName",
   Materialized.as("queryable-store-name"));

Correct Usage When Providing Serdes:

To configure both the name of the store and the Serdes for the key and value, use the following pattern:

 Materialized.<KeyType, ValueType, StateStore>as("MyStoreName")
     .withKeySerde(keySerde)
     .withValueSerde(valueSerde);

This ensures that the store name is retained while configuring the key and value serdes.

Warning: If you use the (static) with(Serde, Serde) method after calling as(String), the instance created by as(String) will be replaced by a new Materialized instance, and any configuration set on the first instance (e.g., store name, logging settings) will be lost.

For example, the following code is incorrect because it discards the configuration of the store name (calling static methods on an object/instance is an anti-pattern):

 // This will not work as expected:
 Materialized.<KeyType, ValueType, StateStore>as("MyStoreName")
     .with(keySerde, valueSerde);  // The store name "MyStoreName" is lost

Instead, use the proper pattern of chaining withKeySerde and withValueSerde.

      Method Details

      +
        +
      • +
        +

        as

        +
        public static <K, +V, +S extends StateStore> +Materialized<K,V,S> as(DslStoreSuppliers storeSuppliers)
        +
        Materialize a StateStore with the given DslStoreSuppliers.
        +
        +
        Type Parameters:
        +
        K - key type of the store
        +
        V - value type of the store
        +
        S - type of the StateStore
        +
        Parameters:
        +
        storeSuppliers - the type of the state store
        +
        Returns:
        +
a new Materialized instance with the given store suppliers
        +
        +
        +
      • +
      • +
        +

        as

        +
        public static <K, +V, +S extends StateStore> +Materialized<K,V,S> as(String storeName)
        +
        Materialize a StateStore with the given name. +

        + This method sets the name of the state store to be used during materialization. You can provide additional + configurations like key and value Serdes using withKeySerde(Serde) and + withValueSerde(Serde). +

        + Warning: Do not use with(Serde, Serde) after calling this method, as it creates a new + Materialized instance, which discards the store name and any other configurations set earlier. +

        + Correct usage: +

        
        + Materialized.<KeyType, ValueType, StateStore>as("MyStoreName")
        +     .withKeySerde(keySerde)
        +     .withValueSerde(valueSerde);
        + 
        +

        + Incorrect usage (store name is lost): +

        
        + Materialized.<KeyType, ValueType, StateStore>as("MyStoreName")
        +     .with(keySerde, valueSerde);  // Store name is lost
        + 
        +
        +
        Type Parameters:
        +
        K - key type of the store
        +
        V - value type of the store
        +
        S - type of the StateStore
        +
        Parameters:
        +
        storeName - the name of the underlying KTable state store; valid characters are ASCII + alphanumerics, '.', '_' and '-'.
        +
        Returns:
        +
        a new Materialized instance with the given storeName
        +
        +
        +
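For example, a minimal sketch (topic name, store name, and value types here are purely illustrative, assuming the usual org.apache.kafka.streams imports) of materializing a count under an explicit store name with serdes configured via withKeySerde/withValueSerde:

    StreamsBuilder builder = new StreamsBuilder();
    KTable<String, Long> counts = builder
        .stream("words", Consumed.with(Serdes.String(), Serdes.Long()))   // illustrative input topic
        .groupByKey()
        .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store")
            .withKeySerde(Serdes.String())
            .withValueSerde(Serdes.Long()));                              // count values are Long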
      • +
      • +
        +

        as

        +
        public static <K, +V> +Materialized<K,V,WindowStore<org.apache.kafka.common.utils.Bytes,byte[]>> as(WindowBytesStoreSupplier supplier)
        +
        Materialize a WindowStore using the provided WindowBytesStoreSupplier. + + Important: Custom subclasses are allowed here, but they should respect the retention contract: + Window stores are required to retain windows at least as long as (window size + window grace period). + Stores constructed via Stores already satisfy this contract.
        +
        +
        Type Parameters:
        +
        K - key type of the store
        +
        V - value type of the store
        +
        Parameters:
        +
        supplier - the WindowBytesStoreSupplier used to materialize the store
        +
        Returns:
        +
        a new Materialized instance with the given supplier
        +
        +
        +
      • +
      • +
        +

        as

        +
        public static <K, +V> +Materialized<K,V,SessionStore<org.apache.kafka.common.utils.Bytes,byte[]>> as(SessionBytesStoreSupplier supplier)
        +
        Materialize a SessionStore using the provided SessionBytesStoreSupplier. + + Important: Custom subclasses are allowed here, but they should respect the retention contract: + Session stores are required to retain windows at least as long as (session inactivity gap + session grace period). + Stores constructed via Stores already satisfy this contract.
        +
        +
        Type Parameters:
        +
        K - key type of the store
        +
        V - value type of the store
        +
        Parameters:
        +
        supplier - the SessionBytesStoreSupplier used to materialize the store
        +
        Returns:
        +
        a new Materialized instance with the given supplier
        +
        +
        +
      • +
      • +
        +

        as

        +
        public static <K, +V> +Materialized<K,V,KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>> as(KeyValueBytesStoreSupplier supplier)
        +
        Materialize a KeyValueStore using the provided KeyValueBytesStoreSupplier.
        +
        +
        Type Parameters:
        +
        K - key type of the store
        +
        V - value type of the store
        +
        Parameters:
        +
        supplier - the KeyValueBytesStoreSupplier used to materialize the store
        +
        Returns:
        +
        a new Materialized instance with the given supplier
        +
        +
        +
      • +
      • +
        +

        with

        +
        public static <K, +V, +S extends StateStore> +Materialized<K,V,S> with(Serde<K> keySerde, + Serde<V> valueSerde)
        +
        Materialize a StateStore with the provided key and value Serdes. +

        + Note: If this method is used after as(String), the original Materialized instance will be + replaced with a new instance, and any configuration on the first instance (e.g., store name) will be lost. To + configure both a store name and key/value serde, use withKeySerde(Serde) and + withValueSerde(Serde) instead. +

        + Correct usage with Serde: +

        
        + Materialized.<KeyType, ValueType, StateStore>as("MyStoreName")
        +     .withKeySerde(keySerde)
        +     .withValueSerde(valueSerde);
        + 
        +

        + Incorrect usage (store name will be lost): +

        
        + Materialized.<KeyType, ValueType, StateStore>as("MyStoreName")
        +     .with(keySerde, valueSerde);  // Store name is lost
        + 
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        S - store type
        +
        Parameters:
        +
        keySerde - the key Serde to use. If the Serde is null, then the default key + serde from configs will be used
        +
        valueSerde - the value Serde to use. If the Serde is null, then the default value + serde from configs will be used
        +
        Returns:
        +
        a new Materialized instance with the given key and value serdes
        +
        +
        +
      • +
      • +
        +

        withValueSerde

        +
        public Materialized<K,V,S> withValueSerde(Serde<V> valueSerde)
        +
        Set the valueSerde the materialized StateStore will use.
        +
        +
        Parameters:
        +
        valueSerde - the value Serde to use. If the Serde is null, then the default value + serde from configs will be used. If the serialized bytes is null for put operations, + it is treated as delete operation
        +
        Returns:
        +
        itself
        +
        +
        +
      • +
      • +
        +

        withKeySerde

        +
        public Materialized<K,V,S> withKeySerde(Serde<K> keySerde)
        +
        Set the keySerde the materialized StateStore will use.
        +
        +
        Parameters:
        +
        keySerde - the key Serde to use. If the Serde is null, then the default key + serde from configs will be used
        +
        Returns:
        +
        itself
        +
        +
        +
      • +
      • +
        +

        withLoggingEnabled

        +
        public Materialized<K,V,S> withLoggingEnabled(Map<String,String> config)
        +
        Indicates that a changelog should be created for the store. The changelog will be created + with the provided configs. +

        + Note: Any unrecognized configs will be ignored.

        +
        +
        Parameters:
        +
        config - any configs that should be applied to the changelog
        +
        Returns:
        +
        itself
        +
        +
        +
      • +
      • +
        +

        withLoggingDisabled

        +
        public Materialized<K,V,S> withLoggingDisabled()
        +
        Disable change logging for the materialized StateStore.
        +
        +
        Returns:
        +
        itself
        +
        +
        +
      • +
      • +
        +

        withCachingEnabled

        +
        public Materialized<K,V,S> withCachingEnabled()
        +
        Enable caching for the materialized StateStore.
        +
        +
        Returns:
        +
        itself
        +
        +
        +
      • +
      • +
        +

        withCachingDisabled

        +
        public Materialized<K,V,S> withCachingDisabled()
        +
        Disable caching for the materialized StateStore.
        +
        +
        Returns:
        +
        itself
        +
        +
        +
      • +
      • +
        +

        withRetention

        +
        public Materialized<K,V,S> withRetention(Duration retention) + throws IllegalArgumentException
        +
        Configure retention period for window and session stores. Ignored for key/value stores. + + Overridden by pre-configured store suppliers + (as(SessionBytesStoreSupplier) or as(WindowBytesStoreSupplier)). + + Note that the retention period must be at least long enough to contain the windowed data's entire life cycle, + from window-start through window-end, and for the entire grace period. If not specified, the retention + period would be set as the window length (from window-start through window-end) plus the grace period.
        +
        +
        Parameters:
        +
        retention - the retention time
        +
        Returns:
        +
        itself
        +
        Throws:
        +
        IllegalArgumentException - if retention is negative or can't be represented as long milliseconds
        +
        +
        +
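As a hedged sketch (window size, grace period, store name, and the source stream are illustrative), withRetention(Duration) is typically applied to a windowed aggregation; the retention must cover window size plus grace:

    // window size (5 min) + grace (1 min) = 6 min, so a 1 hour retention is safely above the minimum
    KTable<Windowed<String>, Long> counts = stream
        .groupByKey()
        .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofMinutes(5), Duration.ofMinutes(1)))
        .count(Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("windowed-counts")
            .withRetention(Duration.ofHours(1)));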
      • +
      • +
        +

        withStoreType

        +
        public Materialized<K,V,S> withStoreType(DslStoreSuppliers storeSuppliers) + throws IllegalArgumentException
        +
        Set the type of the materialized StateStore.
        +
        +
        Parameters:
        +
        storeSuppliers - the store type Materialized.StoreType to use.
        +
        Returns:
        +
        itself
        +
        Throws:
        +
        IllegalArgumentException - if store supplier is also pre-configured
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Merger.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Merger.html new file mode 100644 index 000000000..81593141d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Merger.html @@ -0,0 +1,150 @@ + + + + +Merger (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Merger<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - key type
    +
    V - aggregate value type
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface Merger<K,V>
    +
    The interface for merging aggregate values for SessionWindows with the given key.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      apply(K aggKey, + V aggOne, + V aggTwo)
      +
      +
      Compute a new aggregate from the key and two aggregates.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        apply

        +
        V apply(K aggKey, + V aggOne, + V aggTwo)
        +
        Compute a new aggregate from the key and two aggregates.
        +
        +
        Parameters:
        +
        aggKey - the key of the record
        +
        aggOne - the first aggregate
        +
        aggTwo - the second aggregate
        +
        Returns:
        +
        the new aggregate value
        +
        +
        +
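For example (key/value types and the session gap are illustrative), a summing Merger can be written as a lambda and passed to a session-windowed aggregation:

    // combine the aggregates of two overlapping sessions for the same key
    Merger<String, Long> sessionMerger = (aggKey, aggOne, aggTwo) -> aggOne + aggTwo;

    KTable<Windowed<String>, Long> sums = grouped   // an assumed KGroupedStream<String, Long>
        .windowedBy(SessionWindows.ofInactivityGapAndGrace(Duration.ofMinutes(5), Duration.ofMinutes(1)))
        .aggregate(() -> 0L, (key, value, agg) -> agg + value, sessionMerger);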
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Named.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Named.html new file mode 100644 index 000000000..540eaa0a2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Named.html @@ -0,0 +1,151 @@ + + + + +Named (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Named

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.Named
    +
    +
    +
    +
    public class Named +extends Object
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        as

        +
        public static Named as(String name)
        +
        Create a Named instance with provided name.
        +
        +
        Parameters:
        +
        name - the processor name to be used. If null a default processor name will be generated.
        +
        Returns:
        +
        A new Named instance configured with name
        +
        Throws:
        +
        TopologyException - if an invalid name is specified; valid characters are ASCII alphanumerics, '.', '_' and '-'.
        +
        +
        +
      • +
      • +
        +

        withName

        +
        public Named withName(String name)
        +
        +
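A small hedged example (predicate and name are illustrative) of using Named to give a processor a stable name in the topology:

    KStream<String, Long> large = stream.filter(
        (key, value) -> value != null && value > 100L,
        Named.as("filter-large-values"));   // this name appears in Topology#describe()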
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Predicate.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Predicate.html new file mode 100644 index 000000000..c410c59e0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Predicate.html @@ -0,0 +1,161 @@ + + + + +Predicate (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Predicate<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - key type
    +
    V - value type
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface Predicate<K,V>
    +
The Predicate interface represents a predicate (boolean-valued function) of a KeyValue pair. + This is a stateless record-by-record operation, i.e., test(Object, Object) is invoked individually for each + record of a stream.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      +
      test(K key, + V value)
      +
      +
      Test if the record with the given key and value satisfies the predicate.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        test

        +
        boolean test(K key, + V value)
        +
        Test if the record with the given key and value satisfies the predicate.
        +
        +
        Parameters:
        +
        key - the key of the record
        +
        value - the value of the record
        +
        Returns:
        +
true if the KeyValue pair satisfies the predicate, false otherwise
        +
        +
        +
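For example (types are illustrative), a Predicate is typically supplied as a lambda to KStream#filter:

    Predicate<String, Long> isPositive = (key, value) -> value != null && value > 0L;
    KStream<String, Long> positives = stream.filter(isPositive);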
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Printed.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Printed.html new file mode 100644 index 000000000..8e85ac378 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Printed.html @@ -0,0 +1,248 @@ + + + + +Printed (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Printed<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.Printed<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - key type
    +
    V - value type
    +
    +
    +
    public class Printed<K,V> +extends Object
    +
    An object to define the options used when printing a KStream.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        toFile

        +
        public static <K, +V> Printed<K,V> toFile(String filePath)
        +
        Print the records of a KStream to a file.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        filePath - path of the file
        +
        Returns:
        +
        a new Printed instance
        +
        +
        +
      • +
      • +
        +

        toSysOut

        +
        public static <K, +V> Printed<K,V> toSysOut()
        +
        Print the records of a KStream to system out.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Returns:
        +
        a new Printed instance
        +
        +
        +
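For example (label and output format are illustrative), the options can be chained before handing them to KStream#print:

    stream.print(Printed.<String, Long>toSysOut()
        .withLabel("debug")
        .withKeyValueMapper((key, value) -> String.format("(%s, %d)", key, value)));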
      • +
      • +
        +

        withLabel

        +
        public Printed<K,V> withLabel(String label)
        +
        Print the records of a KStream with the provided label.
        +
        +
        Parameters:
        +
        label - label to use
        +
        Returns:
        +
        this
        +
        +
        +
      • +
      • +
        +

        withKeyValueMapper

        +
        public Printed<K,V> withKeyValueMapper(KeyValueMapper<? super K,? super V,String> mapper)
        +
Print the records of a KStream with the provided KeyValueMapper. + The provided KeyValueMapper's mapped value type must be String. +

        + The example below shows how to customize output data. +

        
        + final KeyValueMapper<Integer, String, String> mapper = new KeyValueMapper<Integer, String, String>() {
        +     public String apply(Integer key, String value) {
        +         return String.format("(%d, %s)", key, value);
        +     }
        + };
        + 
        + + Implementors will need to override toString() for keys and values that are not of type String, + Integer etc. to get meaningful information.
        +
        +
        Parameters:
        +
        mapper - mapper to use
        +
        Returns:
        +
        this
        +
        +
        +
      • +
      • +
        +

        withName

        +
        public Printed<K,V> withName(String processorName)
        +
        Print the records of a KStream with provided processor name.
        +
        +
        Parameters:
        +
        processorName - the processor name to be used. If null a default processor name will be generated
        +
        Returns:
        +
        this
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Produced.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Produced.html new file mode 100644 index 000000000..5cbb1547a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Produced.html @@ -0,0 +1,402 @@ + + + + +Produced (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Produced<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.Produced<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - key type
    +
    V - value type
    +
    +
    +
    public class Produced<K,V> +extends Object
    +
    This class is used to provide the optional parameters when producing to new topics + using KStream.to(String, Produced).
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        with

        +
        public static <K, +V> Produced<K,V> with(Serde<K> keySerde, + Serde<V> valueSerde)
        +
        Create a Produced instance with provided keySerde and valueSerde.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        keySerde - Serde to use for serializing the key
        +
        valueSerde - Serde to use for serializing the value
        +
        Returns:
        +
        A new Produced instance configured with keySerde and valueSerde
        +
        See Also:
        +
        + +
        +
        +
        +
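For example (the output topic name is hypothetical), explicit serdes are passed when writing a stream out:

    stream.to("output-topic", Produced.with(Serdes.String(), Serdes.Long()));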
      • +
      • +
        +

        with

        +
        public static <K, +V> Produced<K,V> with(Serde<K> keySerde, + Serde<V> valueSerde, + StreamPartitioner<? super K,? super V> partitioner)
        +
        Create a Produced instance with provided keySerde, valueSerde, and partitioner.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        keySerde - Serde to use for serializing the key
        +
        valueSerde - Serde to use for serializing the value
        +
        partitioner - the function used to determine how records are distributed among partitions of the topic, + if not specified and keySerde provides a WindowedSerializer for the key + WindowedStreamPartitioner will be used—otherwise DefaultStreamPartitioner + will be used
        +
        Returns:
        +
        A new Produced instance configured with keySerde, valueSerde, and partitioner
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        as

        +
        public static <K, +V> Produced<K,V> as(String processorName)
        +
        Create an instance of Produced with provided processor name.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        processorName - the processor name to be used. If null a default processor name will be generated
        +
        Returns:
        +
        a new instance of Produced
        +
        +
        +
      • +
      • +
        +

        keySerde

        +
        public static <K, +V> Produced<K,V> keySerde(Serde<K> keySerde)
        +
        Create a Produced instance with provided keySerde.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        keySerde - Serde to use for serializing the key
        +
        Returns:
        +
        A new Produced instance configured with keySerde
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        valueSerde

        +
        public static <K, +V> Produced<K,V> valueSerde(Serde<V> valueSerde)
        +
        Create a Produced instance with provided valueSerde.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
valueSerde - Serde to use for serializing the value
        +
        Returns:
        +
        A new Produced instance configured with valueSerde
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        streamPartitioner

        +
        public static <K, +V> Produced<K,V> streamPartitioner(StreamPartitioner<? super K,? super V> partitioner)
        +
        Create a Produced instance with provided partitioner.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        partitioner - the function used to determine how records are distributed among partitions of the topic, + if not specified and the key serde provides a WindowedSerializer for the key + WindowedStreamPartitioner will be used—otherwise DefaultStreamPartitioner will be used
        +
        Returns:
        +
        A new Produced instance configured with partitioner
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        withStreamPartitioner

        +
        public Produced<K,V> withStreamPartitioner(StreamPartitioner<? super K,? super V> partitioner)
        +
        Produce records using the provided partitioner.
        +
        +
        Parameters:
        +
        partitioner - the function used to determine how records are distributed among partitions of the topic, + if not specified and the key serde provides a WindowedSerializer for the key + WindowedStreamPartitioner will be used—otherwise DefaultStreamPartitioner will be used
        +
        Returns:
        +
        this
        +
        +
        +
      • +
      • +
        +

        withValueSerde

        +
        public Produced<K,V> withValueSerde(Serde<V> valueSerde)
        +
        Produce records using the provided valueSerde.
        +
        +
        Parameters:
        +
        valueSerde - Serde to use for serializing the value
        +
        Returns:
        +
        this
        +
        +
        +
      • +
      • +
        +

        withKeySerde

        +
        public Produced<K,V> withKeySerde(Serde<K> keySerde)
        +
        Produce records using the provided keySerde.
        +
        +
        Parameters:
        +
        keySerde - Serde to use for serializing the key
        +
        Returns:
        +
        this
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        withName

        +
        public Produced<K,V> withName(String name)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Reducer.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Reducer.html new file mode 100644 index 000000000..694a63f99 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Reducer.html @@ -0,0 +1,166 @@ + + + + +Reducer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Reducer<V>

    +
    +
    +
    +
    Type Parameters:
    +
    V - value type
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface Reducer<V>
    +
    The Reducer interface for combining two values of the same type into a new value. + In contrast to Aggregator the result type must be the same as the input type. +

    + The provided values can be either original values from input KeyValue pair records or be a previously + computed result from apply(Object, Object). +

    + Reducer can be used to implement aggregation functions like sum, min, or max.

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      apply(V value1, + V value2)
      +
      +
      Aggregate the two given values into a single one.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        apply

        +
        V apply(V value1, + V value2)
        +
        Aggregate the two given values into a single one.
        +
        +
        Parameters:
        +
        value1 - the first value for the aggregation
        +
        value2 - the second value for the aggregation
        +
        Returns:
        +
        the aggregated value
        +
        +
        +
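For example (types and the grouping are illustrative), a sum Reducer as a lambda:

    Reducer<Long> sum = (value1, value2) -> value1 + value2;
    KTable<String, Long> totals = groupedStream.reduce(sum);   // groupedStream is an assumed KGroupedStream<String, Long>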
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Repartitioned.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Repartitioned.html new file mode 100644 index 000000000..984d085e2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Repartitioned.html @@ -0,0 +1,350 @@ + + + + +Repartitioned (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Repartitioned<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.Repartitioned<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - key type
    +
    V - value type
    +
    +
    +
    public class Repartitioned<K,V> +extends Object
    +
    This class is used to provide the optional parameters for internal repartition topics.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        as

        +
        public static <K, +V> Repartitioned<K,V> as(String name)
        +
        Create a Repartitioned instance with the provided name used as part of the repartition topic.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
name - the name used as a processor name and as part of the repartition topic name.
        +
        Returns:
        +
        A new Repartitioned instance configured with processor name and repartition topic name
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        with

        +
        public static <K, +V> Repartitioned<K,V> with(Serde<K> keySerde, + Serde<V> valueSerde)
        +
        Create a Repartitioned instance with provided key serde and value serde.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        keySerde - Serde to use for serializing the key
        +
        valueSerde - Serde to use for serializing the value
        +
        Returns:
        +
        A new Repartitioned instance configured with key serde and value serde
        +
        See Also:
        +
        + +
        +
        +
        +
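As a hedged sketch (name, partition count, and serdes are illustrative), the options are usually chained and handed to KStream#repartition:

    KStream<String, Long> repartitioned = stream.repartition(
        Repartitioned.<String, Long>with(Serdes.String(), Serdes.Long())
            .withName("rekeyed")               // becomes part of the repartition topic name
            .withNumberOfPartitions(6));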
      • +
      • +
        +

        streamPartitioner

        +
        public static <K, +V> Repartitioned<K,V> streamPartitioner(StreamPartitioner<K,V> partitioner)
        +
        Create a Repartitioned instance with provided partitioner.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        partitioner - the function used to determine how records are distributed among partitions of the topic, + if not specified and the key serde provides a WindowedSerializer for the key + WindowedStreamPartitioner will be used—otherwise DefaultStreamPartitioner will be used
        +
        Returns:
        +
        A new Repartitioned instance configured with partitioner
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        numberOfPartitions

        +
        public static <K, +V> Repartitioned<K,V> numberOfPartitions(int numberOfPartitions)
        +
        Create a Repartitioned instance with provided number of partitions for repartition topic.
        +
        +
        Type Parameters:
        +
        K - key type
        +
        V - value type
        +
        Parameters:
        +
        numberOfPartitions - number of partitions used when creating repartition topic
        +
        Returns:
        +
A new Repartitioned instance configured with the number of partitions
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        withName

        +
        public Repartitioned<K,V> withName(String name)
        +
        Create a new instance of Repartitioned with the provided name used as part of repartition topic and processor name.
        +
        +
        Parameters:
        +
        name - the name used for the processor name and as part of the repartition topic
        +
        Returns:
        +
        a new Repartitioned instance configured with the name
        +
        +
        +
      • +
      • +
        +

        withNumberOfPartitions

        +
        public Repartitioned<K,V> withNumberOfPartitions(int numberOfPartitions)
        +
        Create a new instance of Repartitioned with the provided number of partitions for repartition topic.
        +
        +
        Parameters:
        +
numberOfPartitions - the number of partitions used when creating the repartition topic
        +
        Returns:
        +
        a new Repartitioned instance configured with the number of partitions
        +
        +
        +
      • +
      • +
        +

        withKeySerde

        +
        public Repartitioned<K,V> withKeySerde(Serde<K> keySerde)
        +
        Create a new instance of Repartitioned with the provided key serde.
        +
        +
        Parameters:
        +
        keySerde - Serde to use for serializing the key
        +
        Returns:
        +
        a new Repartitioned instance configured with the key serde
        +
        +
        +
      • +
      • +
        +

        withValueSerde

        +
        public Repartitioned<K,V> withValueSerde(Serde<V> valueSerde)
        +
        Create a new instance of Repartitioned with the provided value serde.
        +
        +
        Parameters:
        +
        valueSerde - Serde to use for serializing the value
        +
        Returns:
        +
        a new Repartitioned instance configured with the value serde
        +
        +
        +
      • +
      • +
        +

        withStreamPartitioner

        +
        public Repartitioned<K,V> withStreamPartitioner(StreamPartitioner<K,V> partitioner)
        +
        Create a new instance of Repartitioned with the provided partitioner.
        +
        +
        Parameters:
        +
        partitioner - the function used to determine how records are distributed among partitions of the topic, + if not specified and the key serde provides a WindowedSerializer for the key + WindowedStreamPartitioner will be used—otherwise DefaultStreamPartitioner will be used
        +
        Returns:
        +
        a new Repartitioned instance configured with provided partitioner
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedCogroupedKStream.html b/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedCogroupedKStream.html new file mode 100644 index 000000000..acca9a9e4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedCogroupedKStream.html @@ -0,0 +1,376 @@ + + + + +SessionWindowedCogroupedKStream (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface SessionWindowedCogroupedKStream<K,V>

    +
    +
    +
    +
    public interface SessionWindowedCogroupedKStream<K,V>
    +
    Same as a SessionWindowedKStream, however, for multiple co-grouped KStreams.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        aggregate

        +
        KTable<Windowed<K>,V> aggregate(Initializer<V> initializer, + Merger<? super K,V> sessionMerger)
        +
        Aggregate the values of records in these streams by the grouped key and defined sessions. + Note that sessions are generated on a per-key basis and records with different keys create independent sessions. + Records with null key or value are ignored. + The result is written into a local SessionStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record per session is processed to + provide an initial intermediate aggregation result that is used to process the first record per session. + The specified Aggregator (as specified in KGroupedStream.cogroup(Aggregator) or + CogroupedKStream.cogroup(KGroupedStream, Aggregator)) is applied for each input record and computes a new + aggregate using the current aggregate (or for the very first record using the intermediate aggregation result + provided via the Initializer) and the record's value. + The specified Merger is used to merge two existing sessions into one, i.e., when the windows overlap, + they are merged into a single session and the old sessions are discarded. + Thus, aggregate() can be used to compute aggregate functions like count or sum etc. +

        + The default key and value serde from the config will be used for serializing the result. + If a different serde is required then you should use aggregate(Initializer, Merger, Materialized). +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        sessionMerger - a Merger that combines two aggregation results. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key per session
        +
        +
        +
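A hedged end-to-end sketch (stream names, gap/grace values, and the aggregation logic are illustrative) of how such a co-grouped session aggregation is typically assembled:

    KGroupedStream<String, Long> clicks = clickStream.groupByKey();
    KGroupedStream<String, Long> views = viewStream.groupByKey();

    KTable<Windowed<String>, Long> activity = clicks
        .cogroup((key, value, agg) -> agg + value)          // Aggregator for the first stream
        .cogroup(views, (key, value, agg) -> agg + value)   // Aggregator for the second stream
        .windowedBy(SessionWindows.ofInactivityGapAndGrace(Duration.ofMinutes(10), Duration.ofMinutes(1)))
        .aggregate(() -> 0L, (aggKey, aggOne, aggTwo) -> aggOne + aggTwo);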
      • +
      • +
        +

        aggregate

        +
        KTable<Windowed<K>,V> aggregate(Initializer<V> initializer, + Merger<? super K,V> sessionMerger, + Named named)
        +
        Aggregate the values of records in these streams by the grouped key and defined sessions. + Note that sessions are generated on a per-key basis and records with different keys create independent sessions. + Records with null key or value are ignored. + The result is written into a local SessionStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record per session is processed to + provide an initial intermediate aggregation result that is used to process the first record per session. + The specified Aggregator (as specified in KGroupedStream.cogroup(Aggregator) or + CogroupedKStream.cogroup(KGroupedStream, Aggregator)) is applied for each input record and computes a new + aggregate using the current aggregate (or for the very first record using the intermediate aggregation result + provided via the Initializer) and the record's value. + The specified Merger is used to merge two existing sessions into one, i.e., when the windows overlap, + they are merged into a single session and the old sessions are discarded. + Thus, aggregate() can be used to compute aggregate functions like count or sum etc. +

        + The default key and value serde from the config will be used for serializing the result. + If a different serde is required then you should use + aggregate(Initializer, Merger, Named, Materialized). +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key. + The rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        sessionMerger - a Merger that combines two aggregation results. Cannot be null.
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key per session
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        KTable<Windowed<K>,V> aggregate(Initializer<V> initializer, + Merger<? super K,V> sessionMerger, + Materialized<K,V,SessionStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Aggregate the values of records in these streams by the grouped key and defined sessions. + Records with null key or value are ignored. + The result is written into a local SessionStore (which is basically an ever-updating materialized view) + that can be queried using the store name as provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record (per key) in each window is + processed to provide an initial intermediate aggregation result that is used to process the first record for + the session (per key). + The specified Aggregator (as specified in KGroupedStream.cogroup(Aggregator) or + CogroupedKStream.cogroup(KGroupedStream, Aggregator)) is applied for each input record and computes a new + aggregate using the current aggregate (or for the very first record using the intermediate aggregation result + provided via the Initializer) and the record's value. + The specified Merger is used to merge two existing sessions into one, i.e., when the windows overlap, + they are merged into a single session and the old sessions are discarded. + Thus, aggregate() can be used to compute aggregate functions like count or sum etc. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local SessionStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // counting words
+ String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
        + ReadOnlySessionStore<String,Long> localWindowStore = streams.store(storeQueryParams);
        +
        + String key = "some-word";
        + long fromTime = ...;
        + long toTime = ...;
+ WindowStoreIterator<Long> aggregateStore = localWindowStore.fetch(key, fromTime, toTime); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

+ For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and + cannot contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + provided store name defined in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        sessionMerger - a Merger that combines two aggregation results. Cannot be null.
        +
        materialized - a Materialized config used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key within a window
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        KTable<Windowed<K>,V> aggregate(Initializer<V> initializer, + Merger<? super K,V> sessionMerger, + Named named, + Materialized<K,V,SessionStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Aggregate the values of records in these streams by the grouped key and defined sessions. + Records with null key or value are ignored. + The result is written into a local SessionStore (which is basically an ever-updating materialized view) + that can be queried using the store name as provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record (per key) in each window is + processed to provide an initial intermediate aggregation result that is used to process the first record for + the session (per key). + The specified Aggregator (as specified in KGroupedStream.cogroup(Aggregator) or + CogroupedKStream.cogroup(KGroupedStream, Aggregator)) is applied for each input record and computes a new + aggregate using the current aggregate (or for the very first record using the intermediate aggregation result + provided via the Initializer) and the record's value. + The specified Merger is used to merge two existing sessions into one, i.e., when the windows overlap, + they are merged into a single session and the old sessions are discarded. + Thus, aggregate() can be used to compute aggregate functions like count or sum etc. +

        + Not all updates might get sent downstream, as an internal cache will be used to deduplicate consecutive updates + to the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

+ To query the local SessionStore it must be obtained via + KafkaStreams.store(StoreQueryParameters): +

        
        + KafkaStreams streams = ... // some windowed aggregation on value type double
        + String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
        + ReadOnlySessionStore<String,Long> localWindowStore = streams.store(storeQueryParams);
        + String key = "some-key";
        + KeyValueIterator<Windowed<String>, Long> aggForKeyForSession = localWindowStore.fetch(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

+ For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and + cannot contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + provided store name defined in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        sessionMerger - a Merger that combines two aggregation results. Cannot be null.
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        materialized - a Materialized config used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key per session
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedDeserializer.html b/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedDeserializer.html new file mode 100644 index 000000000..176556b39 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedDeserializer.html @@ -0,0 +1,276 @@ + + + + +SessionWindowedDeserializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SessionWindowedDeserializer<T>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.SessionWindowedDeserializer<T>
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Deserializer<Windowed<T>>
    +
    +
    +
    public class SessionWindowedDeserializer<T> +extends Object +implements Deserializer<Windowed<T>>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        WINDOWED_INNER_DESERIALIZER_CLASS

        +
        public static final String WINDOWED_INNER_DESERIALIZER_CLASS
        +
        Default deserializer for the inner deserializer class of a windowed record. Must implement the Serde interface.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SessionWindowedDeserializer

        +
        public SessionWindowedDeserializer()
        +
        +
      • +
      • +
        +

        SessionWindowedDeserializer

        +
        public SessionWindowedDeserializer(Deserializer<T> inner)
        +
        +
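A hedged example (key type and property wiring are illustrative) of the two ways to obtain a usable instance:

    // direct construction with an inner deserializer
    Deserializer<Windowed<String>> windowedKeys =
        new SessionWindowedDeserializer<>(Serdes.String().deserializer());

    // or no-arg construction plus configuration of the inner Serde class
    Map<String, Object> props = new HashMap<>();
    props.put(SessionWindowedDeserializer.WINDOWED_INNER_DESERIALIZER_CLASS, Serdes.StringSerde.class);
    SessionWindowedDeserializer<String> configured = new SessionWindowedDeserializer<>();
    configured.configure(props, true);   // true: used for keys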
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        public void configure(Map<String,?> configs, + boolean isKey)
        +
        Description copied from interface: Deserializer
        +
        Configure this class.
        +
        +
        Specified by:
        +
        configure in interface Deserializer<T>
        +
        Parameters:
        +
        configs - configs in key/value pairs
        +
        isKey - whether the deserializer is used for the key or the value
        +
        +
        +
      • +
      • +
        +

        deserialize

        +
        public Windowed<T> deserialize(String topic, + byte[] data)
        +
        Description copied from interface: Deserializer
        +
        Deserialize a record value from a byte array into a value or object. + +

        It is recommended to deserialize a null byte array to a null object.

        +
        +
        Specified by:
        +
        deserialize in interface Deserializer<T>
        +
        Parameters:
        +
        topic - topic associated with the data
        +
        data - serialized bytes; may be null
        +
        Returns:
        +
        deserialized typed data; may be null
        +
        +
        +
      • +
      • +
        +

        close

        +
        public void close()
        +
        Description copied from interface: Deserializer
        +
        Close this deserializer. + +

        This method must be idempotent as it may be called multiple times.

        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        Specified by:
        +
        close in interface Closeable
        +
        Specified by:
        +
        close in interface Deserializer<T>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedKStream.html b/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedKStream.html new file mode 100644 index 000000000..d5a41da9a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedKStream.html @@ -0,0 +1,906 @@ + + + + +SessionWindowedKStream (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface SessionWindowedKStream<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - the key type of this session-windowed stream
    +
    V - the value type of this session-windowed stream
    +
    +
    +
    public interface SessionWindowedKStream<K,V>
    +
    SessionWindowedKStream is an abstraction of a windowed record stream of key-value pairs. + It is an intermediate representation of a KStream, that is aggregated into a windowed KTable + (a windowed KTable is a KTable with key type Windowed). + +

    A SessionWindowedKStream represents a session window type. + +

    The result is written into a local SessionStore (which is basically an ever-updating + materialized view) that can be queried using the name provided in the Materialized instance. + Furthermore, updates to the store are sent downstream into a windowed KTable changelog stream, where + "windowed" implies that the KTable key is a combined key of the original record key and a window ID. + New events are added to SessionWindows until their grace period ends + (see SessionWindows.ofInactivityGapAndGrace(Duration, Duration)). + +

    A SessionWindowedKStream is obtained from a KStream by grouping and + windowing.

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
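    As a rough illustration of the "grouping and windowing" step mentioned above (the topic name, default serdes, and the chosen gap and grace durations are assumptions, not part of this Javadoc):
    // Sketch: a KStream is grouped by key and windowed into sessions with a
    // 5-minute inactivity gap and a 1-minute grace period.
    StreamsBuilder builder = new StreamsBuilder();
    KStream<String, String> clicks = builder.stream("clicks");
    SessionWindowedKStream<String, String> sessionized = clicks
        .groupByKey()
        .windowedBy(SessionWindows.ofInactivityGapAndGrace(Duration.ofMinutes(5), Duration.ofMinutes(1)));
    KTable<Windowed<String>, Long> counts = sessionized.count();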
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        count

        +
        KTable<Windowed<K>,Long> count()
        +
        Count the number of records in this stream by the grouped key and defined sessions. + Note that sessions are generated on a per-key basis and records with different keys create independent sessions. + Records with null key or value are ignored. +

        + The result is written into a local SessionStore (which is basically an ever-updating materialized view). + The default key serde from the config will be used for serializing the result. + If a different serde is required then you should use count(Materialized). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same session and key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys and Long values + that represent the latest (rolling) count (i.e., number of records) for each key per session
        +
        +
        +
      • +
      • +
        +

        count

        +
        KTable<Windowed<K>,Long> count(Named named)
        +
        Count the number of records in this stream by the grouped key and defined sessions. + Note that sessions are generated on a per-key basis and records with different keys create independent sessions. + Records with null key or value are ignored. +

        + The result is written into a local SessionStore (which is basically an ever-updating materialized view). + The default key serde from the config will be used for serializing the result. + If a different serde is required then you should use count(Named, Materialized). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same session and key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys and Long values + that represent the latest (rolling) count (i.e., number of records) for each key per session
        +
        +
        +
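        A one-line sketch of this Named variant (the processor name is an assumption):
        // Sketch: same count as above, but with an explicit processor name in the topology.
        KTable<Windowed<String>, Long> counts = sessionized.count(Named.as("session-count"));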
      • +
      • +
        +

        count

        +
        KTable<Windowed<K>,Long> count(Materialized<K,Long,SessionStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Count the number of records in this stream by the grouped key and defined sessions. + Note that sessions are generated on a per-key basis and records with different keys create independent sessions. + Records with null key or value are ignored. +

        + The result is written into a local SessionStore (which is basically an ever-updating materialized view) + that can be queried using the name provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + Not all updates might get sent downstream, as an internal cache will be used to deduplicate consecutive updates + to the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + To query the local SessionStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // compute sum
        + String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
        + ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
        + String key = "some-key";
        + KeyValueIterator<Windowed<String>, Long> sumForKeyForWindows = sessionStore.fetch(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot + contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the provided store name defined + in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        materialized - an instance of Materialized used to materialize a state store. Cannot be null. + Note: the valueSerde will be automatically set to Serdes#Long() + if there is no valueSerde provided
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys and Long values + that represent the latest (rolling) count (i.e., number of records) for each key per session
        +
        +
        +
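        A short sketch of the Materialized variant described above; the store name "session-counts" and the String key serde are assumptions:
        // Sketch: materialize the per-session counts into a named, queryable session store.
        // The value serde is set to Serdes.Long() automatically, as noted above.
        KTable<Windowed<String>, Long> counts = sessionized.count(
            Materialized.<String, Long, SessionStore<Bytes, byte[]>>as("session-counts")
                .withKeySerde(Serdes.String()));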
      • +
      • +
        +

        count

        +
        KTable<Windowed<K>,Long> count(Named named, + Materialized<K,Long,SessionStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Count the number of records in this stream by the grouped key and defined sessions. + Note that sessions are generated on a per-key basis and records with different keys create independent sessions. + Records with null key or value are ignored. +

        + The result is written into a local SessionStore (which is basically an ever-updating materialized view) + that can be queried using the name provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + Not all updates might get sent downstream, as an internal cache will be used to deduplicate consecutive updates + to the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + To query the local SessionStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // compute sum
        + String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
        + ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
        + String key = "some-key";
        + KeyValueIterator<Windowed<String>, Long> sumForKeyForWindows = sessionStore.fetch(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot + contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the provided store name defined + in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        materialized - an instance of Materialized used to materialize a state store. Cannot be null. + Note: the valueSerde will be automatically set to Serdes#Long() + if there is no valueSerde provided
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys and Long values + that represent the latest (rolling) count (i.e., number of records) for each key per session
        +
        +
        +
      • +
      • +
        +

        reduce

        +
        KTable<Windowed<K>,V> reduce(Reducer<V> reducer)
        +
        Combine the values of records in this stream by the grouped key and defined sessions. + Note that sessions are generated on a per-key basis and records with different keys create independent sessions. + Records with null key or value are ignored. + Combining implies that the type of the aggregate result is the same as the type of the input value + (cf. aggregate(Initializer, Aggregator, Merger)). + The result is written into a local SessionStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. + The default key and value serde from the config will be used for serializing the result. + If a different serde is required then you should use reduce(Reducer, Materialized) . +

        + The value of the first record per session initializes the session result. + The specified Reducer is applied for each additional input record per session and computes a new + aggregate using the current aggregate (first argument) and the record's value (second argument): +

        
        + // At the example of a Reducer<Long>
        + new Reducer<Long>() {
        +   public Long apply(Long aggValue, Long currValue) {
        +     return aggValue + currValue;
        +   }
        + }
        + 
        + Thus, reduce() can be used to compute aggregate functions like sum, min, or max. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        reducer - a Reducer that computes a new aggregate result. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key per session
        +
        +
        +
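        The anonymous Reducer shown above can equivalently be written as a lambda; a sketch, assuming a session-windowed stream of Long values named longsBySession:
        // Sketch: summing Long values per session with a lambda Reducer.
        KTable<Windowed<String>, Long> sums =
            longsBySession.reduce((aggValue, currValue) -> aggValue + currValue);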
      • +
      • +
        +

        reduce

        +
        KTable<Windowed<K>,V> reduce(Reducer<V> reducer, + Named named)
        +
        Combine the values of records in this stream by the grouped key and defined sessions. + Note that sessions are generated on a per-key basis and records with different keys create independent sessions. + Records with null key or value are ignored. + Combining implies that the type of the aggregate result is the same as the type of the input value + (cf. aggregate(Initializer, Aggregator, Merger)). + The result is written into a local SessionStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. + The default key and value serde from the config will be used for serializing the result. + If a different serde is required then you should use reduce(Reducer, Named, Materialized) . +

        + The value of the first record per session initializes the session result. + The specified Reducer is applied for each additional input record per session and computes a new + aggregate using the current aggregate (first argument) and the record's value (second argument): +

        
        + // At the example of a Reducer<Long>
        + new Reducer<Long>() {
        +   public Long apply(Long aggValue, Long currValue) {
        +     return aggValue + currValue;
        +   }
        + }
        + 
        + Thus, reduce() can be used to compute aggregate functions like sum, min, or max. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        reducer - a Reducer that computes a new aggregate result. Cannot be null.
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key per session
        +
        +
        +
      • +
      • +
        +

        reduce

        +
        KTable<Windowed<K>,V> reduce(Reducer<V> reducer, + Materialized<K,V,SessionStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Combine the values of records in this stream by the grouped key and defined sessions. + Note that sessions are generated on a per-key basis and records with different keys create independent sessions. + Records with null key or value are ignored. + Combining implies that the type of the aggregate result is the same as the type of the input value + (cf. aggregate(Initializer, Aggregator, Merger)). + The result is written into a local SessionStore (which is basically an ever-updating materialized view) + that can be queried using the store name as provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The value of the first record per session initializes the session result. + The specified Reducer is applied for each additional input record per session and computes a new + aggregate using the current aggregate (first argument) and the record's value (second argument): +

        
        + // At the example of a Reducer<Long>
        + new Reducer<Long>() {
        +   public Long apply(Long aggValue, Long currValue) {
        +     return aggValue + currValue;
        +   }
        + }
        + 
        + Thus, reduce() can be used to compute aggregate functions like sum, min, or max. +

        + Not all updates might get sent downstream, as an internal cache will be used to deduplicate consecutive updates + to the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + To query the local SessionStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // compute sum
        + String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
        + ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
        + String key = "some-key";
        + KeyValueIterator<Windowed<String>, Long> sumForKeyForWindows = sessionStore.fetch(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot + contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the provided store name defined + in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        reducer - a Reducer that computes a new aggregate result. Cannot be null.
        +
        materialized - a Materialized config used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key per session
        +
        +
        +
      • +
      • +
        +

        reduce

        +
        KTable<Windowed<K>,V> reduce(Reducer<V> reducer, + Named named, + Materialized<K,V,SessionStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Combine the values of records in this stream by the grouped key and defined sessions. + Note that sessions are generated on a per-key basis and records with different keys create independent sessions. + Records with null key or value are ignored. + Combining implies that the type of the aggregate result is the same as the type of the input value + (cf. aggregate(Initializer, Aggregator, Merger)). + The result is written into a local SessionStore (which is basically an ever-updating materialized view) + that can be queried using the store name as provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The value of the first record per session initializes the session result. + The specified Reducer is applied for each additional input record per session and computes a new + aggregate using the current aggregate (first argument) and the record's value (second argument): +

        
        + // At the example of a Reducer<Long>
        + new Reducer<Long>() {
        +   public Long apply(Long aggValue, Long currValue) {
        +     return aggValue + currValue;
        +   }
        + }
        + 
        + Thus, reduce() can be used to compute aggregate functions like sum, min, or max. +

        + Not all updates might get sent downstream, as an internal cache will be used to deduplicate consecutive updates + to the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + To query the local SessionStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // compute sum
        + String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
        + ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
        + String key = "some-key";
        + KeyValueIterator<Windowed<String>, Long> sumForKeyForWindows = sessionStore.fetch(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot + contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the provided store name defined + in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        reducer - a Reducer that computes a new aggregate result. Cannot be null.
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        materialized - a Materialized config used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key per session
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        <VOut> KTable<Windowed<K>,VOut> aggregate(Initializer<VOut> initializer, + Aggregator<? super K,? super V,VOut> aggregator, + Merger<? super K,VOut> sessionMerger)
        +
        Aggregate the values of records in this stream by the grouped key and defined sessions. + Note that sessions are generated on a per-key basis and records with different keys create independent sessions. + Records with null key or value are ignored. + Aggregating is a generalization of combining via reduce(...) as it, for example, + allows the result to have a different type than the input values. + The result is written into a local SessionStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record per session is processed to + provide an initial intermediate aggregation result that is used to process the first record per session. + The specified Aggregator is applied for each input record and computes a new aggregate using the current + aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value. + The specified Merger is used to merge two existing sessions into one, i.e., when the windows overlap, + they are merged into a single session and the old sessions are discarded. + Thus, aggregate() can be used to compute aggregate functions like count (cf. count()). +

        + The default key and value serde from the config will be used for serializing the result. + If a different serde is required then you should use + aggregate(Initializer, Aggregator, Merger, Materialized). +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Type Parameters:
        +
        VOut - the value type of the resulting KTable
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        aggregator - an Aggregator that computes a new aggregate result. Cannot be null.
        +
        sessionMerger - a Merger that combines two aggregation results. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key per session
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        <VOut> KTable<Windowed<K>,VOut> aggregate(Initializer<VOut> initializer, + Aggregator<? super K,? super V,VOut> aggregator, + Merger<? super K,VOut> sessionMerger, + Named named)
        +
        Aggregate the values of records in this stream by the grouped key and defined sessions. + Note that sessions are generated on a per-key basis and records with different keys create independent sessions. + Records with null key or value are ignored. + Aggregating is a generalization of combining via reduce(...) as it, for example, + allows the result to have a different type than the input values. + The result is written into a local SessionStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record per session is processed to + provide an initial intermediate aggregation result that is used to process the first record per session. + The specified Aggregator is applied for each input record and computes a new aggregate using the current + aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value. + The specified Merger is used to merge two existing sessions into one, i.e., when the windows overlap, + they are merged into a single session and the old sessions are discarded. + Thus, aggregate() can be used to compute aggregate functions like count (cf. count()). +

        + The default key and value serde from the config will be used for serializing the result. + If a different serde is required then you should use + aggregate(Initializer, Aggregator, Merger, Named, Materialized). +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key. + The rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Type Parameters:
        +
        VOut - the value type of the resulting KTable
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        aggregator - an Aggregator that computes a new aggregate result. Cannot be null.
        +
        sessionMerger - a Merger that combines two aggregation results. Cannot be null.
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key per session
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        <VOut> KTable<Windowed<K>,VOut> aggregate(Initializer<VOut> initializer, + Aggregator<? super K,? super V,VOut> aggregator, + Merger<? super K,VOut> sessionMerger, + Materialized<K,VOut,SessionStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Aggregate the values of records in this stream by the grouped key and defined sessions. + Note that sessions are generated on a per-key basis and records with different keys create independent sessions. + Records with null key or value are ignored. + Aggregating is a generalization of combining via reduce(...) as it, for example, + allows the result to have a different type than the input values. + The result is written into a local SessionStore (which is basically an ever-updating materialized view) + that can be queried using the store name as provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record per session is processed to + provide an initial intermediate aggregation result that is used to process the first record per session. + The specified Aggregator is applied for each input record and computes a new aggregate using the current + aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value. + The specified Merger is used to merge two existing sessions into one, i.e., when the windows overlap, + they are merged into a single session and the old sessions are discarded. + Thus, aggregate() can be used to compute aggregate functions like count (cf. count()). +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + To query the local SessionStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // some windowed aggregation on value type double
        + String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
        + ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
        + String key = "some-key";
        + KeyValueIterator<Windowed<String>, Long> aggForKeyForSession = sessionStore.fetch(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and + cannot contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + provided store name defined in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Type Parameters:
        +
        VOut - the value type of the resulting KTable
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        aggregator - an Aggregator that computes a new aggregate result. Cannot be null.
        +
        sessionMerger - a Merger that combines two aggregation results. Cannot be null.
        +
        materialized - a Materialized config used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key per session
        +
        +
        +
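        A compact sketch of this overload, where the aggregate type (Long) differs from the input value type (String); the surrounding session-windowed stream and the serde configuration via Materialized.with(...) are assumptions:
        // Sketch: total character count per session; the Merger combines the aggregates
        // of two sessions when an out-of-order record causes them to be merged.
        KTable<Windowed<String>, Long> charCounts = sessionized.aggregate(
            () -> 0L,                                          // Initializer
            (key, value, agg) -> agg + value.length(),         // Aggregator
            (key, leftAgg, rightAgg) -> leftAgg + rightAgg,    // Merger
            Materialized.with(Serdes.String(), Serdes.Long()));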
      • +
      • +
        +

        aggregate

        +
        <VOut> KTable<Windowed<K>,VOut> aggregate(Initializer<VOut> initializer, + Aggregator<? super K,? super V,VOut> aggregator, + Merger<? super K,VOut> sessionMerger, + Named named, + Materialized<K,VOut,SessionStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Aggregate the values of records in this stream by the grouped key and defined sessions. + Note that sessions are generated on a per-key basis and records with different keys create independent sessions. + Records with null key or value are ignored. + Aggregating is a generalization of combining via reduce(...) as it, for example, + allows the result to have a different type than the input values. + The result is written into a local SessionStore (which is basically an ever-updating materialized view) + that can be queried using the store name as provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record per session is processed to + provide an initial intermediate aggregation result that is used to process the first record per session. + The specified Aggregator is applied for each input record and computes a new aggregate using the current + aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value. + The specified Merger is used to merge two existing sessions into one, i.e., when the windows overlap, + they are merged into a single session and the old sessions are discarded. + Thus, aggregate() can be used to compute aggregate functions like count (cf. count()). +

        + Not all updates might get sent downstream, as an internal cache will be used to deduplicate consecutive updates + to the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + To query the local SessionStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // some windowed aggregation on value type double
        + String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
        + ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
        + String key = "some-key";
        + KeyValueIterator<Windowed<String>, Long> aggForKeyForSession = sessionStore.fetch(key); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and + cannot contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + provided store name defined in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Type Parameters:
        +
        VOut - the value type of the resulting KTable
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        aggregator - an Aggregator that computes a new aggregate result. Cannot be null.
        +
        sessionMerger - a Merger that combines two aggregation results. Cannot be null.
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        materialized - a Materialized config used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key per session
        +
        +
        +
      • +
      • +
        +

        emitStrategy

        +
        SessionWindowedKStream<K,V> emitStrategy(EmitStrategy emitStrategy)
        +
        Configure when the aggregated result will be emitted for SessionWindowedKStream. +

        + For example, for EmitStrategy.onWindowClose() strategy, the aggregated result for a + window will only be emitted when the window closes. For EmitStrategy.onWindowUpdate() + strategy, the aggregated result for a window will be emitted whenever there is an update to + the window. Note that whether the result will be available downstream also depends on + cache policy.

        +
        +
        Parameters:
        +
        emitStrategy - EmitStrategy to configure when the aggregated result for a window will be emitted.
        +
        Returns:
        +
        a SessionWindowedKStream with EmitStrategy configured.
        +
        +
        +
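        A minimal sketch of the onWindowClose() strategy mentioned above (the surrounding session-windowed stream is assumed):
        // Sketch: emit a session's result only once its window closes, instead of on every update.
        KTable<Windowed<String>, Long> finalCounts = sessionized
            .emitStrategy(EmitStrategy.onWindowClose())
            .count();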
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedSerializer.html b/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedSerializer.html new file mode 100644 index 000000000..9ac30c9bf --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindowedSerializer.html @@ -0,0 +1,291 @@ + + + + +SessionWindowedSerializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SessionWindowedSerializer<T>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.SessionWindowedSerializer<T>
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Serializer<Windowed<T>>, org.apache.kafka.streams.kstream.internals.WindowedSerializer<T>
    +
    +
    +
    public class SessionWindowedSerializer<T> +extends Object +implements org.apache.kafka.streams.kstream.internals.WindowedSerializer<T>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        WINDOWED_INNER_SERIALIZER_CLASS

        +
        public static final String WINDOWED_INNER_SERIALIZER_CLASS
        +
        Default serializer for the inner serializer class of a windowed record. Must implement the Serde interface.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SessionWindowedSerializer

        +
        public SessionWindowedSerializer()
        +
        +
      • +
      • +
        +

        SessionWindowedSerializer

        +
        public SessionWindowedSerializer(Serializer<T> inner)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        public void configure(Map<String,?> configs, + boolean isKey)
        +
        Description copied from interface: Serializer
        +
        Configure this class.
        +
        +
        Specified by:
        +
        configure in interface Serializer<T>
        +
        Parameters:
        +
        configs - configs in key/value pairs
        +
        isKey - whether the serializer is used for the key or the value
        +
        +
        +
      • +
      • +
        +

        serialize

        +
        public byte[] serialize(String topic, + Windowed<T> data)
        +
        Description copied from interface: Serializer
        +
        Convert data into a byte array. + +

        It is recommended to serialize null data to the null byte array.

        +
        +
        Specified by:
        +
        serialize in interface Serializer<T>
        +
        Parameters:
        +
        topic - topic associated with data
        +
        data - typed data; may be null
        +
        Returns:
        +
        serialized bytes; may be null
        +
        +
        +
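        For orientation, a small sketch of constructing and configuring this serializer directly; the inner serializer and usage site are assumptions, and within the DSL it is normally obtained via the windowed serdes rather than instantiated by hand:
        // Sketch: a serializer for session-windowed String keys, with the inner
        // serializer supplied via the constructor (so no configs are required here).
        Serializer<Windowed<String>> keySerializer =
            new SessionWindowedSerializer<>(new StringSerializer());
        keySerializer.configure(Collections.emptyMap(), true);   // isKey = true
        // keySerializer.serialize(topic, windowedKey) would be called with a Windowed<String>
        // key obtained from a session-windowed aggregation result.
        keySerializer.close();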
      • +
      • +
        +

        close

        +
        public void close()
        +
        Description copied from interface: Serializer
        +
        Close this serializer. + +

        This method must be idempotent as it may be called multiple times.

        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        Specified by:
        +
        close in interface Closeable
        +
        Specified by:
        +
        close in interface Serializer<T>
        +
        +
        +
      • +
      • +
        +

        serializeBaseKey

        +
        public byte[] serializeBaseKey(String topic, + Windowed<T> data)
        +
        +
        Specified by:
        +
        serializeBaseKey in interface org.apache.kafka.streams.kstream.internals.WindowedSerializer<T>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindows.html b/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindows.html new file mode 100644 index 000000000..9725b1a06 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/SessionWindows.html @@ -0,0 +1,292 @@ + + + + +SessionWindows (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SessionWindows

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.SessionWindows
    +
    +
    +
    +
    public final class SessionWindows +extends Object
    +
    A session based window specification used for aggregating events into sessions. +

    + Sessions represent a period of activity separated by a defined gap of inactivity. + Any events processed that fall within the inactivity gap of any existing sessions are merged into the existing sessions. + If the event falls outside the session gap then a new session will be created. +

    + For example, if we have a session gap of 5 and the following data arrives: +

    + +--------------------------------------+
    + |    key    |    value    |    time    |
    + +-----------+-------------+------------+
    + |    A      |     1       |     10     |
    + +-----------+-------------+------------+
    + |    A      |     2       |     12     |
    + +-----------+-------------+------------+
    + |    A      |     3       |     20     |
    + +-----------+-------------+------------+
    + 
     + We'd have 2 sessions for key A. + One starting from time 10 and ending at time 12, and another starting and ending at time 20. + The length of the session is driven by the timestamps of the data within the session. + Thus, session windows are not fixed-size windows (cf. TimeWindows and JoinWindows). +

    + If we then received another record: +

    + +--------------------------------------+
    + |    key    |    value    |    time    |
    + +-----------+-------------+------------+
    + |    A      |     4       |     16     |
    + +-----------+-------------+------------+
    + 
    + The previous 2 sessions would be merged into a single session with start time 10 and end time 20. + The aggregate value for this session would be the result of aggregating all 4 values. +

    + For time semantics, see TimestampExtractor.

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        ofInactivityGapWithNoGrace

        +
        public static SessionWindows ofInactivityGapWithNoGrace(Duration inactivityGap)
        +
        Creates a new window specification with the specified inactivity gap. +

        + Note that new events may change the boundaries of session windows, so aggressive + close times can lead to surprising results in which an out-of-order event is rejected and then + a subsequent event moves the window boundary forward. +

        + CAUTION: Using this method implicitly sets the grace period to zero, which means that any out-of-order + records arriving after the window ends are considered late and will be dropped.

        +
        +
        Parameters:
        +
        inactivityGap - the gap of inactivity between sessions
        +
        Returns:
        +
        a window definition with the window size and no grace period. Note that this means out-of-order records arriving after the window end will be dropped
        +
        Throws:
        +
        IllegalArgumentException - if inactivityGap is negative or can't be represented as long milliseconds
        +
        +
        +
      • +
      • +
        +

        ofInactivityGapAndGrace

        +
        public static SessionWindows ofInactivityGapAndGrace(Duration inactivityGap, + Duration afterWindowEnd)
        +
        Creates a new window specification with the specified inactivity gap. +

        + Note that new events may change the boundaries of session windows, so aggressive + close times can lead to surprising results in which an out-of-order event is rejected and then + a subsequent event moves the window boundary forward. +

        + Using this method explicitly sets the grace period to the duration specified by afterWindowEnd, which + means that only out-of-order records arriving more than the grace period after the window end will be dropped. + The window close, after which any incoming records are considered late and will be rejected, is defined as + windowEnd + afterWindowEnd

        +
        +
        Parameters:
        +
        inactivityGap - the gap of inactivity between sessions
        +
        afterWindowEnd - The grace period to admit out-of-order events to a window.
        +
        Returns:
        +
        A SessionWindows object with the specified inactivity gap and grace period
        +
        Throws:
        +
        IllegalArgumentException - if inactivityGap or afterWindowEnd is negative or can't be represented as long milliseconds
        +
        +
        +
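        The two factory methods side by side, as a sketch (the gap and grace durations are arbitrary example values):
        // Sketch: a strict specification that drops all out-of-order records after the
        // window ends, and a lenient one that admits them for another 30 seconds.
        SessionWindows strict  = SessionWindows.ofInactivityGapWithNoGrace(Duration.ofMinutes(5));
        SessionWindows lenient = SessionWindows.ofInactivityGapAndGrace(Duration.ofMinutes(5), Duration.ofSeconds(30));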
      • +
      • +
        +

        gracePeriodMs

        +
        public long gracePeriodMs()
        +
        +
      • +
      • +
        +

        inactivityGap

        +
        public long inactivityGap()
        +
        Return the specified gap for the session windows in milliseconds.
        +
        +
        Returns:
        +
        the inactivity gap of the specified windows
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/SlidingWindows.html b/static/41/javadoc/org/apache/kafka/streams/kstream/SlidingWindows.html new file mode 100644 index 000000000..f076f862f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/SlidingWindows.html @@ -0,0 +1,281 @@ + + + + +SlidingWindows (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class SlidingWindows

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.SlidingWindows
    +
    +
    +
    +
    public final class SlidingWindows +extends Object
    +
    A sliding window used for aggregating events. +

    + Sliding Windows are defined based on a record's timestamp, the window size based on the given maximum time difference (inclusive) between + records in the same window, and the given window grace period. While the window is sliding over the input data stream, a new window is + created each time a record enters the sliding window or a record drops out of the sliding window. +

     + Records that arrive after the set grace period will be ignored, i.e., a window is closed when + stream-time > window-end + grace-period. +

    + For example, if we have a time difference of 5000ms and the following data arrives: +

    + +--------------------------------------+
    + |    key    |    value    |    time    |
    + +-----------+-------------+------------+
    + |    A      |     1       |    8000    |
    + +-----------+-------------+------------+
    + |    A      |     2       |    9200    |
    + +-----------+-------------+------------+
    + |    A      |     3       |    12400   |
    + +-----------+-------------+------------+
    + 
    + We'd have the following 5 windows: +
      +
    • window [3000;8000] contains [1] (created when first record enters the window)
    • +
    • window [4200;9200] contains [1,2] (created when second record enters the window)
    • +
    • window [7400;12400] contains [1,2,3] (created when third record enters the window)
    • +
    • window [8001;13001] contains [2,3] (created when the first record drops out of the window)
    • +
    • window [9201;14201] contains [3] (created when the second record drops out of the window)
    • +
    +

    + Note that while SlidingWindows are of a fixed size, as are TimeWindows, the start and end points of the window + depend on when events occur in the stream (i.e., event timestamps), similar to SessionWindows. +

    + For time semantics, see TimestampExtractor.

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        ofTimeDifferenceWithNoGrace

        +
        public static SlidingWindows ofTimeDifferenceWithNoGrace(Duration timeDifference) + throws IllegalArgumentException
        +
        + Return a window definition with the window size based on the given maximum time difference (inclusive) between + records in the same window and the given window grace period. Reject out-of-order events that arrive after the grace period. + A window is closed when stream-time > window-end + grace-period. +

        + CAUTION: Using this method implicitly sets the grace period to zero, which means that any out-of-order + records arriving after the window ends are considered late and will be dropped.

        +
        +
        Parameters:
        +
        timeDifference - the max time difference (inclusive) between two records in a window
        +
        Returns:
        +
        a new window definition with no grace period. Note that this means out-of-order records arriving after the window end will be dropped
        +
        Throws:
        +
        IllegalArgumentException - if the timeDifference is negative or can't be represented as long milliseconds
        +
        +
        +
      • +
      • +
        +

        ofTimeDifferenceAndGrace

        +
        public static SlidingWindows ofTimeDifferenceAndGrace(Duration timeDifference,
                                                              Duration afterWindowEnd)
                                                       throws IllegalArgumentException

        Return a window definition with the window size based on the given maximum time difference (inclusive) between records in the same window and the given window grace period. Reject out-of-order events that arrive after afterWindowEnd. A window is closed when stream-time > window-end + grace-period.
        +
        +
        Parameters:
        +
        timeDifference - the max time difference (inclusive) between two records in a window
        +
        afterWindowEnd - the grace period to admit out-of-order events to a window
        +
        Returns:
        +
        a new window definition with the specified grace period
        +
        Throws:
        +
        IllegalArgumentException - if the timeDifference or afterWindowEnd (grace period) is negative or can't be represented as long milliseconds
        +
        +
        +
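        As a rough sketch of how these two factory methods are typically wired into a topology (the topic name, serdes, and the 5-second/30-second durations below are illustrative assumptions, not part of this API page):

        import java.time.Duration;
        import org.apache.kafka.common.serialization.Serdes;
        import org.apache.kafka.streams.StreamsBuilder;
        import org.apache.kafka.streams.kstream.Consumed;
        import org.apache.kafka.streams.kstream.Grouped;
        import org.apache.kafka.streams.kstream.SlidingWindows;

        StreamsBuilder builder = new StreamsBuilder();

        // Hypothetical input topic; 5000 ms max time difference, as in the example table above.
        builder.stream("input-topic", Consumed.with(Serdes.String(), Serdes.Long()))
               .groupByKey(Grouped.with(Serdes.String(), Serdes.Long()))
               // Zero grace: out-of-order records arriving after the window closes are dropped.
               .windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(Duration.ofMillis(5000)))
               .count();

        // Alternatively, admit up to 30 seconds of out-of-order data per window:
        SlidingWindows withGrace =
                SlidingWindows.ofTimeDifferenceAndGrace(Duration.ofSeconds(5), Duration.ofSeconds(30));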
      • +
      • +
        +

        timeDifferenceMs

        +
        public long timeDifferenceMs()
        +
        +
      • +
      • +
        +

        gracePeriodMs

        +
        public long gracePeriodMs()
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/StreamJoined.html b/static/41/javadoc/org/apache/kafka/streams/kstream/StreamJoined.html new file mode 100644 index 000000000..420dec2dc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/StreamJoined.html @@ -0,0 +1,461 @@ + + + + +StreamJoined (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StreamJoined<K,V1,V2>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.StreamJoined<K,V1,V2>
    +
    +
    +
    +
    Type Parameters:
    +
    K - the key type
    +
    V1 - this value type
    +
    V2 - other value type
    +
    +
    +
    public class StreamJoined<K,V1,V2>
    extends Object

    Class used to configure the name of the join processor, the repartition topic name, state stores, or state store names in a stream-stream join.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        with

        +
        public static <K, V1, V2> StreamJoined<K,V1,V2> with(WindowBytesStoreSupplier storeSupplier,
                                                             WindowBytesStoreSupplier otherStoreSupplier)

        Creates a StreamJoined instance with the provided store suppliers. The store suppliers must implement the WindowBytesStoreSupplier interface. The store suppliers must provide unique names or a StreamsException is thrown.
        +
        +
        Type Parameters:
        +
        K - the key type
        +
        V1 - this value type
        +
        V2 - other value type
        +
        Parameters:
        +
        storeSupplier - this store supplier
        +
        otherStoreSupplier - other store supplier
        +
        Returns:
        +
        StreamJoined instance
        +
        +
        +
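        A minimal sketch of supplying explicit window stores for both sides of a stream-stream join. The store names, window size, and retention below are assumptions; note that join stores must retain duplicates:

        import java.time.Duration;
        import org.apache.kafka.streams.kstream.StreamJoined;
        import org.apache.kafka.streams.state.Stores;
        import org.apache.kafka.streams.state.WindowBytesStoreSupplier;

        Duration windowSize = Duration.ofMinutes(5);
        Duration retention  = Duration.ofMinutes(30);   // must cover window size plus grace

        // Join stores must retain duplicates, hence the trailing 'true'.
        WindowBytesStoreSupplier thisStore =
                Stores.persistentWindowStore("left-join-store", retention, windowSize, true);
        WindowBytesStoreSupplier otherStore =
                Stores.persistentWindowStore("right-join-store", retention, windowSize, true);

        StreamJoined<String, Long, Long> joined = StreamJoined.with(thisStore, otherStore);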
      • +
      • +
        +

        with

        +
        public static <K, V1, V2> StreamJoined<K,V1,V2> with(DslStoreSuppliers storeSuppliers)

        Creates a StreamJoined instance with the given DslStoreSuppliers. The store plugin will be used to get all the state stores in this operation that do not otherwise have an explicitly configured DslStoreSuppliers.
        +
        +
        Type Parameters:
        +
        K - the key type
        +
        V1 - this value type
        +
        V2 - other value type
        +
        Parameters:
        +
        storeSuppliers - the store plugin that will be used for state stores
        +
        Returns:
        +
        StreamJoined instance
        +
        +
        +
      • +
      • +
        +

        as

        +
        public static <K, V1, V2> StreamJoined<K,V1,V2> as(String storeName)

        Creates a StreamJoined instance using the provided name for the state stores and hence the changelog topics for the join stores. The name for the stores will be ${applicationId}-<storeName>-this-join and ${applicationId}-<storeName>-other-join, or ${applicationId}-<storeName>-outer-this-join and ${applicationId}-<storeName>-outer-other-join, depending on whether the join is an inner join or an outer join. The changelog topics will have the -changelog suffix. Note that even though the join stores will have a specified name, the stores will remain unavailable for querying.

        Please note that if you are using StreamJoined to replace deprecated KStream.join(KStream, ValueJoiner, JoinWindows) overloads that took Joined parameters in order to set the name for the join processors, you need to create the StreamJoined object first and then call withName(java.lang.String).
        +
        +
        Type Parameters:
        +
        K - The key type
        +
        V1 - This value type
        +
        V2 - Other value type
        +
        Parameters:
        +
        storeName - The name to use for the store
        +
        Returns:
        +
        StreamJoined instance
        +
        +
        +
      • +
      • +
        +

        with

        +
        public static <K, V1, V2> StreamJoined<K,V1,V2> with(Serde<K> keySerde,
                                                             Serde<V1> valueSerde,
                                                             Serde<V2> otherValueSerde)

        Creates a StreamJoined instance with the provided serdes to configure the stores for the join.
        +
        +
        Type Parameters:
        +
        K - The key type
        +
        V1 - This value type
        +
        V2 - Other value type
        +
        Parameters:
        +
        keySerde - The key serde
        +
        valueSerde - This value serde
        +
        otherValueSerde - Other value serde
        +
        Returns:
        +
        StreamJoined instance
        +
        +
        +
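        As a hedged sketch, this is how the serde-based factory is commonly combined with withName and withStoreName in a KStream-KStream join. The topic names, value types, and the 5-minute join window are assumptions:

        import java.time.Duration;
        import org.apache.kafka.common.serialization.Serdes;
        import org.apache.kafka.streams.StreamsBuilder;
        import org.apache.kafka.streams.kstream.Consumed;
        import org.apache.kafka.streams.kstream.JoinWindows;
        import org.apache.kafka.streams.kstream.KStream;
        import org.apache.kafka.streams.kstream.StreamJoined;

        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, Long> orders =
                builder.stream("orders", Consumed.with(Serdes.String(), Serdes.Long()));
        KStream<String, Double> payments =
                builder.stream("payments", Consumed.with(Serdes.String(), Serdes.Double()));

        KStream<String, String> joined = orders.join(
                payments,
                (orderValue, paymentValue) -> orderValue + "/" + paymentValue,
                JoinWindows.ofTimeDifferenceWithNoGrace(Duration.ofMinutes(5)),
                StreamJoined.with(Serdes.String(), Serdes.Long(), Serdes.Double())
                            .withName("order-payment-join")
                            .withStoreName("order-payment-join-store"));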
      • +
      • +
        +

        withName

        +
        public StreamJoined<K,V1,V2> withName(String name)
        +
        Set the name to use for the join processor and the repartition topic(s) if required.
        +
        +
        Parameters:
        +
        name - the name to use
        +
        Returns:
        +
        a new StreamJoined instance
        +
        +
        +
      • +
      • +
        +

        withStoreName

        +
        public StreamJoined<K,V1,V2> withStoreName(String storeName)

        Sets the base store name to use for both sides of the join, i.e., the name for the state stores and hence the changelog topics for the join stores. The name for the stores will be ${applicationId}-<storeName>-this-join and ${applicationId}-<storeName>-other-join, or ${applicationId}-<storeName>-outer-this-join and ${applicationId}-<storeName>-outer-other-join, depending on whether the join is an inner join or an outer join. The changelog topics will have the -changelog suffix. Note that even though the join stores will have a specified name, the stores will remain unavailable for querying.
        +
        +
        Parameters:
        +
        storeName - the storeName to use
        +
        Returns:
        +
        a new StreamJoined instance
        +
        +
        +
      • +
      • +
        +

        withKeySerde

        +
        public StreamJoined<K,V1,V2> withKeySerde(Serde<K> keySerde)
        +
        Configure with the provided Serde for the key
        +
        +
        Parameters:
        +
        keySerde - the serde to use for the key
        +
        Returns:
        +
        a new StreamJoined configured with the keySerde
        +
        +
        +
      • +
      • +
        +

        withValueSerde

        +
        public StreamJoined<K,V1,V2> withValueSerde(Serde<V1> valueSerde)
        +
        Configure with the provided Serde for this value
        +
        +
        Parameters:
        +
        valueSerde - the serde to use for this value (calling or left side of the join)
        +
        Returns:
        +
        a new StreamJoined configured with the valueSerde
        +
        +
        +
      • +
      • +
        +

        withOtherValueSerde

        +
        public StreamJoined<K,V1,V2> withOtherValueSerde(Serde<V2> otherValueSerde)
        +
        Configure with the provided Serde for the other value
        +
        +
        Parameters:
        +
        otherValueSerde - the serde to use for the other value (other or right side of the join)
        +
        Returns:
        +
        a new StreamJoined configured with the otherValueSerde
        +
        +
        +
      • +
      • +
        +

        withDslStoreSuppliers

        +
        public StreamJoined<K,V1,V2> withDslStoreSuppliers(DslStoreSuppliers dslStoreSuppliers)
        +
        Configure with the provided DslStoreSuppliers for store suppliers that are not provided.
        +
        +
        Parameters:
        +
        dslStoreSuppliers - the default store suppliers to use for this StreamJoined
        +
        Returns:
        +
        a new StreamJoined configured with dslStoreSuppliers
        +
        +
        +
      • +
      • +
        +

        withThisStoreSupplier

        +
        public StreamJoined<K,V1,V2> withThisStoreSupplier(WindowBytesStoreSupplier thisStoreSupplier)
        +
        Configure with the provided WindowBytesStoreSupplier for this store supplier. Please note + this method only provides the store supplier for the left side of the join. If you wish to also provide a + store supplier for the right (i.e., other) side you must use the withOtherStoreSupplier(WindowBytesStoreSupplier) + method
        +
        +
        Parameters:
        +
        thisStoreSupplier - the store supplier to use for this store supplier (calling or left side of the join)
        +
        Returns:
        +
        a new StreamJoined configured with thisStoreSupplier
        +
        +
        +
      • +
      • +
        +

        withOtherStoreSupplier

        +
        public StreamJoined<K,V1,V2> withOtherStoreSupplier(WindowBytesStoreSupplier otherStoreSupplier)
        +
        Configure with the provided WindowBytesStoreSupplier for the other store supplier. Please note + this method only provides the store supplier for the right side of the join. If you wish to also provide a + store supplier for the left side you must use the withThisStoreSupplier(WindowBytesStoreSupplier) + method
        +
        +
        Parameters:
        +
        otherStoreSupplier - the store supplier to use for the other store supplier (other or right side of the join)
        +
        Returns:
        +
        a new StreamJoined configured with otherStoreSupplier
        +
        +
        +
      • +
      • +
        +

        withLoggingEnabled

        +
        public StreamJoined<K,V1,V2> withLoggingEnabled(Map<String,String> config)
        +
        Configures logging for both state stores. The changelog will be created with the provided configs. +

        + Note: Any unrecognized configs will be ignored

        +
        +
        Parameters:
        +
        config - configs applied to the changelog topic
        +
        Returns:
        +
        a new StreamJoined configured with logging enabled
        +
        +
        +
      • +
      • +
        +

        withLoggingDisabled

        +
        public StreamJoined<K,V1,V2> withLoggingDisabled()
        +
        Disable change logging for both state stores.
        +
        +
        Returns:
        +
        a new StreamJoined configured with logging disabled
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Suppressed.BufferConfig.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Suppressed.BufferConfig.html new file mode 100644 index 000000000..3ed507a28 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Suppressed.BufferConfig.html @@ -0,0 +1,289 @@ + + + + +Suppressed.BufferConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Suppressed.BufferConfig<BC extends Suppressed.BufferConfig<BC>>

    +
    +
    +
    +
    All Known Subinterfaces:
    +
    Suppressed.EagerBufferConfig, Suppressed.StrictBufferConfig
    +
    +
    +
    Enclosing interface:
    +
    Suppressed<K>
    +
    +
    +
    public static interface Suppressed.BufferConfig<BC extends Suppressed.BufferConfig<BC>>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        maxRecords

        +
        static Suppressed.EagerBufferConfig maxRecords(long recordLimit)
        +
        Create a size-constrained buffer in terms of the maximum number of keys it will store.
        +
        +
      • +
      • +
        +

        withMaxRecords

        +
        BC withMaxRecords(long recordLimit)
        +
        Set a size constraint on the buffer in terms of the maximum number of keys it will store.
        +
        +
      • +
      • +
        +

        maxBytes

        +
        static Suppressed.EagerBufferConfig maxBytes(long byteLimit)
        +
        Create a size-constrained buffer in terms of the maximum number of bytes it will use.
        +
        +
      • +
      • +
        +

        withMaxBytes

        +
        BC withMaxBytes(long byteLimit)
        +
        Set a size constraint on the buffer, the maximum number of bytes it will use.
        +
        +
      • +
      • +
        +

        unbounded

        +
        static Suppressed.StrictBufferConfig unbounded()
        +
        Create a buffer unconstrained by size (either keys or bytes).

        As a result, the buffer will consume as much memory as it needs, dictated by the time bound.

        If there isn't enough heap available to meet the demand, the application will encounter an OutOfMemoryError and shut down (not guaranteed to be a graceful exit). Also, note that JVM processes under extreme memory pressure may exhibit poor GC behavior.

        This is a convenient option if you doubt that your buffer will be that large, but also don't wish to pick particular constraints, such as in testing.

        This buffer is "strict" in the sense that it will enforce the time bound or crash. It will never emit early.

        +
        +
      • +
      • +
        +

        withNoBound

        + +
        Set the buffer to be unconstrained by size (either keys or bytes).

        As a result, the buffer will consume as much memory as it needs, dictated by the time bound.

        If there isn't enough heap available to meet the demand, the application will encounter an OutOfMemoryError and shut down (not guaranteed to be a graceful exit). Also, note that JVM processes under extreme memory pressure may exhibit poor GC behavior.

        This is a convenient option if you doubt that your buffer will be that large, but also don't wish to pick particular constraints, such as in testing.

        This buffer is "strict" in the sense that it will enforce the time bound or crash. It will never emit early.

        +
        +
      • +
      • +
        +

        shutDownWhenFull

        +
        Suppressed.StrictBufferConfig shutDownWhenFull()
        +
        Set the buffer to gracefully shut down the application when any of its constraints are violated.

        This buffer is "strict" in the sense that it will enforce the time bound or shut down. It will never emit early.

        +
        +
      • +
      • +
        +

        emitEarlyWhenFull

        +
        Suppressed.EagerBufferConfig emitEarlyWhenFull()
        +
        Set the buffer to just emit the oldest records when any of its constraints are violated.

        This buffer is "not strict" in the sense that it may emit early, so it is suitable for reducing duplicate results downstream, but does not promise to eliminate them.

        +
        +
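        A small sketch of how the buffer-config factories above compose; the specific limits are arbitrary example values:

        import org.apache.kafka.streams.kstream.Suppressed;
        import org.apache.kafka.streams.kstream.Suppressed.BufferConfig;

        // Strict: never emit early; shut the application down if more than 10,000 keys
        // or 16 MB would have to be buffered.
        Suppressed.StrictBufferConfig strict =
                BufferConfig.maxRecords(10_000).withMaxBytes(16 * 1024 * 1024).shutDownWhenFull();

        // Eager: emit the oldest records early rather than exceed the 1,000-key bound.
        Suppressed.EagerBufferConfig eager =
                BufferConfig.maxRecords(1_000).emitEarlyWhenFull();

        // Unconstrained by size; bounded only by the time limit of the enclosing suppression.
        Suppressed.StrictBufferConfig unbounded = BufferConfig.unbounded();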
      • +
      • +
        +

        withLoggingDisabled

        +
        BC withLoggingDisabled()
        +
        Disable the changelog for this suppression's internal buffer. + This will turn off fault-tolerance for the suppression, and will result in data loss in the event of a rebalance. + By default, the changelog is enabled.
        +
        +
        Returns:
        +
        this
        +
        +
        +
      • +
      • +
        +

        withLoggingEnabled

        +
        BC withLoggingEnabled(Map<String,String> config)
        +
        Indicates that a changelog topic should be created containing the currently suppressed + records. Due to the short-lived nature of records in this topic it is likely more + compactable than changelog topics for KTables.
        +
        +
        Parameters:
        +
        config - Configs that should be applied to the changelog. Note: Any unrecognized + configs will be ignored.
        +
        Returns:
        +
        this
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Suppressed.EagerBufferConfig.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Suppressed.EagerBufferConfig.html new file mode 100644 index 000000000..1f9ab0e9d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Suppressed.EagerBufferConfig.html @@ -0,0 +1,104 @@ + + + + +Suppressed.EagerBufferConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Suppressed.EagerBufferConfig

    +
    +
    +
    +
    All Superinterfaces:
    +
    Suppressed.BufferConfig<Suppressed.EagerBufferConfig>
    +
    +
    +
    Enclosing interface:
    +
    Suppressed<K>
    +
    +
    +
    public static interface Suppressed.EagerBufferConfig +extends Suppressed.BufferConfig<Suppressed.EagerBufferConfig>
    +
    Marker interface for a buffer configuration that will strictly enforce size constraints + (bytes and/or number of records) on the buffer, so it is suitable for reducing duplicate + results downstream, but does not promise to eliminate them entirely.
    +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Suppressed.StrictBufferConfig.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Suppressed.StrictBufferConfig.html new file mode 100644 index 000000000..004091e13 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Suppressed.StrictBufferConfig.html @@ -0,0 +1,103 @@ + + + + +Suppressed.StrictBufferConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Suppressed.StrictBufferConfig

    +
    +
    +
    +
    All Superinterfaces:
    +
    Suppressed.BufferConfig<Suppressed.StrictBufferConfig>
    +
    +
    +
    Enclosing interface:
    +
    Suppressed<K>
    +
    +
    +
    public static interface Suppressed.StrictBufferConfig +extends Suppressed.BufferConfig<Suppressed.StrictBufferConfig>
    +
    Marker interface for a buffer configuration that is "strict" in the sense that it will strictly + enforce the time bound and never emit early.
    +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Suppressed.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Suppressed.html new file mode 100644 index 000000000..35cc144b4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Suppressed.html @@ -0,0 +1,227 @@ + + + + +Suppressed (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Suppressed<K>

    +
    +
    +
    +
    public interface Suppressed<K>
    +
    +
    +
      + +
    • +
      +

      Nested Class Summary

      Nested Classes

      Modifier and Type      Interface and Description

      static interface       Suppressed.BufferConfig<BC extends Suppressed.BufferConfig<BC>>

      static interface       Suppressed.EagerBufferConfig
                             Marker interface for a buffer configuration that will strictly enforce size constraints (bytes and/or number of records) on the buffer, so it is suitable for reducing duplicate results downstream, but does not promise to eliminate them entirely.

      static interface       Suppressed.StrictBufferConfig
                             Marker interface for a buffer configuration that is "strict" in the sense that it will strictly enforce the time bound and never emit early.
      +
    • + +
    • +
      +

      Method Summary

      Modifier and Type                  Method and Description

      static <K> Suppressed<K>           untilTimeLimit(Duration timeToWaitForMoreEvents, Suppressed.BufferConfig<?> bufferConfig)
                                         Configure the suppression to wait timeToWaitForMoreEvents amount of time after receiving a record before emitting it further downstream.

      static Suppressed<Windowed<?>>     untilWindowCloses(Suppressed.StrictBufferConfig bufferConfig)
                                         Configure the suppression to emit only the "final results" from the window.

      Suppressed<K>                      withName(String name)
                                         Use the specified name for the suppression node in the topology.
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        untilWindowCloses

        +
        static Suppressed<Windowed<?>> untilWindowCloses(Suppressed.StrictBufferConfig bufferConfig)
        +
        Configure the suppression to emit only the "final results" from the window.

        By default, all Streams operators emit results whenever new results are available. This includes windowed operations.

        This configuration will instead emit just one result per key for each window, guaranteeing to deliver only the final result. This option is suitable for use cases in which the business logic requires a hard guarantee that only the final result is propagated. For example, sending alerts.

        To accomplish this, the operator will buffer events from the window until the window close (that is, until the end-time passes, and additionally until the grace period expires). Since windowed operators are required to reject out-of-order events for a window whose grace period is expired, there is an additional guarantee that the final results emitted from this suppression will match any queryable state upstream.

        +
        +
        Parameters:
        +
        bufferConfig - A configuration specifying how much space to use for buffering intermediate results. + This is required to be a "strict" config, since it would violate the "final results" + property to emit early and then issue an update later.
        +
        Returns:
        +
        a "final results" mode suppression configuration
        +
        +
        +
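        A hedged, end-to-end sketch of the final-results pattern described above; the topic names, one-minute window size, and 30-second grace period are assumptions:

        import java.time.Duration;
        import org.apache.kafka.common.serialization.Serdes;
        import org.apache.kafka.streams.StreamsBuilder;
        import org.apache.kafka.streams.kstream.Consumed;
        import org.apache.kafka.streams.kstream.Produced;
        import org.apache.kafka.streams.kstream.Suppressed;
        import org.apache.kafka.streams.kstream.Suppressed.BufferConfig;
        import org.apache.kafka.streams.kstream.TimeWindows;

        StreamsBuilder builder = new StreamsBuilder();

        builder.stream("events", Consumed.with(Serdes.String(), Serdes.String()))
               .groupByKey()
               .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofMinutes(1), Duration.ofSeconds(30)))
               .count()
               // Emit exactly one final result per key and window, once the window closes.
               .suppress(Suppressed.untilWindowCloses(BufferConfig.unbounded()))
               // Unwrap the windowed key before writing the final counts out.
               .toStream((windowedKey, count) -> windowedKey.key())
               .to("final-counts", Produced.with(Serdes.String(), Serdes.Long()));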
      • +
      • +
        +

        untilTimeLimit

        +
        static <K> Suppressed<K> untilTimeLimit(Duration timeToWaitForMoreEvents,
                                                Suppressed.BufferConfig<?> bufferConfig)

        Configure the suppression to wait timeToWaitForMoreEvents amount of time after receiving a record before emitting it further downstream. If another record for the same key arrives in the meantime, it replaces the first record in the buffer but does not re-start the timer.
        +
        +
        Type Parameters:
        +
        K - The key type for the KTable to apply this suppression to.
        +
        Parameters:
        +
        timeToWaitForMoreEvents - The amount of time to wait, per record, for new events.
        +
        bufferConfig - A configuration specifying how much space to use for buffering intermediate results.
        +
        Returns:
        +
        a suppression configuration
        +
        +
        +
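        For contrast, a brief sketch of a time-bounded (rate-limiting) suppression; the source table, the 10-second wait, and the 1 MB bound are assumptions:

        import java.time.Duration;
        import org.apache.kafka.streams.kstream.KTable;
        import org.apache.kafka.streams.kstream.Suppressed;
        import org.apache.kafka.streams.kstream.Suppressed.BufferConfig;

        KTable<String, Long> counts = ...; // some continuously updating table

        // Hold updates per key for up to 10 seconds, using at most ~1 MB of buffer space,
        // and emit early only if that bound would otherwise be exceeded.
        KTable<String, Long> throttled = counts.suppress(
                Suppressed.untilTimeLimit(Duration.ofSeconds(10), BufferConfig.maxBytes(1_000_000)));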
      • +
      • +
        +

        withName

        +
        Suppressed<K> withName(String name)
        +
        Use the specified name for the suppression node in the topology.

        This can be used to insert a suppression without changing the rest of the topology names (and therefore not requiring an application reset).

        Note, however, that once a suppression has buffered some records, removing it from the topology would cause the loss of those records.

        A suppression can be "disabled" with the configuration untilTimeLimit(Duration.ZERO, ...).

        +
        +
        Parameters:
        +
        name - The name to be used for the suppression node and changelog topic
        +
        Returns:
        +
        The same configuration with the addition of the given name.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/TableJoined.html b/static/41/javadoc/org/apache/kafka/streams/kstream/TableJoined.html new file mode 100644 index 000000000..3e97f3dda --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/TableJoined.html @@ -0,0 +1,265 @@ + + + + +TableJoined (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TableJoined<K,KO>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.TableJoined<K,KO>
    +
    +
    +
    +
    Type Parameters:
    +
    K - this key type ; key type for the left (primary) table
    +
    KO - other key type ; key type for the right (foreign key) table
    +
    +
    +
    public class TableJoined<K,KO>
    extends Object

    The TableJoined class represents optional parameters that can be passed to KTable.join(KTable, Function, ...) and KTable.leftJoin(KTable, Function, ...) operations, for foreign-key joins.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        with

        +
        public static <K, KO> TableJoined<K,KO> with(StreamPartitioner<K,Void> partitioner,
                                                     StreamPartitioner<KO,Void> otherPartitioner)

        Create an instance of TableJoined with partitioner and otherPartitioner StreamPartitioner instances. null values are accepted and will result in the default partitioner being used.
        +
        +
        Type Parameters:
        +
        K - this key type ; key type for the left (primary) table
        +
        KO - other key type ; key type for the right (foreign key) table
        +
        Parameters:
        +
        partitioner - a StreamPartitioner that captures the partitioning strategy for the left (primary) + table of the foreign key join. Specifying this option does not repartition or otherwise + affect the source table; rather, this option informs the foreign key join on how internal + topics should be partitioned in order to be co-partitioned with the left join table. + The partitioning strategy must depend only on the message key and not the message value, + else the source table is not supported with foreign key joins. This option may be left + null if the source table uses the default partitioner.
        +
        otherPartitioner - a StreamPartitioner that captures the partitioning strategy for the right (foreign + key) table of the foreign key join. Specifying this option does not repartition or otherwise + affect the source table; rather, this option informs the foreign key join on how internal + topics should be partitioned in order to be co-partitioned with the right join table. + The partitioning strategy must depend only on the message key and not the message value, + else the source table is not supported with foreign key joins. This option may be left + null if the source table uses the default partitioner.
        +
        Returns:
        +
        new TableJoined instance with the provided partitioners
        +
        +
        +
      • +
      • +
        +

        as

        +
        public static <K, KO> TableJoined<K,KO> as(String name)

        Create an instance of TableJoined with a base name for all components of the join, including internal topics created to complete the join.
        +
        +
        Type Parameters:
        +
        K - this key type ; key type for the left (primary) table
        +
        KO - other key type ; key type for the right (foreign key) table
        +
        Parameters:
        +
        name - the name used as the base for naming components of the join including internal topics
        +
        Returns:
        +
        new TableJoined instance configured with the name
        +
        +
        +
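        A hedged sketch of a foreign-key join that names its components with as(String). The Order, Customer, and EnrichedOrder types, their accessors, and the source tables are hypothetical:

        import org.apache.kafka.streams.kstream.KTable;
        import org.apache.kafka.streams.kstream.TableJoined;

        KTable<String, Order> orders = ...;       // left (primary) table, keyed by orderId
        KTable<String, Customer> customers = ...; // right (foreign-key) table, keyed by customerId

        // Foreign-key join: extract the customerId from each order; all join components
        // (processors, internal topics, stores) are named after "order-customer".
        KTable<String, EnrichedOrder> enriched = orders.join(
                customers,
                order -> order.customerId(),                      // foreign-key extractor (hypothetical accessor)
                (order, customer) -> new EnrichedOrder(order, customer),
                TableJoined.as("order-customer"));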
      • +
      • +
        +

        withPartitioner

        +
        public TableJoined<K,KO> withPartitioner(StreamPartitioner<K,Void> partitioner)
        +
        Set the custom StreamPartitioner to be used as part of computing the join. + null values are accepted and will result in the default partitioner being used.
        +
        +
        Parameters:
        +
        partitioner - a StreamPartitioner that captures the partitioning strategy for the left (primary) + table of the foreign key join. Specifying this option does not repartition or otherwise + affect the source table; rather, this option informs the foreign key join on how internal + topics should be partitioned in order to be co-partitioned with the left join table. + The partitioning strategy must depend only on the message key and not the message value, + else the source table is not supported with foreign key joins. This option may be left + null if the source table uses the default partitioner.
        +
        Returns:
        +
        new TableJoined instance configured with the partitioner
        +
        +
        +
      • +
      • +
        +

        withOtherPartitioner

        +
        public TableJoined<K,KO> withOtherPartitioner(StreamPartitioner<KO,Void> otherPartitioner)
        +
        Set the custom other StreamPartitioner to be used as part of computing the join. + null values are accepted and will result in the default partitioner being used.
        +
        +
        Parameters:
        +
        otherPartitioner - a StreamPartitioner that captures the partitioning strategy for the right (foreign + key) table of the foreign key join. Specifying this option does not repartition or otherwise + affect the source table; rather, this option informs the foreign key join on how internal + topics should be partitioned in order to be co-partitioned with the right join table. + The partitioning strategy must depend only on the message key and not the message value, + else the source table is not supported with foreign key joins. This option may be left + null if the source table uses the default partitioner.
        +
        Returns:
        +
        new TableJoined instance configured with the otherPartitioner
        +
        +
        +
      • +
      • +
        +

        withName

        +
        public TableJoined<K,KO> withName(String name)
        +
        Set the base name used for all components of the join, including internal topics + created to complete the join.
        +
        +
        Parameters:
        +
        name - the name used as the base for naming components of the join including internal topics
        +
        Returns:
        +
        new TableJoined instance configured with the name
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedCogroupedKStream.html b/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedCogroupedKStream.html new file mode 100644 index 000000000..b1b108abb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedCogroupedKStream.html @@ -0,0 +1,359 @@ + + + + +TimeWindowedCogroupedKStream (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TimeWindowedCogroupedKStream<K,V>

    +
    +
    +
    +
    public interface TimeWindowedCogroupedKStream<K,V>
    +
    Same as a TimeWindowedKStream, however, for multiple co-grouped KStreams.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        aggregate

        +
        KTable<Windowed<K>,V> aggregate(Initializer<V> initializer)
        +
        Aggregate the values of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. + The result is written into a local WindowStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record (per key) in each window is + processed to provide an initial intermediate aggregation result that is used to process the first record for + the window (per key). + The specified Aggregator (as specified in KGroupedStream.cogroup(Aggregator) or + CogroupedKStream.cogroup(KGroupedStream, Aggregator)) is applied for each input record and computes a new + aggregate using the current aggregate (or for the very first record using the intermediate aggregation result + provided via the Initializer) and the record's value. + Thus, aggregate() can be used to compute aggregate functions like count or sum etc. +

        + The default key and value serde from the config will be used for serializing the result. + If a different serde is required then you should use aggregate(Initializer, Materialized). + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore) will be backed by + an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key within a window
        +
        +
        +
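        A rough sketch of where this aggregate(Initializer) call sits in a cogroup pipeline; the grouped input streams, the one-minute tumbling windows, and the summing aggregator are assumptions:

        import java.time.Duration;
        import org.apache.kafka.streams.kstream.Aggregator;
        import org.apache.kafka.streams.kstream.KGroupedStream;
        import org.apache.kafka.streams.kstream.KTable;
        import org.apache.kafka.streams.kstream.TimeWindows;
        import org.apache.kafka.streams.kstream.Windowed;

        KGroupedStream<String, Long> clicks = ...;    // grouped click counts per user
        KGroupedStream<String, Long> purchases = ...; // grouped purchase counts per user

        // One summing Aggregator reused for both inputs.
        Aggregator<String, Long, Long> sum = (key, value, aggregate) -> aggregate + value;

        KTable<Windowed<String>, Long> activityPerMinute = clicks
                .cogroup(sum)                                                  // aggregator for clicks
                .cogroup(purchases, sum)                                       // aggregator for purchases
                .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(1)))
                .aggregate(() -> 0L);                                          // shared Initializer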
      • +
      • +
        +

        aggregate

        +
        KTable<Windowed<K>,V> aggregate(Initializer<V> initializer, + Named named)
        +
        Aggregate the values of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. + The result is written into a local WindowStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record (per key) in each window is + processed to provide an initial intermediate aggregation result that is used to process the first record for + the window (per key). + The specified Aggregator (as specified in KGroupedStream.cogroup(Aggregator) or + CogroupedKStream.cogroup(KGroupedStream, Aggregator)) is applied for each input record and computes a new + aggregate using the current aggregate (or for the very first record using the intermediate aggregation result + provided via the Initializer) and the record's value. + Thus, aggregate() can be used to compute aggregate functions like count or sum etc. +

        + The default key and value serde from the config will be used for serializing the result. + If a different serde is required then you should use aggregate(Initializer, Named, Materialized). + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key. + The rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore) will be backed by + an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key within a window
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        KTable<Windowed<K>,V> aggregate(Initializer<V> initializer, + Materialized<K,V,WindowStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Aggregate the values of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. + The result is written into a local WindowStore (which is basically an ever-updating materialized view) + that can be queried using the store name as provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record (per key) in each window is + processed to provide an initial intermediate aggregation result that is used to process the first record for + the window (per key). + The specified Aggregator (as specified in KGroupedStream.cogroup(Aggregator) or + CogroupedKStream.cogroup(KGroupedStream, Aggregator)) is applied for each input record and computes a new + aggregate using the current aggregate (or for the very first record using the intermediate aggregation result + provided via the Initializer) and the record's value. + Thus, aggregate() can be used to compute aggregate functions like count or sum etc. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + To query the local ReadOnlyWindowStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // counting words
        + Store queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<VR>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedWindowStore());
        + ReadOnlyWindowStore<K, ValueAndTimestamp<VR>> localWindowStore = streams.store(storeQueryParams);
        + K key = "some-word";
        + long fromTime = ...;
        + long toTime = ...;
        + WindowStoreIterator<ValueAndTimestamp<V>> aggregateStore = localWindowStore.fetch(key, timeFrom, timeTo); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and + cannot contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + name of the store defined in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        materialized - a Materialized config used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key within a window
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        KTable<Windowed<K>,V> aggregate(Initializer<V> initializer, + Named named, + Materialized<K,V,WindowStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Aggregate the values of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. + The result is written into a local WindowStore (which is basically an ever-updating materialized view) + that can be queried using the store name as provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record (per key) in each window is + processed to provide an initial intermediate aggregation result that is used to process the first record for + the window (per key). + The specified Aggregator (as specified in KGroupedStream.cogroup(Aggregator) or + CogroupedKStream.cogroup(KGroupedStream, Aggregator)) is applied for each input record and computes a new + aggregate using the current aggregate (or for the very first record using the intermediate aggregation result + provided via the Initializer) and the record's value. + Thus, aggregate() can be used to compute aggregate functions like count or sum etc. +

        + Not all updates might get sent downstream, as an internal cache will be used to deduplicate consecutive updates + to the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + To query the local ReadOnlyWindowStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // counting words
        + Store queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
        + StoreQueryParameters<ReadOnlyKeyValueStore<K, ValueAndTimestamp<VR>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedWindowStore());
        + ReadOnlyWindowStore<K, ValueAndTimestamp<VR>> localWindowStore = streams.store(storeQueryParams);
        +
        + K key = "some-word";
        + long fromTime = ...;
        + long toTime = ...;
        + WindowStoreIterator<ValueAndTimestamp<V>> aggregateStore = localWindowStore.fetch(key, timeFrom, timeTo); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and + cannot contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + name of the store defined in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        materialized - a Materialized config used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key within a window
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedDeserializer.html b/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedDeserializer.html new file mode 100644 index 000000000..ce8f92624 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedDeserializer.html @@ -0,0 +1,316 @@ + + + + +TimeWindowedDeserializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TimeWindowedDeserializer<T>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.TimeWindowedDeserializer<T>
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Deserializer<Windowed<T>>
    +
    +
    +
    public class TimeWindowedDeserializer<T>
    extends Object
    implements Deserializer<Windowed<T>>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        WINDOW_SIZE_MS_CONFIG

        +
        public static final String WINDOW_SIZE_MS_CONFIG
        +
        Sets window size for the deserializer in order to calculate window end times.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        WINDOWED_INNER_DESERIALIZER_CLASS

        +
        public static final String WINDOWED_INNER_DESERIALIZER_CLASS
        +
        Default deserializer for the inner deserializer class of a windowed record. Must implement the Serde interface.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        TimeWindowedDeserializer

        +
        public TimeWindowedDeserializer()
        +
        +
      • +
      • +
        +

        TimeWindowedDeserializer

        +
        public TimeWindowedDeserializer(Deserializer<T> inner, + Long windowSize)
        +
        +
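        A short sketch of constructing the deserializer for reading a windowed output topic outside of Streams. The 60-second window size and the String inner type are assumptions and must match the upstream windowed aggregation:

        import org.apache.kafka.common.serialization.Deserializer;
        import org.apache.kafka.common.serialization.Serdes;
        import org.apache.kafka.streams.kstream.TimeWindowedDeserializer;
        import org.apache.kafka.streams.kstream.Windowed;

        // Window size (in ms) is only used to compute window end times; it must match
        // the size of the windows written by the upstream aggregation.
        Long windowSizeMs = 60_000L;

        Deserializer<Windowed<String>> windowedKeyDeserializer =
                new TimeWindowedDeserializer<>(Serdes.String().deserializer(), windowSizeMs);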
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        getWindowSize

        +
        public Long getWindowSize()
        +
        +
      • +
      • +
        +

        configure

        +
        public void configure(Map<String,?> configs, + boolean isKey)
        +
        Description copied from interface: Deserializer
        +
        Configure this class.
        +
        +
        Specified by:
        +
        configure in interface Deserializer<T>
        +
        Parameters:
        +
        configs - configs in key/value pairs
        +
        isKey - whether the deserializer is used for the key or the value
        +
        +
        +
      • +
      • +
        +

        deserialize

        +
        public Windowed<T> deserialize(String topic, + byte[] data)
        +
        Description copied from interface: Deserializer
        +
        Deserialize a record value from a byte array into a value or object. + +

        It is recommended to deserialize a null byte array to a null object.

        +
        +
        Specified by:
        +
        deserialize in interface Deserializer<T>
        +
        Parameters:
        +
        topic - topic associated with the data
        +
        data - serialized bytes; may be null
        +
        Returns:
        +
        deserialized typed data; may be null
        +
        +
        +
      • +
      • +
        +

        close

        +
        public void close()
        +
        Description copied from interface: Deserializer
        +
        Close this deserializer. + +

        This method must be idempotent as it may be called multiple times.

        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        Specified by:
        +
        close in interface Closeable
        +
        Specified by:
        +
        close in interface Deserializer<T>
        +
        +
        +
      • +
      • +
        +

        setIsChangelogTopic

        +
        public void setIsChangelogTopic(boolean isChangelogTopic)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedKStream.html b/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedKStream.html new file mode 100644 index 000000000..215e74fbd --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedKStream.html @@ -0,0 +1,915 @@ + + + + +TimeWindowedKStream (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TimeWindowedKStream<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - the key type of this time-windowed stream
    +
    V - the value type of this time-windowed stream
    +
    +
    +
    public interface TimeWindowedKStream<K,V>

    TimeWindowedKStream is an abstraction of a windowed record stream of key-value pairs. It is an intermediate representation of a KStream that is aggregated into a windowed KTable (a windowed KTable is a KTable with key type Windowed).

    A TimeWindowedKStream can represent one of four different windowed types: tumbling, hopping, landmark, or sliding windows.

    The result of the aggregation is written into a local WindowStore (which is basically an ever-updating materialized view) that can be queried using the name provided in the Materialized instance. Furthermore, updates to the store are sent downstream into a windowed KTable changelog stream, where "windowed" implies that the KTable key is a combined key of the original record key and a window ID. New events are added to TimeWindows/SlidingWindows until their grace period ends (see TimeWindows.ofSizeAndGrace(Duration, Duration) or SlidingWindows.ofTimeDifferenceAndGrace(Duration, Duration); note that landmark windows don't have a grace period).

    A TimeWindowedKStream is obtained from a KStream by grouping and windowing (cf. KGroupedStream.windowedBy(Windows) [for tumbling/hopping/landmark] or KGroupedStream.windowedBy(SlidingWindows)).

    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        count

        +
        KTable<Windowed<K>,Long> count()
        +
        Count the number of records in this stream by the grouped key and defined windows. Records with null key or value are ignored.

        The result is written into a local WindowStore (which is basically an ever-updating materialized view). The default key serde from the config will be used for serializing the result. If a different serde is required then you should use count(Materialized). Furthermore, updates to the store are sent downstream into a KTable changelog stream. Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to the same window and key. The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of parallel running Kafka Streams instances, and the configuration parameters for cache size, and commit interval.

        For failure and recovery the store (which always will be of type TimestampedWindowStore) will be backed by an internal changelog topic that will be created in Kafka. The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is user-specified in StreamsConfig via parameter APPLICATION_ID_CONFIG, "internalStoreName" is an internal name and "-changelog" is a fixed suffix. Note that the internal store name may not be queryable through Interactive Queries.

        You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys and Long values + that represent the latest (rolling) count (i.e., number of records) for each key within a window
        +
        +
        +
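        To ground the description above, a minimal sketch of a tumbling-window count; the topic name and the one-minute window size are assumptions:

        import java.time.Duration;
        import org.apache.kafka.common.serialization.Serdes;
        import org.apache.kafka.streams.StreamsBuilder;
        import org.apache.kafka.streams.kstream.Consumed;
        import org.apache.kafka.streams.kstream.KTable;
        import org.apache.kafka.streams.kstream.TimeWindows;
        import org.apache.kafka.streams.kstream.Windowed;

        StreamsBuilder builder = new StreamsBuilder();

        KTable<Windowed<String>, Long> pageViewsPerMinute = builder
                .stream("page-views", Consumed.with(Serdes.String(), Serdes.String()))
                .groupByKey()
                .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(1)))
                .count();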
      • +
      • +
        +

        count

        +
        KTable<Windowed<K>,Long> count(Named named)
        +
        Count the number of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. +

        + The result is written into a local WindowStore (which is basically an ever-updating materialized view). + The default key serde from the config will be used for serializing the result. + If a different serde is required then you should use count(Named, Materialized). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore) will be backed by + an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys and Long values + that represent the latest (rolling) count (i.e., number of records) for each key within a window
        +
        +
        +
      • +
      • +
        +

        count

        +
        KTable<Windowed<K>,Long> count(Materialized<K,Long,WindowStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Count the number of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. +

        + The result is written into a local WindowStore (which is basically an ever-updating materialized view) + that can be queried using the name provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + Not all updates might get sent downstream, as an internal cache will be used to deduplicate consecutive updates + to the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + To query the local ReadOnlyWindowStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // counting words
+ String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+ StoreQueryParameters<ReadOnlyWindowStore<K, ValueAndTimestamp<Long>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedWindowStore());
        + ReadOnlyWindowStore<K, ValueAndTimestamp<Long>> localWindowStore = streams.store(storeQueryParams);
        +
        + K key = "some-word";
        + long fromTime = ...;
        + long toTime = ...;
+ WindowStoreIterator<ValueAndTimestamp<Long>> countForWordsForWindows = localWindowStore.fetch(key, fromTime, toTime); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot + contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the name of the store defined + in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        materialized - an instance of Materialized used to materialize a state store. Cannot be null. + Note: the valueSerde will be automatically set to Serdes#Long() + if there is no valueSerde provided
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys and Long values + that represent the latest (rolling) count (i.e., number of records) for each key within a window
        +
        +
        +
      • +
      • +
        +

        count

        +
        KTable<Windowed<K>,Long> count(Named named, + Materialized<K,Long,WindowStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Count the number of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. +

        + The result is written into a local WindowStore (which is basically an ever-updating materialized view) + that can be queried using the name provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + Not all updates might get sent downstream, as an internal cache will be used to deduplicate consecutive updates + to the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval +

        + To query the local ReadOnlyWindowStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // counting words
+ String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+ StoreQueryParameters<ReadOnlyWindowStore<K, ValueAndTimestamp<Long>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedWindowStore());
        + ReadOnlyWindowStore<K, ValueAndTimestamp<Long>> localWindowStore = streams.store(storeQueryParams);
        +
        + K key = "some-word";
        + long fromTime = ...;
        + long toTime = ...;
+ WindowStoreIterator<ValueAndTimestamp<Long>> countForWordsForWindows = localWindowStore.fetch(key, fromTime, toTime); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot + contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the name of the store defined + in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        materialized - an instance of Materialized used to materialize a state store. Cannot be null. + Note: the valueSerde will be automatically set to Serdes#Long() + if there is no valueSerde provided
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys and Long values + that represent the latest (rolling) count (i.e., number of records) for each key within a window
        +
        +
        +
      • +
      • +
        +

        reduce

        +
        KTable<Windowed<K>,V> reduce(Reducer<V> reducer)
        +
        Combine the values of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. + Combining implies that the type of the aggregate result is the same as the type of the input value + (cf. aggregate(Initializer, Aggregator)). +

        + The result is written into a local WindowStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. + The default key and value serde from the config will be used for serializing the result. + If a different serde is required then you should use reduce(Reducer, Materialized) . +

+ The value of the first record per window initializes the aggregation result. The specified Reducer is applied for each additional input record per window and computes a new aggregate using the current aggregate (first argument) and the record's value (second argument):

        
        + // At the example of a Reducer<Long>
        + new Reducer<Long>() {
        +   public Long apply(Long aggValue, Long currValue) {
        +     return aggValue + currValue;
        +   }
        + }
        + 
        + Thus, reduce() can be used to compute aggregate functions like sum, min, or max. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore) will be backed by + an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().
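For example, a per-window sum over Long values might look like the following sketch; the grouped stream groupedLongStream is an assumption:

    KTable<Windowed<String>, Long> sumPerWindow = groupedLongStream        // an assumed KGroupedStream<String, Long>
        .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofHours(1)))
        .reduce((aggValue, newValue) -> aggValue + newValue);               // Reducer<Long> as a lambda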

        +
        +
        Parameters:
        +
        reducer - a Reducer that computes a new aggregate result. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key within a window
        +
        +
        +
      • +
      • +
        +

        reduce

        +
        KTable<Windowed<K>,V> reduce(Reducer<V> reducer, + Named named)
        +
        Combine the values of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. + Combining implies that the type of the aggregate result is the same as the type of the input value. +

        + The result is written into a local WindowStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. + The default key and value serde from the config will be used for serializing the result. + If a different serde is required then you should use reduce(Reducer, Named, Materialized) . +

+ The value of the first record per window initializes the aggregation result. The specified Reducer is applied for each additional input record per window and computes a new aggregate using the current aggregate (first argument) and the record's value (second argument):

        
        + // At the example of a Reducer<Long>
        + new Reducer<Long>() {
        +   public Long apply(Long aggValue, Long currValue) {
        +     return aggValue + currValue;
        +   }
        + }
        + 
        + Thus, reduce() can be used to compute aggregate functions like sum, min, or max. +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore) will be backed by + an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        reducer - a Reducer that computes a new aggregate result. Cannot be null.
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key within a window
        +
        +
        +
      • +
      • +
        +

        reduce

        +
        KTable<Windowed<K>,V> reduce(Reducer<V> reducer, + Materialized<K,V,WindowStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Combine the values of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. + Combining implies that the type of the aggregate result is the same as the type of the input value. +

        + The result is written into a local WindowStore (which is basically an ever-updating materialized view) + that can be queried using the store name as provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

+ The value of the first record per window initializes the aggregation result. The specified Reducer is applied for each additional input record per window and computes a new aggregate using the current aggregate (first argument) and the record's value (second argument):

        
        + // At the example of a Reducer<Long>
        + new Reducer<Long>() {
        +   public Long apply(Long aggValue, Long currValue) {
        +     return aggValue + currValue;
        +   }
        + }
        + 
        + Thus, reduce() can be used to compute aggregate functions like sum, min, or max. +

        + Not all updates might get sent downstream, as an internal cache will be used to deduplicate consecutive updates + to the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + To query the local ReadOnlyWindowStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // counting words
+ String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+ StoreQueryParameters<ReadOnlyWindowStore<K, ValueAndTimestamp<V>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedWindowStore());
        + ReadOnlyWindowStore<K, ValueAndTimestamp<V>> localWindowStore = streams.store(storeQueryParams);
        +
        + K key = "some-word";
        + long fromTime = ...;
        + long toTime = ...;
+ WindowStoreIterator<ValueAndTimestamp<V>> reduceStore = localWindowStore.fetch(key, fromTime, toTime); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot + contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the name of the store defined + in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        reducer - a Reducer that computes a new aggregate result. Cannot be null.
        +
        materialized - a Materialized config used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key within a window
        +
        +
        +
      • +
      • +
        +

        reduce

        +
        KTable<Windowed<K>,V> reduce(Reducer<V> reducer, + Named named, + Materialized<K,V,WindowStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Combine the values of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. + Combining implies that the type of the aggregate result is the same as the type of the input value. +

        + The result is written into a local WindowStore (which is basically an ever-updating materialized view) + that can be queried using the store name as provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

+ The value of the first record per window initializes the aggregation result. The specified Reducer is applied for each additional input record per window and computes a new aggregate using the current aggregate (first argument) and the record's value (second argument):

        
        + // At the example of a Reducer<Long>
        + new Reducer<Long>() {
        +   public Long apply(Long aggValue, Long currValue) {
        +     return aggValue + currValue;
        +   }
        + }
        + 
        + Thus, reduce() can be used to compute aggregate functions like sum, min, or max. +

        + Not all updates might get sent downstream, as an internal cache will be used to deduplicate consecutive updates + to the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + To query the local ReadOnlyWindowStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // counting words
+ String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+ StoreQueryParameters<ReadOnlyWindowStore<K, ValueAndTimestamp<V>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedWindowStore());
        + ReadOnlyWindowStore<K, ValueAndTimestamp<V>> localWindowStore = streams.store(storeQueryParams);
        +
        + K key = "some-word";
        + long fromTime = ...;
        + long toTime = ...;
+ WindowStoreIterator<ValueAndTimestamp<V>> reduceStore = localWindowStore.fetch(key, fromTime, toTime); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot + contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the name of the store defined + in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Parameters:
        +
        reducer - a Reducer that computes a new aggregate result. Cannot be null.
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        materialized - a Materialized config used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key within a window
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        <VOut> KTable<Windowed<K>,VOut> aggregate(Initializer<VOut> initializer, + Aggregator<? super K,? super V,VOut> aggregator)
        +
        Aggregate the values of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. + Aggregating is a generalization of combining via reduce(...) as it, for example, + allows the result to have a different type than the input values. + The result is written into a local WindowStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record (per key) in each window is + processed to provide an initial intermediate aggregation result that is used to process the first record for + the window (per key). + The specified Aggregator is applied for each input record and computes a new aggregate using the current + aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value. + Thus, aggregate() can be used to compute aggregate functions like count (cf. count()). +

        + The default key and value serde from the config will be used for serializing the result. + If a different serde is required then you should use aggregate(Initializer, Aggregator, Materialized). + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key. + The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of + parallel running Kafka Streams instances, and the configuration parameters for + cache size, and + commit interval. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore) will be backed by + an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. +

        + You can retrieve all generated internal topic names via Topology.describe().
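For example, aggregating String values into a per-window character count (a result type different from the input type) could look like the following sketch; groupedStream is an assumption, and a matching Integer value serde must be configured or supplied via the Materialized overload:

    KTable<Windowed<String>, Integer> charsPerWindow = groupedStream       // an assumed KGroupedStream<String, String>
        .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(5)))
        .aggregate(
            () -> 0,                                                        // Initializer<Integer>
            (key, value, aggregate) -> aggregate + value.length());         // Aggregator<String, String, Integer>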

        +
        +
        Type Parameters:
        +
        VOut - the value type of the resulting KTable
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        aggregator - an Aggregator that computes a new aggregate result. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key within a window
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        <VOut> KTable<Windowed<K>,VOut> aggregate(Initializer<VOut> initializer, + Aggregator<? super K,? super V,VOut> aggregator, + Named named)
        +
        Aggregate the values of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. + Aggregating is a generalization of combining via reduce(...) as it, for example, + allows the result to have a different type than the input values. + The result is written into a local WindowStore (which is basically an ever-updating materialized view). + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record (per key) in each window is + processed to provide an initial intermediate aggregation result that is used to process the first record for + the window (per key). + The specified Aggregator is applied for each input record and computes a new aggregate using the current + aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value. + Thus, aggregate() can be used to compute aggregate functions like count (cf. count()). +

        + The default key and value serde from the config will be used for serializing the result. + If a different serde is required then you should use + aggregate(Initializer, Aggregator, Named, Materialized). + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key. + The rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore) will be backed by + an internal changelog topic that will be created in Kafka. + The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "internalStoreName" is an internal name + and "-changelog" is a fixed suffix. + Note that the internal store name may not be queryable through Interactive Queries. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Type Parameters:
        +
        VOut - the value type of the resulting KTable
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        aggregator - an Aggregator that computes a new aggregate result. Cannot be null.
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key within a window
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        <VOut> KTable<Windowed<K>,VOut> aggregate(Initializer<VOut> initializer, + Aggregator<? super K,? super V,VOut> aggregator, + Materialized<K,VOut,WindowStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Aggregate the values of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. + Aggregating is a generalization of combining via reduce(...) as it, for example, + allows the result to have a different type than the input values. + The result is written into a local WindowStore (which is basically an ever-updating materialized view) + that can be queried using the store name as provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record (per key) in each window is + processed to provide an initial intermediate aggregation result that is used to process the first record for + the window (per key). + The specified Aggregator is applied for each input record and computes a new aggregate using the current + aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value. + Thus, aggregate() can be used to compute aggregate functions like count (cf. count()). +

        + Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to + the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval. +

        + To query the local ReadOnlyWindowStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // counting words
+ String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+ StoreQueryParameters<ReadOnlyWindowStore<K, ValueAndTimestamp<VOut>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedWindowStore());
+ ReadOnlyWindowStore<K, ValueAndTimestamp<VOut>> localWindowStore = streams.store(storeQueryParams);
        +
        + K key = "some-word";
        + long fromTime = ...;
        + long toTime = ...;
+ WindowStoreIterator<ValueAndTimestamp<VOut>> aggregateStore = localWindowStore.fetch(key, fromTime, toTime); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and + cannot contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + name of the store defined in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Type Parameters:
        +
        VOut - the value type of the resulting KTable
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        aggregator - an Aggregator that computes a new aggregate result. Cannot be null.
        +
        materialized - a Materialized config used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key within a window
        +
        +
        +
      • +
      • +
        +

        aggregate

        +
        <VOut> KTable<Windowed<K>,VOut> aggregate(Initializer<VOut> initializer, + Aggregator<? super K,? super V,VOut> aggregator, + Named named, + Materialized<K,VOut,WindowStore<org.apache.kafka.common.utils.Bytes,byte[]>> materialized)
        +
        Aggregate the values of records in this stream by the grouped key and defined windows. + Records with null key or value are ignored. + Aggregating is a generalization of combining via reduce(...) as it, for example, + allows the result to have a different type than the input values. + The result is written into a local WindowStore (which is basically an ever-updating materialized view) + that can be queried using the store name as provided with Materialized. + Furthermore, updates to the store are sent downstream into a KTable changelog stream. +

        + The specified Initializer is applied directly before the first input record (per key) in each window is + processed to provide an initial intermediate aggregation result that is used to process the first record for + the window (per key). + The specified Aggregator is applied for each input record and computes a new aggregate using the current + aggregate (or for the very first record using the intermediate aggregation result provided via the + Initializer) and the record's value. + Thus, aggregate() can be used to compute aggregate functions like count (cf. count()). +

        + Not all updates might get sent downstream, as an internal cache will be used to deduplicate consecutive updates + to the same window and key if caching is enabled on the Materialized instance. + When caching is enabled the rate of propagated updates depends on your input data rate, the number of distinct + keys, the number of parallel running Kafka Streams instances, and the configuration + parameters for cache size, and + commit interval +

        + To query the local ReadOnlyWindowStore it must be obtained via + KafkaStreams#store(...): +

        
        + KafkaStreams streams = ... // counting words
+ String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+ StoreQueryParameters<ReadOnlyWindowStore<K, ValueAndTimestamp<VOut>>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.timestampedWindowStore());
+ ReadOnlyWindowStore<K, ValueAndTimestamp<VOut>> localWindowStore = streams.store(storeQueryParams);
        +
        + K key = "some-word";
        + long fromTime = ...;
        + long toTime = ...;
+ WindowStoreIterator<ValueAndTimestamp<VOut>> aggregateStore = localWindowStore.fetch(key, fromTime, toTime); // key must be local (application state is shared over all running Kafka Streams instances)
        + 
        + For non-local keys, a custom RPC mechanism must be implemented using KafkaStreams.metadataForAllStreamsClients() to + query the value of the key on a parallel running instance of your Kafka Streams application. +

        + For failure and recovery the store (which always will be of type TimestampedWindowStore -- regardless of what + is specified in the parameter materialized) will be backed by an internal changelog topic that will be created in Kafka. + Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and + cannot contain characters other than ASCII alphanumerics, '.', '_' and '-'. + The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is + user-specified in StreamsConfig via parameter + APPLICATION_ID_CONFIG, "storeName" is the + name of the store defined in Materialized, and "-changelog" is a fixed suffix. +

        + You can retrieve all generated internal topic names via Topology.describe().

        +
        +
        Type Parameters:
        +
        VOut - the value type of the resulting KTable
        +
        Parameters:
        +
        initializer - an Initializer that computes an initial intermediate aggregation result. Cannot be null.
        +
        aggregator - an Aggregator that computes a new aggregate result. Cannot be null.
        +
        named - a Named config used to name the processor in the topology. Cannot be null.
        +
        materialized - a Materialized config used to materialize a state store. Cannot be null.
        +
        Returns:
        +
        a windowed KTable that contains "update" records with unmodified keys, and values that represent + the latest (rolling) aggregate for each key within a window
        +
        +
        +
      • +
      • +
        +

        emitStrategy

        +
        TimeWindowedKStream<K,V> emitStrategy(EmitStrategy emitStrategy)
        +
        Configure when the aggregated result will be emitted for TimeWindowedKStream. +

+ For example, for the EmitStrategy.onWindowClose() strategy, the aggregated result for a window will only be emitted when the window closes. For the EmitStrategy.onWindowUpdate() strategy, the aggregated result for a window will be emitted whenever there is an update to the window. Note that whether the result will be available downstream also depends on the cache policy.
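A sketch of emitting only final per-window results; the grouped stream and durations are assumptions:

    groupedStream
        .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofMinutes(5), Duration.ofSeconds(30)))
        .emitStrategy(EmitStrategy.onWindowClose())   // emit one final result per key and window
        .count()
        .toStream()
        .foreach((windowedKey, count) -> System.out.println(windowedKey + " -> " + count));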

        +
        +
        Parameters:
        +
        emitStrategy - EmitStrategy to configure when the aggregated result for a window will be emitted.
        +
        Returns:
        +
        a TimeWindowedKStream with EmitStrategy configured.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedSerializer.html b/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedSerializer.html new file mode 100644 index 000000000..c6c242f10 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindowedSerializer.html @@ -0,0 +1,291 @@ + + + + +TimeWindowedSerializer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TimeWindowedSerializer<T>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.TimeWindowedSerializer<T>
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Serializer<Windowed<T>>, org.apache.kafka.streams.kstream.internals.WindowedSerializer<T>
    +
    +
    +
    public class TimeWindowedSerializer<T> +extends Object +implements org.apache.kafka.streams.kstream.internals.WindowedSerializer<T>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        WINDOWED_INNER_SERIALIZER_CLASS

        +
        public static final String WINDOWED_INNER_SERIALIZER_CLASS
        +
        Default serializer for the inner serializer class of a windowed record. Must implement the Serde interface.
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        TimeWindowedSerializer

        +
        public TimeWindowedSerializer()
        +
        +
      • +
      • +
        +

        TimeWindowedSerializer

        +
        public TimeWindowedSerializer(Serializer<T> inner)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        public void configure(Map<String,?> configs, + boolean isKey)
        +
        Description copied from interface: Serializer
        +
        Configure this class.
        +
        +
        Specified by:
        +
        configure in interface Serializer<T>
        +
        Parameters:
        +
        configs - configs in key/value pairs
        +
        isKey - whether the serializer is used for the key or the value
        +
        +
        +
      • +
      • +
        +

        serialize

        +
        public byte[] serialize(String topic, + Windowed<T> data)
        +
        Description copied from interface: Serializer
        +
        Convert data into a byte array. + +

        It is recommended to serialize null data to the null byte array.

        +
        +
        Specified by:
        +
        serialize in interface Serializer<T>
        +
        Parameters:
        +
        topic - topic associated with data
        +
        data - typed data; may be null
        +
        Returns:
        +
        serialized bytes; may be null
        +
        +
        +
      • +
      • +
        +

        close

        +
        public void close()
        +
        Description copied from interface: Serializer
        +
        Close this serializer. + +

        This method must be idempotent as it may be called multiple times.

        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        Specified by:
        +
        close in interface Closeable
        +
        Specified by:
        +
        close in interface Serializer<T>
        +
        +
        +
      • +
      • +
        +

        serializeBaseKey

        +
        public byte[] serializeBaseKey(String topic, + Windowed<T> data)
        +
        +
        Specified by:
        +
        serializeBaseKey in interface org.apache.kafka.streams.kstream.internals.WindowedSerializer<T>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindows.html b/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindows.html new file mode 100644 index 000000000..1e677a20c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/TimeWindows.html @@ -0,0 +1,392 @@ + + + + +TimeWindows (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TimeWindows

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.Windows<org.apache.kafka.streams.kstream.internals.TimeWindow> +
    org.apache.kafka.streams.kstream.TimeWindows
    +
    +
    +
    +
    +
    public final class TimeWindows +extends Windows<org.apache.kafka.streams.kstream.internals.TimeWindow>
    +
    The fixed-size time-based window specifications used for aggregations. +

    + The semantics of time-based aggregation windows are: Every T1 (advance) milliseconds, compute the aggregate total for + T2 (size) milliseconds. +

      +
• If advance < size a hopping window is defined:
+ it discretizes a stream into overlapping windows, which implies that a record may be contained in one or more "adjacent" windows.
    • +
    • If advance == size a tumbling window is defined:
+ it discretizes a stream into non-overlapping windows, which implies that a record is only ever contained in one and only one tumbling window.
    • +
+ Thus, the specified TimeWindows are aligned to the epoch. Aligned to the epoch means that the first window starts at timestamp zero. For example, hopping windows with a size of 5000ms and an advance of 3000ms have window boundaries [0;5000),[3000;8000),... and not [1000;6000),[4000;9000),... or even something "random" like [1452;6452),[4452;9452),...

    + For time semantics, see TimestampExtractor.
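A sketch matching the boundaries above (5000ms windows advancing by 3000ms), together with a tumbling variant; the grace periods are illustrative:

    // Hopping windows: size 5s, advance 3s -> boundaries [0;5000), [3000;8000), ...
    TimeWindows hopping = TimeWindows
        .ofSizeAndGrace(Duration.ofSeconds(5), Duration.ofSeconds(1))
        .advanceBy(Duration.ofSeconds(3));

    // Tumbling windows: advance == size, non-overlapping
    TimeWindows tumbling = TimeWindows.ofSizeWithNoGrace(Duration.ofSeconds(5));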

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Field Summary

      +
      Fields
      +
      +
      Modifier and Type
      +
      Field
      +
      Description
      +
      final long
      + +
      +
      The size of the window's advance interval in milliseconds, i.e., by how much a window moves forward relative to + the previous one.
      +
      +
      final long
      + +
      +
      The size of the windows in milliseconds.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
      Return a window definition with the original size, but advance ("hop") the window by the given interval, which + specifies by how much a window moves forward relative to the previous one.
      +
      +
      boolean
      + +
       
      +
      long
      + +
      +
Return the window grace period (the time to admit out-of-order events after the end of the window). Delay is defined as (stream_time - record_timestamp).
      +
      +
      int
      + +
       
      + +
      ofSizeAndGrace(Duration size, + Duration afterWindowEnd)
      +
      +
      Return a window definition with the given window size, and with the advance interval being equal to the window + size.
      +
      + + +
      +
      Return a window definition with the given window size, and with the advance interval being equal to the window + size.
      +
      +
      long
      + +
      +
      Return the size of the specified windows in milliseconds.
      +
      + + +
       
      +
      Map<Long,org.apache.kafka.streams.kstream.internals.TimeWindow>
      +
      windowsFor(long timestamp)
      +
      +
      Create all windows that contain the provided timestamp, indexed by non-negative window start timestamps.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        sizeMs

        +
        public final long sizeMs
        +
        The size of the windows in milliseconds.
        +
        +
      • +
      • +
        +

        advanceMs

        +
        public final long advanceMs
        +
        The size of the window's advance interval in milliseconds, i.e., by how much a window moves forward relative to + the previous one.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        ofSizeWithNoGrace

        +
        public static TimeWindows ofSizeWithNoGrace(Duration size) + throws IllegalArgumentException
        +
        Return a window definition with the given window size, and with the advance interval being equal to the window + size. + The time interval represented by the N-th window is: [N * size, N * size + size). +

        + This provides the semantics of tumbling windows, which are fixed-sized, gap-less, non-overlapping windows. + Tumbling windows are a special case of hopping windows with advance == size. +

        + CAUTION: Using this method implicitly sets the grace period to zero, which means that any out-of-order + records arriving after the window ends are considered late and will be dropped.
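A sketch of the zero-grace behaviour described above; the window size and timestamps are illustrative:

    TimeWindows windows = TimeWindows.ofSizeWithNoGrace(Duration.ofSeconds(30));
    // A record with timestamp 25_000 falls into window [0;30000).
    // Once stream time passes 30_000, further records for that window are dropped as late.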

        +
        +
        Parameters:
        +
        size - The size of the window
        +
        Returns:
        +
        a new window definition with default no grace period. Note that this means out-of-order records arriving after the window end will be dropped
        +
        Throws:
        +
        IllegalArgumentException - if the specified window size is zero or negative or can't be represented as long milliseconds
        +
        +
        +
      • +
      • +
        +

        ofSizeAndGrace

        +
        public static TimeWindows ofSizeAndGrace(Duration size, + Duration afterWindowEnd) + throws IllegalArgumentException
        +
        Return a window definition with the given window size, and with the advance interval being equal to the window + size. + The time interval represented by the N-th window is: [N * size, N * size + size). +

        + This provides the semantics of tumbling windows, which are fixed-sized, gap-less, non-overlapping windows. + Tumbling windows are a special case of hopping windows with advance == size. +

+ Using this method explicitly sets the grace period to the duration specified by afterWindowEnd, which means that only out-of-order records arriving more than the grace period after the window end will be dropped. The window close, after which any incoming records are considered late and will be rejected, is defined as windowEnd + afterWindowEnd.
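For instance, a one-minute tumbling window that still admits records arriving up to ten seconds after the window end might be defined as follows (the durations are illustrative):

    TimeWindows windows = TimeWindows.ofSizeAndGrace(Duration.ofMinutes(1), Duration.ofSeconds(10));
    // Window [60000;120000) closes at stream time 130000; records for it arriving later are dropped.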

        +
        +
        Parameters:
        +
        size - The size of the window. Must be larger than zero
        +
        afterWindowEnd - The grace period to admit out-of-order events to a window. Must be non-negative.
        +
        Returns:
        +
        a TimeWindows object with the specified size and the specified grace period
        +
        Throws:
        +
        IllegalArgumentException - if afterWindowEnd is negative or can't be represented as long milliseconds
        +
        +
        +
      • +
      • +
        +

        advanceBy

        +
        public TimeWindows advanceBy(Duration advance)
        +
        Return a window definition with the original size, but advance ("hop") the window by the given interval, which + specifies by how much a window moves forward relative to the previous one. + The time interval represented by the N-th window is: [N * advance, N * advance + size). +

        + This provides the semantics of hopping windows, which are fixed-sized, overlapping windows.

        +
        +
        Parameters:
        +
        advance - The advance interval ("hop") of the window, with the requirement that 0 < advance.toMillis() <= sizeMs.
        +
        Returns:
        +
        a new window definition with default maintain duration of 1 day
        +
        Throws:
        +
        IllegalArgumentException - if the advance interval is negative, zero, or larger than the window size
        +
        +
        +
      • +
      • +
        +

        windowsFor

        +
        public Map<Long,org.apache.kafka.streams.kstream.internals.TimeWindow> windowsFor(long timestamp)
        +
        Description copied from class: Windows
        +
        Create all windows that contain the provided timestamp, indexed by non-negative window start timestamps.
        +
        +
        Specified by:
        +
        windowsFor in class Windows<org.apache.kafka.streams.kstream.internals.TimeWindow>
        +
        Parameters:
        +
        timestamp - the timestamp window should get created for
        +
        Returns:
        +
        a map of windowStartTimestamp -> Window entries
        +
        +
        +
      • +
      • +
        +

        size

        +
        public long size()
        +
        Description copied from class: Windows
        +
        Return the size of the specified windows in milliseconds.
        +
        +
        Specified by:
        +
        size in class Windows<org.apache.kafka.streams.kstream.internals.TimeWindow>
        +
        Returns:
        +
        the size of the specified windows
        +
        +
        +
      • +
      • +
        +

        gracePeriodMs

        +
        public long gracePeriodMs()
        +
        Description copied from class: Windows
        +
Return the window grace period (the time to admit out-of-order events after the end of the window). Delay is defined as (stream_time - record_timestamp).
        +
        +
        Specified by:
        +
        gracePeriodMs in class Windows<org.apache.kafka.streams.kstream.internals.TimeWindow>
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Transformer.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Transformer.html new file mode 100644 index 000000000..051a5ca26 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Transformer.html @@ -0,0 +1,228 @@ + + + + +Transformer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Transformer<K,V,R>

    +
    +
    +
    +
    Type Parameters:
    +
    K - key type
    +
    V - value type
    +
    R - KeyValue return type (both key and value type can be set + arbitrarily)
    +
    +
    +
    @Deprecated +public interface Transformer<K,V,R>
    +
    Deprecated. +
    Since 4.0. Use api.Processor instead.
    +
    +
The Transformer interface is for stateful mapping of an input record to zero, one, or multiple new output records (both key and value type can be altered arbitrarily). This is a stateful record-by-record operation, i.e., transform(Object, Object) is invoked individually for each record of a stream and can access and modify a state that is available beyond a single call of transform(Object, Object) (cf. KeyValueMapper for stateless record transformation). Additionally, this Transformer can schedule a method to be called periodically with the provided context.

    + Use TransformerSupplier to provide new instances of Transformer to Kafka Stream's runtime. +

    + If only a record's value should be modified ValueTransformer can be used.
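Since this interface is deprecated, a rough equivalent using the recommended api.Processor could look like the following sketch (the class and behaviour are illustrative, not part of this interface):

    import org.apache.kafka.streams.processor.api.Processor;
    import org.apache.kafka.streams.processor.api.ProcessorContext;
    import org.apache.kafka.streams.processor.api.Record;

    public class UppercaseProcessor implements Processor<String, String, String, String> {
        private ProcessorContext<String, String> context;

        @Override
        public void init(final ProcessorContext<String, String> context) {
            this.context = context;                    // keep the context to forward records later
        }

        @Override
        public void process(final Record<String, String> record) {
            // forward zero, one, or multiple records; here exactly one, with an upper-cased value
            context.forward(record.withValue(record.value().toUpperCase()));
        }

        @Override
        public void close() { }                        // nothing to clean up in this sketch
    }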

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      Deprecated.
      +
      Close this transformer and clean up any resources.
      +
      +
      void
      + +
      +
      Deprecated.
      +
      Initialize this transformer.
      +
      + +
      transform(K key, + V value)
      +
      +
      Deprecated.
      +
      Transform the record with the given key and value.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/TransformerSupplier.html b/static/41/javadoc/org/apache/kafka/streams/kstream/TransformerSupplier.html new file mode 100644 index 000000000..48923fd35 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/TransformerSupplier.html @@ -0,0 +1,178 @@ + + + + +TransformerSupplier (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TransformerSupplier<K,V,R>

    +
    +
    +
    +
    Type Parameters:
    +
    K - key type
    +
    V - value type
    +
    R - KeyValue return type (both key and value type can be set + arbitrarily)
    +
    +
    +
    All Superinterfaces:
    +
    ConnectedStoreProvider, Supplier<Transformer<K,V,R>>
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @Deprecated +@FunctionalInterface +public interface TransformerSupplier<K,V,R> +extends ConnectedStoreProvider, Supplier<Transformer<K,V,R>>
    +
    Deprecated. +
    Since 4.0. Use api.ProcessorSupplier instead.
    +
    +
    A TransformerSupplier interface which can create one or more Transformer instances. +

+ The supplier should always generate a new instance each time get() gets called. Creating a single Transformer object and returning the same object reference in get() would be a violation of the supplier pattern and lead to runtime exceptions.
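As a sketch of that contract (the key/value types are arbitrary examples), note that the lambda body below runs on every get() call, so each call hands out a fresh Transformer:

    TransformerSupplier<String, String, KeyValue<String, Integer>> supplier = () ->
        new Transformer<String, String, KeyValue<String, Integer>>() {
            @Override
            public void init(final ProcessorContext context) { }       // no state needed in this sketch

            @Override
            public KeyValue<String, Integer> transform(final String key, final String value) {
                return KeyValue.pair(key, value.length());             // one output record per input record
            }

            @Override
            public void close() { }
        };
    // Anti-pattern: creating a single Transformer instance outside the lambda and returning the
    // same reference from every get() call violates the supplier contract described above.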

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      get()
      +
      +
      Deprecated.
      +
      Return a newly constructed Transformer instance.
      +
      +
      +
      +
      +
      +

      Methods inherited from interface org.apache.kafka.streams.processor.ConnectedStoreProvider

      +stores
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        get

        +
        Transformer<K,V,R> get()
        +
        Deprecated.
        +
        Return a newly constructed Transformer instance. + The supplier should always generate a new instance each time get() gets called. +

        + Creating a single Transformer object and returning the same object reference in get() + is a violation of the supplier pattern and leads to runtime exceptions.

        +
        +
        Specified by:
        +
        get in interface Supplier<K>
        +
        Returns:
        +
        a newly constructed Transformer instance
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/UnlimitedWindows.html b/static/41/javadoc/org/apache/kafka/streams/kstream/UnlimitedWindows.html new file mode 100644 index 000000000..c31bd93e9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/UnlimitedWindows.html @@ -0,0 +1,310 @@ + + + + +UnlimitedWindows (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class UnlimitedWindows

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.Windows<org.apache.kafka.streams.kstream.internals.UnlimitedWindow> +
    org.apache.kafka.streams.kstream.UnlimitedWindows
    +
    +
    +
    +
    +
    public final class UnlimitedWindows +extends Windows<org.apache.kafka.streams.kstream.internals.UnlimitedWindow>
    +
    The unlimited window specifications used for aggregations. +

    + An unlimited time window is also called a landmark window. + It has a fixed starting point while its window end is defined as infinite. + In this regard, it is a fixed-size window with infinite window size. +

    + For time semantics, see TimestampExtractor.
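    A minimal usage sketch (the input stream, key/value types, and the landmark start instant are assumptions, not part of the Kafka docs):

    import java.time.Instant;
    import org.apache.kafka.streams.kstream.*;

    class UnlimitedWindowExample {
        // Counts all events per key since a fixed landmark instant (windows never close).
        static KTable<Windowed<String>, Long> countsSinceLaunch(final KStream<String, String> events) {
            return events
                .groupByKey()
                .windowedBy(UnlimitedWindows.of().startOn(Instant.parse("2024-01-01T00:00:00Z")))
                .count();
        }
    }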

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Field Summary

      +
      Fields
      +
      +
      Modifier and Type
      +
      Field
      +
      Description
      +
      final long
      + +
      +
      The start timestamp of the window.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      + +
       
      +
      long
      + +
      +
      Return the window grace period (the time to admit + out-of-order events after the end of the window). + + Delay is defined as (stream_time - record_timestamp).
      +
      +
      int
      + +
       
      + +
      of()
      +
      +
      Return an unlimited window starting at timestamp zero.
      +
      +
      long
      + +
      +
      Return the size of the specified windows in milliseconds.
      +
      + + +
      +
      Return a new unlimited window for the specified start timestamp.
      +
      + + +
       
      +
      Map<Long,org.apache.kafka.streams.kstream.internals.UnlimitedWindow>
      +
      windowsFor(long timestamp)
      +
      +
      Create all windows that contain the provided timestamp, indexed by non-negative window start timestamps.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        startMs

        +
        public final long startMs
        +
        The start timestamp of the window.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        of

        +
        public static UnlimitedWindows of()
        +
        Return an unlimited window starting at timestamp zero.
        +
        +
      • +
      • +
        +

        startOn

        +
        public UnlimitedWindows startOn(Instant start) + throws IllegalArgumentException
        +
        Return a new unlimited window for the specified start timestamp.
        +
        +
        Parameters:
        +
        start - the window start time
        +
        Returns:
        +
        a new unlimited window that starts at start
        +
        Throws:
        +
        IllegalArgumentException - if the start time is negative or can't be represented as long milliseconds
        +
        +
        +
      • +
      • +
        +

        windowsFor

        +
        public Map<Long,org.apache.kafka.streams.kstream.internals.UnlimitedWindow> windowsFor(long timestamp)
        +
        Description copied from class: Windows
        +
        Create all windows that contain the provided timestamp, indexed by non-negative window start timestamps.
        +
        +
        Specified by:
        +
        windowsFor in class Windows<org.apache.kafka.streams.kstream.internals.UnlimitedWindow>
        +
        Parameters:
        +
        timestamp - the timestamp window should get created for
        +
        Returns:
        +
        a map of windowStartTimestamp -> Window entries
        +
        +
        +
      • +
      • +
        +

        size

        +
        public long size()
        +
        Return the size of the specified windows in milliseconds. + As unlimited windows have conceptually infinite size, this method just returns Long.MAX_VALUE.
        +
        +
        Specified by:
        +
        size in class Windows<org.apache.kafka.streams.kstream.internals.UnlimitedWindow>
        +
        Returns:
        +
        the size of the specified windows which is Long.MAX_VALUE
        +
        +
        +
      • +
      • +
        +

        gracePeriodMs

        +
        public long gracePeriodMs()
        +
        Description copied from class: Windows
        +
        Return the window grace period (the time to admit + out-of-order events after the end of the window). + + Delay is defined as (stream_time - record_timestamp).
        +
        +
        Specified by:
        +
        gracePeriodMs in class Windows<org.apache.kafka.streams.kstream.internals.UnlimitedWindow>
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/ValueJoiner.html b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueJoiner.html new file mode 100644 index 000000000..d0b4f6242 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueJoiner.html @@ -0,0 +1,171 @@ + + + + +ValueJoiner (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ValueJoiner<V1,V2,VR>

    +
    +
    +
    +
    Type Parameters:
    +
    V1 - first value type
    +
    V2 - second value type
    +
    VR - joined value type
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface ValueJoiner<V1,V2,VR>
    +
    The ValueJoiner interface for joining two values into a new value of arbitrary type. + This is a stateless operation, i.e., apply(Object, Object) is invoked individually for each joining + record-pair of a KStream-KStream, KStream-KTable, or KTable-KTable + join.
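    For illustration, a ValueJoiner is typically supplied as a lambda; the Order, Customer, and EnrichedOrder types and the orders/customers streams below are hypothetical:

    ValueJoiner<Order, Customer, EnrichedOrder> enrich =
        (order, customer) -> new EnrichedOrder(order, customer);

    // e.g. in a KStream-KTable join (types assumed):
    // KStream<String, EnrichedOrder> enriched = orders.join(customers, enrich);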
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      apply(V1 value1, + V2 value2)
      +
      +
      Return a joined value consisting of value1 and value2.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        apply

        +
        VR apply(V1 value1, + V2 value2)
        +
        Return a joined value consisting of value1 and value2.
        +
        +
        Parameters:
        +
        value1 - the first value for joining
        +
        value2 - the second value for joining
        +
        Returns:
        +
        the joined value
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/ValueJoinerWithKey.html b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueJoinerWithKey.html new file mode 100644 index 000000000..83a186853 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueJoinerWithKey.html @@ -0,0 +1,178 @@ + + + + +ValueJoinerWithKey (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ValueJoinerWithKey<K1,V1,V2,VR>

    +
    +
    +
    +
    Type Parameters:
    +
    K1 - key value type
    +
    V1 - first value type
    +
    V2 - second value type
    +
    VR - joined value type
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface ValueJoinerWithKey<K1,V1,V2,VR>
    +
    The ValueJoinerWithKey interface for joining two values into a new value of arbitrary type. + This interface provides access to a read-only key that the user should not modify, as this would lead to + undefined behavior. + This is a stateless operation, i.e., apply(Object, Object, Object) is invoked individually for each joining + record-pair of a KStream-KStream, KStream-KTable, or KTable-KTable + join.
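    A hedged sketch: this is identical to ValueJoiner except that the read-only key is passed as the first argument; the types are hypothetical:

    ValueJoinerWithKey<String, Order, Customer, EnrichedOrder> enrich =
        (orderId, order, customer) -> new EnrichedOrder(orderId, order, customer);
    // The key (orderId) must only be read, never mutated.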
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      apply(K1 readOnlyKey, + V1 value1, + V2 value2)
      +
      +
      Return a joined value consisting of readOnlyKey, value1 and value2.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        apply

        +
        VR apply(K1 readOnlyKey, + V1 value1, + V2 value2)
        +
        Return a joined value consisting of readOnlyKey, value1 and value2.
        +
        +
        Parameters:
        +
        readOnlyKey - the key
        +
        value1 - the first value for joining
        +
        value2 - the second value for joining
        +
        Returns:
        +
        the joined value
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/ValueMapper.html b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueMapper.html new file mode 100644 index 000000000..b6162f2e0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueMapper.html @@ -0,0 +1,164 @@ + + + + +ValueMapper (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ValueMapper<V,VR>

    +
    +
    +
    +
    Type Parameters:
    +
    V - value type
    +
    VR - mapped value type
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface ValueMapper<V,VR>
    +
    The ValueMapper interface for mapping a value to a new value of arbitrary type. + This is a stateless record-by-record operation, i.e., apply(Object) is invoked individually for each record + of a stream (cf. FixedKeyProcessor for stateful value transformation). + If ValueMapper is applied to a Record, the record's + key is preserved. + If a record's key and value should be modified, KeyValueMapper can be used.
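    For example (assuming a KStream<String, String> named sentences), mapValues with a ValueMapper keeps the key and replaces only the value:

    KStream<String, Integer> lengths = sentences.mapValues(value -> value.length());
    // equivalent explicit form:
    ValueMapper<String, Integer> toLength = value -> value.length();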
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      apply(V value)
      +
      +
      Map the given value to a new value.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        apply

        +
        VR apply(V value)
        +
        Map the given value to a new value.
        +
        +
        Parameters:
        +
        value - the value to be mapped
        +
        Returns:
        +
        the new value
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/ValueMapperWithKey.html b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueMapperWithKey.html new file mode 100644 index 000000000..d265f4245 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueMapperWithKey.html @@ -0,0 +1,169 @@ + + + + +ValueMapperWithKey (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ValueMapperWithKey<K,V,VR>

    +
    +
    +
    +
    Type Parameters:
    +
    K - key type
    +
    V - value type
    +
    VR - mapped value type
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface ValueMapperWithKey<K,V,VR>
    +
    The ValueMapperWithKey interface for mapping a value to a new value of arbitrary type. + This is a stateless record-by-record operation, i.e., apply(Object, Object) is invoked individually for each + record of a stream (cf. FixedKeyProcessor for stateful value transformation). + If ValueMapperWithKey is applied to a Record, the + record's key is preserved. + Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. + If a record's key and value should be modified, KeyValueMapper can be used.
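    For example (the input stream and its String/String types are assumed), the read-only key can be folded into the new value:

    KStream<String, String> tagged =
        input.mapValues((readOnlyKey, value) -> readOnlyKey + ":" + value);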
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      apply(K readOnlyKey, + V value)
      +
      +
      Map the given [key and] value to a new value.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        apply

        +
        VR apply(K readOnlyKey, + V value)
        +
        Map the given [key and] value to a new value.
        +
        +
        Parameters:
        +
        readOnlyKey - the read-only key
        +
        value - the value to be mapped
        +
        Returns:
        +
        the new value
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/ValueTransformer.html b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueTransformer.html new file mode 100644 index 000000000..f32fc462e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueTransformer.html @@ -0,0 +1,226 @@ + + + + +ValueTransformer (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ValueTransformer<V,VR>

    +
    +
    +
    +
    Type Parameters:
    +
    V - value type
    +
    VR - transformed value type
    +
    +
    +
    @Deprecated +public interface ValueTransformer<V,VR>
    +
    Deprecated. +
    Since 4.0. Use FixedKeyProcessor instead.
    +
    +
    The ValueTransformer interface for stateful mapping of a value to a new value (with possible new type). + This is a stateful record-by-record operation, i.e., transform(Object) is invoked individually for each + record of a stream and can access and modify a state that is available beyond a single call of + transform(Object) (cf. ValueMapper for stateless value transformation). + Additionally, this ValueTransformer can schedule + a method to be called periodically with the provided context. + If ValueTransformer is applied to a KeyValue pair record, the record's key is preserved. +

    + Use ValueTransformerSupplier to provide new instances of ValueTransformer to Kafka Stream's runtime. +

    + If a record's key and value should be modified, Transformer can be used.
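    A minimal sketch of the (deprecated) interface shape; for brevity it keeps state in a plain field, whereas real code would normally use a connected, fault-tolerant state store:

    import org.apache.kafka.streams.kstream.ValueTransformer;
    import org.apache.kafka.streams.processor.ProcessorContext;

    class RunningCountValueTransformer implements ValueTransformer<String, Long> {
        private long seen = 0;  // illustration only; not fault tolerant

        @Override
        public void init(final ProcessorContext context) { }

        @Override
        public Long transform(final String value) {
            return ++seen;  // state is retained across calls, unlike a stateless ValueMapper
        }

        @Override
        public void close() { }
    }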

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      Deprecated.
      +
      Close this transformer and clean up any resources.
      +
      +
      void
      + +
      +
      Deprecated.
      +
      Initialize this transformer.
      +
      + +
      transform(V value)
      +
      +
      Deprecated.
      +
      Transform the given value to a new value.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/ValueTransformerSupplier.html b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueTransformerSupplier.html new file mode 100644 index 000000000..1bb3545d7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueTransformerSupplier.html @@ -0,0 +1,176 @@ + + + + +ValueTransformerSupplier (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ValueTransformerSupplier<V,VR>

    +
    +
    +
    +
    Type Parameters:
    +
    V - value type
    +
    VR - transformed value type
    +
    +
    +
    All Superinterfaces:
    +
    ConnectedStoreProvider
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @Deprecated +@FunctionalInterface +public interface ValueTransformerSupplier<V,VR> +extends ConnectedStoreProvider
    +
    Deprecated. +
    Since 4.0. Use FixedKeyProcessorSupplier instead.
    +
    +
    A ValueTransformerSupplier interface which can create one or more ValueTransformer instances. +

    + The supplier should always generate a new instance each time get() gets called. Creating + a single ValueTransformer object and returning the same object reference in get() would be + a violation of the supplier pattern and leads to runtime exceptions.
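    As with TransformerSupplier, a constructor reference satisfies the new-instance-per-call contract (reusing the hypothetical RunningCountValueTransformer sketched above):

    ValueTransformerSupplier<String, Long> supplier = RunningCountValueTransformer::new;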

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      get()
      +
      +
      Deprecated.
      +
      Return a newly constructed ValueTransformer instance.
      +
      +
      +
      +
      +
      +

      Methods inherited from interface org.apache.kafka.streams.processor.ConnectedStoreProvider

      +stores
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        get

        + +
        Deprecated.
        +
        Return a newly constructed ValueTransformer instance. + The supplier should always generate a new instance each time get() gets called. +

        + Creating a single ValueTransformer object and returning the same object reference in get() + is a violation of the supplier pattern and leads to runtime exceptions.

        +
        +
        Returns:
        +
        a newly constructed ValueTransformer instance
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/ValueTransformerWithKey.html b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueTransformerWithKey.html new file mode 100644 index 000000000..af6367fc5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueTransformerWithKey.html @@ -0,0 +1,227 @@ + + + + +ValueTransformerWithKey (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ValueTransformerWithKey<K,V,VR>

    +
    +
    +
    +
    Type Parameters:
    +
    K - key type
    +
    V - value type
    +
    VR - transformed value type
    +
    +
    +
    public interface ValueTransformerWithKey<K,V,VR>
    +
    The ValueTransformerWithKey interface for stateful mapping of a value to a new value (with possible new type). + This is a stateful record-by-record operation, i.e., transform(Object, Object) is invoked individually for each + record of a stream and can access and modify a state that is available beyond a single call of + transform(Object, Object) (cf. ValueMapper for stateless value transformation). + Additionally, this ValueTransformerWithKey can + schedule a method to be + called periodically with the provided context. + Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. + If ValueTransformerWithKey is applied to a KeyValue pair record, the record's key is preserved. +

    + Use ValueTransformerWithKeySupplier to provide new instances of ValueTransformerWithKey to + Kafka Stream's runtime. +

    + If a record's key and value should be modified, Transformer can be used.
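    An illustrative sketch that uses the read-only key to look up per-key state; the store name "last-seen" and the Long value type are assumptions, and the store must be connected to this transformer:

    import org.apache.kafka.streams.kstream.ValueTransformerWithKey;
    import org.apache.kafka.streams.processor.ProcessorContext;
    import org.apache.kafka.streams.state.KeyValueStore;

    // Emits the difference between the current and the previously seen value per key.
    class DeltaTransformer implements ValueTransformerWithKey<String, Long, Long> {
        private KeyValueStore<String, Long> lastSeen;

        @Override
        public void init(final ProcessorContext context) {
            lastSeen = (KeyValueStore<String, Long>) context.getStateStore("last-seen");
        }

        @Override
        public Long transform(final String readOnlyKey, final Long value) {
            final Long previous = lastSeen.get(readOnlyKey);
            lastSeen.put(readOnlyKey, value);
            return previous == null ? value : value - previous;
        }

        @Override
        public void close() { }
    }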

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      Close this processor and clean up any resources.
      +
      +
      void
      + +
      +
      Initialize this transformer.
      +
      + +
      transform(K readOnlyKey, + V value)
      +
      +
      Transform the given [key and] value to a new value.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/ValueTransformerWithKeySupplier.html b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueTransformerWithKeySupplier.html new file mode 100644 index 000000000..d01e25155 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/ValueTransformerWithKeySupplier.html @@ -0,0 +1,172 @@ + + + + +ValueTransformerWithKeySupplier (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ValueTransformerWithKeySupplier<K,V,VR>

    +
    +
    +
    +
    Type Parameters:
    +
    K - key type
    +
    V - value type
    +
    VR - transformed value type
    +
    +
    +
    All Superinterfaces:
    +
    ConnectedStoreProvider, Supplier<ValueTransformerWithKey<K,V,VR>>
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface ValueTransformerWithKeySupplier<K,V,VR> +extends ConnectedStoreProvider, Supplier<ValueTransformerWithKey<K,V,VR>>
    +
    A ValueTransformerWithKeySupplier interface which can create one or more ValueTransformerWithKey instances. +

    + The supplier should always generate a new instance each time get() gets called. Creating + a single ValueTransformerWithKey object and returning the same object reference in get() would be + a violation of the supplier pattern and leads to runtime exceptions.
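    A sketch of a supplier that both creates fresh instances and uses ConnectedStoreProvider.stores() to register the store it needs; it reuses the hypothetical DeltaTransformer sketched earlier, and the store name and serdes are assumptions:

    import java.util.Collections;
    import java.util.Set;
    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.kstream.ValueTransformerWithKey;
    import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier;
    import org.apache.kafka.streams.state.StoreBuilder;
    import org.apache.kafka.streams.state.Stores;

    class DeltaTransformerSupplier implements ValueTransformerWithKeySupplier<String, Long, Long> {
        @Override
        public ValueTransformerWithKey<String, Long, Long> get() {
            return new DeltaTransformer();  // a new instance per call
        }

        @Override
        public Set<StoreBuilder<?>> stores() {
            // The "last-seen" store is added to the topology and connected automatically.
            return Collections.singleton(
                Stores.keyValueStoreBuilder(
                    Stores.persistentKeyValueStore("last-seen"), Serdes.String(), Serdes.Long()));
        }
    }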

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Window.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Window.html new file mode 100644 index 000000000..cad571fcf --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Window.html @@ -0,0 +1,303 @@ + + + + +Window (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Window

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.Window
    +
    +
    +
    +
    public abstract class Window +extends Object
    +
    A single window instance, defined by its start and end timestamp. + Window is agnostic if start/end boundaries are inclusive or exclusive; this is defined by concrete + window implementations. +

    + To specify how Window boundaries are defined use Windows. + For time semantics, see TimestampExtractor.

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      +
      Window(long startMs, + long endMs)
      +
      +
      Create a new window for the given start and end time.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      long
      +
      end()
      +
      +
      Return the end timestamp of this window.
      +
      + + +
      +
      Return the end time of this window.
      +
      +
      boolean
      + +
       
      +
      int
      + +
       
      +
      abstract boolean
      +
      overlap(Window other)
      +
      +
      Check if the given window overlaps with this window.
      +
      +
      long
      + +
      +
      Return the start timestamp of this window.
      +
      + + +
      +
      Return the start time of this window.
      +
      + + +
       
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Window

        +
        public Window(long startMs, + long endMs) + throws IllegalArgumentException
        +
        Create a new window for the given start and end time.
        +
        +
        Parameters:
        +
        startMs - the start timestamp of the window
        +
        endMs - the end timestamp of the window
        +
        Throws:
        +
        IllegalArgumentException - if startMs is negative or if endMs is smaller than startMs
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        start

        +
        public long start()
        +
        Return the start timestamp of this window.
        +
        +
        Returns:
        +
        The start timestamp of this window.
        +
        +
        +
      • +
      • +
        +

        end

        +
        public long end()
        +
        Return the end timestamp of this window.
        +
        +
        Returns:
        +
        The end timestamp of this window.
        +
        +
        +
      • +
      • +
        +

        startTime

        +
        public Instant startTime()
        +
        Return the start time of this window.
        +
        +
        Returns:
        +
        The start time of this window.
        +
        +
        +
      • +
      • +
        +

        endTime

        +
        public Instant endTime()
        +
        Return the end time of this window.
        +
        +
        Returns:
        +
        The end time of this window.
        +
        +
        +
      • +
      • +
        +

        overlap

        +
        public abstract boolean overlap(Window other)
        +
        Check if the given window overlaps with this window. + Should throw an IllegalArgumentException if the other window has a different type than + this window.
        +
        +
        Parameters:
        +
        other - another window of the same type
        +
        Returns:
        +
        true if other overlaps with this window—false otherwise
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object obj)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Windowed.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Windowed.html new file mode 100644 index 000000000..5d9d50556 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Windowed.html @@ -0,0 +1,241 @@ + + + + +Windowed (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Windowed<K>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.Windowed<K>
    +
    +
    +
    +
    Type Parameters:
    +
    K - type of the key
    +
    +
    +
    public class Windowed<K> +extends Object
    +
    The result key type of a windowed stream aggregation. +

    + If a KStream gets grouped and aggregated using a window-aggregation, the resulting KTable is a + so-called "windowed KTable" with a combined key type that encodes the corresponding aggregation window and + the original record key. + Thus, a windowed KTable has type <Windowed<K>,V>.
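    As an illustrative sketch (the input stream and the five-minute window size are assumptions), the windowed key exposes both the original key and the window boundaries:

    import java.time.Duration;
    import org.apache.kafka.streams.kstream.*;

    class WindowedKeyExample {
        static void printWindowedCounts(final KStream<String, String> pageViews) {
            pageViews
                .groupByKey()
                .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(5)))
                .count()
                .toStream()
                .foreach((windowedKey, count) ->
                    System.out.println(windowedKey.key()
                        + " @ [" + windowedKey.window().startTime()
                        + ", " + windowedKey.window().endTime() + ") = " + count));
        }
    }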

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      +
      Windowed(K key, + Window window)
      +
       
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      + +
       
      +
      int
      + +
       
      + +
      key()
      +
      +
      Return the key of the window.
      +
      + + +
       
      + + +
      +
      Return the window containing the values associated with this key.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Windowed

        +
        public Windowed(K key, + Window window)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        key

        +
        public K key()
        +
        Return the key of the window.
        +
        +
        Returns:
        +
        the key of the window
        +
        +
        +
      • +
      • +
        +

        window

        +
        public Window window()
        +
        Return the window containing the values associated with this key.
        +
        +
        Returns:
        +
        the window containing the values
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object obj)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/WindowedSerdes.SessionWindowedSerde.html b/static/41/javadoc/org/apache/kafka/streams/kstream/WindowedSerdes.SessionWindowedSerde.html new file mode 100644 index 000000000..0ee924a9e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/WindowedSerdes.SessionWindowedSerde.html @@ -0,0 +1,148 @@ + + + + +WindowedSerdes.SessionWindowedSerde (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class WindowedSerdes.SessionWindowedSerde<T>

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.Serdes.WrapperSerde<Windowed<T>> +
    org.apache.kafka.streams.kstream.WindowedSerdes.SessionWindowedSerde<T>
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Serde<Windowed<T>>
    +
    +
    +
    Enclosing class:
    +
    WindowedSerdes
    +
    +
    +
    public static class WindowedSerdes.SessionWindowedSerde<T> +extends Serdes.WrapperSerde<Windowed<T>>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        SessionWindowedSerde

        +
        public SessionWindowedSerde()
        +
        +
      • +
      • +
        +

        SessionWindowedSerde

        +
        public SessionWindowedSerde(Serde<T> inner)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/WindowedSerdes.TimeWindowedSerde.html b/static/41/javadoc/org/apache/kafka/streams/kstream/WindowedSerdes.TimeWindowedSerde.html new file mode 100644 index 000000000..9895a146e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/WindowedSerdes.TimeWindowedSerde.html @@ -0,0 +1,181 @@ + + + + +WindowedSerdes.TimeWindowedSerde (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class WindowedSerdes.TimeWindowedSerde<T>

    +
    +
    java.lang.Object +
    org.apache.kafka.common.serialization.Serdes.WrapperSerde<Windowed<T>> +
    org.apache.kafka.streams.kstream.WindowedSerdes.TimeWindowedSerde<T>
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Closeable, AutoCloseable, Serde<Windowed<T>>
    +
    +
    +
    Enclosing class:
    +
    WindowedSerdes
    +
    +
    +
    public static class WindowedSerdes.TimeWindowedSerde<T> +extends Serdes.WrapperSerde<Windowed<T>>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        TimeWindowedSerde

        +
        public TimeWindowedSerde()
        +
        +
      • +
      • +
        +

        TimeWindowedSerde

        +
        public TimeWindowedSerde(Serde<T> inner, + long windowSize)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      + +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/WindowedSerdes.html b/static/41/javadoc/org/apache/kafka/streams/kstream/WindowedSerdes.html new file mode 100644 index 000000000..60806e395 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/WindowedSerdes.html @@ -0,0 +1,195 @@ + + + + +WindowedSerdes (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class WindowedSerdes

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.WindowedSerdes
    +
    +
    +
    +
    public class WindowedSerdes +extends Object
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        WindowedSerdes

        +
        public WindowedSerdes()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        timeWindowedSerdeFrom

        +
        public static <T> Serde<Windowed<T>> timeWindowedSerdeFrom(Class<T> type, + long windowSize)
        +
        Construct a TimeWindowedSerde object to deserialize changelog topic + for the specified inner class type and window size.
        +
        +
      • +
      • +
        +

        sessionWindowedSerdeFrom

        +
        public static <T> Serde<Windowed<T>> sessionWindowedSerdeFrom(Class<T> type)
        +
        Construct a SessionWindowedSerde object for the specified inner class type.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/Windows.html b/static/41/javadoc/org/apache/kafka/streams/kstream/Windows.html new file mode 100644 index 000000000..605967e28 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/Windows.html @@ -0,0 +1,201 @@ + + + + +Windows (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Windows<W extends Window>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.kstream.Windows<W>
    +
    +
    +
    +
    Type Parameters:
    +
    W - type of the window instance
    +
    +
    +
    Direct Known Subclasses:
    +
    JoinWindows, TimeWindows, UnlimitedWindows
    +
    +
    +
    public abstract class Windows<W extends Window> +extends Object
    +
    The window specification for fixed size windows that is used to define window boundaries and grace period. +

    + The grace period defines how long to wait for out-of-order events. That is, windows will continue to accept new records until stream_time >= window_end + grace_period. + Records that arrive after the grace period has passed are considered late and are dropped rather than processed. +

    + Warning: It may be unsafe to use objects of this class in set- or map-like collections, + since the equals and hashCode methods depend on mutable fields.
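    For example, a concrete Windows implementation such as TimeWindows pairs the window size with a grace period; in the sketch below (sizes chosen arbitrarily), records arriving up to 30 seconds of stream time after a window ends are still accepted, and later ones are dropped as late:

    TimeWindows fiveMinuteWindows =
        TimeWindows.ofSizeAndGrace(Duration.ofMinutes(5), Duration.ofSeconds(30));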

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      abstract long
      + +
      +
      Return the window grace period (the time to admit + out-of-order events after the end of the window). + + Delay is defined as (stream_time - record_timestamp).
      +
      +
      abstract long
      + +
      +
      Return the size of the specified windows in milliseconds.
      +
      +
      abstract Map<Long,W>
      +
      windowsFor(long timestamp)
      +
      +
      Create all windows that contain the provided timestamp, indexed by non-negative window start timestamps.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        windowsFor

        +
        public abstract Map<Long,W> windowsFor(long timestamp)
        +
        Create all windows that contain the provided timestamp, indexed by non-negative window start timestamps.
        +
        +
        Parameters:
        +
        timestamp - the timestamp window should get created for
        +
        Returns:
        +
        a map of windowStartTimestamp -> Window entries
        +
        +
        +
      • +
      • +
        +

        size

        +
        public abstract long size()
        +
        Return the size of the specified windows in milliseconds.
        +
        +
        Returns:
        +
        the size of the specified windows
        +
        +
        +
      • +
      • +
        +

        gracePeriodMs

        +
        public abstract long gracePeriodMs()
        +
        Return the window grace period (the time to admit + out-of-order events after the end of the window). + + Delay is defined as (stream_time - record_timestamp).
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/package-summary.html b/static/41/javadoc/org/apache/kafka/streams/kstream/package-summary.html new file mode 100644 index 000000000..df3c174b4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/package-summary.html @@ -0,0 +1,364 @@ + + + + +org.apache.kafka.streams.kstream (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.streams.kstream

    +
    +
    +
    package org.apache.kafka.streams.kstream
    +
    +
    Provides a high-level programming model (DSL) to express a (stateful) data flow computation over input streams and tables. + Use StreamsBuilder as entry for your program.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/kstream/package-tree.html b/static/41/javadoc/org/apache/kafka/streams/kstream/package-tree.html new file mode 100644 index 000000000..0e42c5df9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/kstream/package-tree.html @@ -0,0 +1,174 @@ + + + + +org.apache.kafka.streams.kstream Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.streams.kstream

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +

    Enum Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/package-summary.html b/static/41/javadoc/org/apache/kafka/streams/package-summary.html new file mode 100644 index 000000000..beedcb720 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/package-summary.html @@ -0,0 +1,253 @@ + + + + +org.apache.kafka.streams (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.streams

    +
    +
    +
    package org.apache.kafka.streams
    +
    +
    Provides the Kafka Streams library for building streaming data applications.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/package-tree.html b/static/41/javadoc/org/apache/kafka/streams/package-tree.html new file mode 100644 index 000000000..590b6bcb7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/package-tree.html @@ -0,0 +1,128 @@ + + + + +org.apache.kafka.streams Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.streams

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +

    Enum Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/BatchingStateRestoreCallback.html b/static/41/javadoc/org/apache/kafka/streams/processor/BatchingStateRestoreCallback.html new file mode 100644 index 000000000..e888fa5df --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/BatchingStateRestoreCallback.html @@ -0,0 +1,155 @@ + + + + +BatchingStateRestoreCallback (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface BatchingStateRestoreCallback

    +
    +
    +
    +
    All Superinterfaces:
    +
    StateRestoreCallback
    +
    +
    +
    public interface BatchingStateRestoreCallback +extends StateRestoreCallback
    +
    Interface for batching restoration of a StateStore. + + It is expected that implementations of this class will not call the StateRestoreCallback.restore(byte[], byte[]) method.
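    A hedged sketch of a batching restore callback that rebuilds a toy in-memory map; a real implementation would write into the actual StateStore being restored:

    import java.util.Collection;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.KeyValue;
    import org.apache.kafka.streams.processor.BatchingStateRestoreCallback;

    class InMemoryBulkRestoreCallback implements BatchingStateRestoreCallback {
        private final Map<Bytes, byte[]> restored = new HashMap<>();

        @Override
        public void restoreAll(final Collection<KeyValue<byte[], byte[]>> records) {
            // Invoked repeatedly with batches until the store is fully restored.
            for (final KeyValue<byte[], byte[]> record : records) {
                restored.put(Bytes.wrap(record.key), record.value);
            }
        }
        // restore(byte[], byte[]) keeps its default implementation and is not expected to be called.
    }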
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      default void
      +
      restore(byte[] key, + byte[] value)
      +
       
      +
      void
      +
      restoreAll(Collection<KeyValue<byte[],byte[]>> records)
      +
      +
      Called to restore a number of records.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        restoreAll

        +
        void restoreAll(Collection<KeyValue<byte[],byte[]>> records)
        +
        Called to restore a number of records. This method is called repeatedly until the StateStore is fully + restored.
        +
        +
        Parameters:
        +
        records - the records to restore.
        +
        +
        +
      • +
      • +
        +

        restore

        +
        default void restore(byte[] key, + byte[] value)
        +
        +
        Specified by:
        +
        restore in interface StateRestoreCallback
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/Cancellable.html b/static/41/javadoc/org/apache/kafka/streams/processor/Cancellable.html new file mode 100644 index 000000000..6b0a00766 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/Cancellable.html @@ -0,0 +1,141 @@ + + + + +Cancellable (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Cancellable

    +
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface Cancellable
    + +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      Cancel the scheduled operation to avoid future calls.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        cancel

        +
        void cancel()
        +
        Cancel the scheduled operation to avoid future calls.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/CommitCallback.html b/static/41/javadoc/org/apache/kafka/streams/processor/CommitCallback.html new file mode 100644 index 000000000..fac5524e5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/CommitCallback.html @@ -0,0 +1,136 @@ + + + + +CommitCallback (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface CommitCallback

    +
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @Evolving +@FunctionalInterface +public interface CommitCallback
    +
    Stores can register this callback to be notified upon successful commit.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
       
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      + +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/ConnectedStoreProvider.html b/static/41/javadoc/org/apache/kafka/streams/processor/ConnectedStoreProvider.html new file mode 100644 index 000000000..f357febf4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/ConnectedStoreProvider.html @@ -0,0 +1,206 @@ + + + + +ConnectedStoreProvider (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ConnectedStoreProvider

    +
    +
    +
    +
    All Known Subinterfaces:
    +
    FixedKeyProcessorSupplier<KIn,VIn,VOut>, ProcessorSupplier<KIn,VIn,KOut,VOut>, TransformerSupplier<K,V,R>, ValueTransformerSupplier<V,VR>, ValueTransformerWithKeySupplier<K,V,VR>, WrappedFixedKeyProcessorSupplier<KIn,VIn,VOut>, WrappedProcessorSupplier<KIn,VIn,KOut,VOut>
    +
    +
    +
    public interface ConnectedStoreProvider
    +
    Provides a set of StoreBuilders that will be automatically added to the topology and connected to the + associated processor. +

    + Implementing this interface is recommended when the associated processor wants to encapsulate its usage of its state + stores, rather than exposing them to the user building the topology. +

    + In the event that separate but related processors may want to share the same store, different ConnectedStoreProviders + may provide the same instance of StoreBuilder, as shown below. +

    
    + class StateSharingProcessors {
    +     StoreBuilder<KeyValueStore<String, String>> storeBuilder =
    +         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myStore"), Serdes.String(), Serdes.String());
    +
    +     class SupplierA implements ProcessorSupplier<String, Integer> {
    +         Processor<String, Integer> get() {
    +             return new Processor() {
    +                 private StateStore store;
    +
    +                 void init(ProcessorContext context) {
    +                     this.store = context.getStateStore("myStore");
    +                 }
    +
    +                 void process(String key, Integer value) {
    +                     // can access this.store
    +                 }
    +
    +                 void close() {
    +                     // can access this.store
    +                 }
    +             }
    +         }
    +
    +         Set<StoreBuilder<?>> stores() {
    +             return Collections.singleton(storeBuilder);
    +         }
    +     }
    +
    +     class SupplierB implements ProcessorSupplier<String, String> {
    +         Processor<String, String> get() {
    +             return new Processor() {
    +                 private StateStore store;
    +
    +                 void init(ProcessorContext context) {
    +                     this.store = context.getStateStore("myStore");
    +                 }
    +
    +                 void process(String key, String value) {
    +                     // can access this.store
    +                 }
    +
    +                 void close() {
    +                     // can access this.store
    +                 }
    +             }
    +         }
    +
    +         Set<StoreBuilder<?>> stores() {
    +             return Collections.singleton(storeBuilder);
    +         }
    +     }
    + }
    + 
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      default Set<StoreBuilder<?>>
      + +
       
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        stores

        +
        default Set<StoreBuilder<?>> stores()
        +
        +
        Returns:
        +
        the state stores to be connected and added, or null if no stores should be automatically connected and added.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/FailOnInvalidTimestamp.html b/static/41/javadoc/org/apache/kafka/streams/processor/FailOnInvalidTimestamp.html new file mode 100644 index 000000000..dfbf74ff9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/FailOnInvalidTimestamp.html @@ -0,0 +1,230 @@ + + + + +FailOnInvalidTimestamp (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class FailOnInvalidTimestamp

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.FailOnInvalidTimestamp
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    TimestampExtractor
    +
    +
    +
    public class FailOnInvalidTimestamp +extends Object
    +
    Retrieves embedded metadata timestamps from Kafka messages. + If a record has a negative (invalid) timestamp value, this extractor raises an exception. +

    + Embedded metadata timestamp was introduced in "KIP-32: Add timestamps to Kafka message" for the new + 0.10+ Kafka message format. +

    + Here, "embedded metadata" refers to the fact that compatible Kafka producer clients automatically and + transparently embed such timestamps into message metadata they send to Kafka, which can then be retrieved + via this timestamp extractor. +

    + If the embedded metadata timestamp represents CreateTime (cf. Kafka broker setting + message.timestamp.type and Kafka topic setting log.message.timestamp.type), + this extractor effectively provides event-time semantics. + If LogAppendTime is used as broker/topic setting to define the embedded metadata timestamps, + using this extractor effectively provides ingestion-time semantics. +

    + If you need processing-time semantics, use WallclockTimestampExtractor.
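    To make this the application-wide default extractor, it can be set via StreamsConfig (a minimal sketch; the rest of the configuration is omitted):

    Properties props = new Properties();
    props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, FailOnInvalidTimestamp.class);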

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        FailOnInvalidTimestamp

        +
        public FailOnInvalidTimestamp()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        onInvalidTimestamp

        +
        public long onInvalidTimestamp(ConsumerRecord<Object,Object> record, + long recordTimestamp, + long partitionTime) + throws StreamsException
        +
        Raises an exception on every call.
        +
        +
        Parameters:
        +
        record - a data record
        +
        recordTimestamp - the timestamp extracted from the record
        +
        partitionTime - the highest extracted valid timestamp of the current record's partition (could be -1 if unknown)
        +
        Returns:
        +
        nothing; always raises an exception
        +
        Throws:
        +
        StreamsException - on every invocation
        +
        +
        +
      • +
      • +
        +

        extract

        +
        public long extract(ConsumerRecord<Object,Object> record, + long partitionTime)
        +
        Extracts the embedded metadata timestamp from the given ConsumerRecord.
        +
        +
        Specified by:
        +
        extract in interface TimestampExtractor
        +
        Parameters:
        +
        record - a data record
        +
        partitionTime - the highest extracted valid timestamp of the current record's partition (could be -1 if unknown)
        +
        Returns:
        +
        the embedded metadata timestamp of the given ConsumerRecord
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/LogAndSkipOnInvalidTimestamp.html b/static/41/javadoc/org/apache/kafka/streams/processor/LogAndSkipOnInvalidTimestamp.html new file mode 100644 index 000000000..f17d17548 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/LogAndSkipOnInvalidTimestamp.html @@ -0,0 +1,231 @@ + + + + +LogAndSkipOnInvalidTimestamp (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class LogAndSkipOnInvalidTimestamp

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    TimestampExtractor
    +
    +
    +
    public class LogAndSkipOnInvalidTimestamp +extends Object
    +
    Retrieves embedded metadata timestamps from Kafka messages. + If a record has a negative (invalid) timestamp value, the timestamp is returned as-is; + in addition, a WARN message is logged in your application. + Returning the timestamp as-is results in dropping the record, i.e., the record will not be processed. +

    + Embedded metadata timestamp was introduced in "KIP-32: Add timestamps to Kafka message" for the new + 0.10+ Kafka message format. +

    + Here, "embedded metadata" refers to the fact that compatible Kafka producer clients automatically and + transparently embed such timestamps into message metadata they send to Kafka, which can then be retrieved + via this timestamp extractor. +

    + If the embedded metadata timestamp represents CreateTime (cf. Kafka broker setting + message.timestamp.type and Kafka topic setting log.message.timestamp.type), + this extractor effectively provides event-time semantics. + If LogAppendTime is used as broker/topic setting to define the embedded metadata timestamps, + using this extractor effectively provides ingestion-time semantics. +

    + If you need processing-time semantics, use WallclockTimestampExtractor.
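    As a rough sketch (the topic name and serdes are placeholders), the extractor can also be applied to a single source rather than as the application-wide default:

        import org.apache.kafka.common.serialization.Serdes;
        import org.apache.kafka.streams.StreamsBuilder;
        import org.apache.kafka.streams.kstream.Consumed;
        import org.apache.kafka.streams.kstream.KStream;
        import org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp;

        // Minimal sketch: log-and-skip invalid-timestamp records for the "events" topic only.
        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> events = builder.stream(
            "events",
            Consumed.with(Serdes.String(), Serdes.String())
                    .withTimestampExtractor(new LogAndSkipOnInvalidTimestamp()));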

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        LogAndSkipOnInvalidTimestamp

        +
        public LogAndSkipOnInvalidTimestamp()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        onInvalidTimestamp

        +
        public long onInvalidTimestamp(ConsumerRecord<Object,Object> record, + long recordTimestamp, + long partitionTime)
        +
        Logs a WARN message when the extracted timestamp is invalid (negative) but returns the invalid timestamp as-is, + which ultimately causes the record to be skipped and not processed.
        +
        +
        Parameters:
        +
        record - a data record
        +
        recordTimestamp - the timestamp extracted from the record
        +
        partitionTime - the highest extracted valid timestamp of the current record's partition (could be -1 if unknown)
        +
        Returns:
        +
        the originally extracted timestamp of the record
        +
        +
        +
      • +
      • +
        +

        extract

        +
        public long extract(ConsumerRecord<Object,Object> record, + long partitionTime)
        +
        Extracts the embedded metadata timestamp from the given ConsumerRecord.
        +
        +
        Specified by:
        +
        extract in interface TimestampExtractor
        +
        Parameters:
        +
        record - a data record
        +
        partitionTime - the highest extracted valid timestamp of the current record's partition (could be -1 if unknown)
        +
        Returns:
        +
        the embedded metadata timestamp of the given ConsumerRecord
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/MockProcessorContext.CapturedForward.html b/static/41/javadoc/org/apache/kafka/streams/processor/MockProcessorContext.CapturedForward.html new file mode 100644 index 000000000..d290731c1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/MockProcessorContext.CapturedForward.html @@ -0,0 +1,196 @@ + + + + +MockProcessorContext.CapturedForward (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MockProcessorContext.CapturedForward

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.MockProcessorContext.CapturedForward
    +
    +
    +
    +
    Enclosing class:
    +
    MockProcessorContext
    +
    +
    +
    public static class MockProcessorContext.CapturedForward +extends Object
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        childName

        +
        public String childName()
        +
        The child this data was forwarded to.
        +
        +
        Returns:
        +
        The child name, or null if it was broadcast.
        +
        +
        +
      • +
      • +
        +

        timestamp

        +
        public long timestamp()
        +
        The timestamp attached to the forwarded record.
        +
        +
        Returns:
        +
        A timestamp, or -1 if none was forwarded.
        +
        +
        +
      • +
      • +
        +

        keyValue

        +
        public KeyValue keyValue()
        +
        The data forwarded.
        +
        +
        Returns:
        +
        A key/value pair. Not null.
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        headers

        +
        public Headers headers()
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/MockProcessorContext.CapturedPunctuator.html b/static/41/javadoc/org/apache/kafka/streams/processor/MockProcessorContext.CapturedPunctuator.html new file mode 100644 index 000000000..9251ae98a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/MockProcessorContext.CapturedPunctuator.html @@ -0,0 +1,172 @@ + + + + +MockProcessorContext.CapturedPunctuator (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MockProcessorContext.CapturedPunctuator

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.MockProcessorContext.CapturedPunctuator
    +
    +
    +
    +
    Enclosing class:
    +
    MockProcessorContext
    +
    +
    +
    public static class MockProcessorContext.CapturedPunctuator +extends Object
    +
    MockProcessorContext.CapturedPunctuator holds captured punctuators, along with their scheduling information.
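    A brief sketch of how captured punctuators can be fired by hand in a test (in practice the schedule(...) call would come from the code under test during its initialization; the java.time.Duration import and the interval are assumptions of the example):

        // Sketch: punctuations registered on the mock are only captured, never executed automatically.
        MockProcessorContext context = new MockProcessorContext();
        context.schedule(Duration.ofSeconds(30), PunctuationType.WALL_CLOCK_TIME,
            timestamp -> System.out.println("punctuate @ " + timestamp));

        MockProcessorContext.CapturedPunctuator captured = context.scheduledPunctuators().get(0);
        if (!captured.cancelled() && captured.getType() == PunctuationType.WALL_CLOCK_TIME) {
            captured.getPunctuator().punctuate(System.currentTimeMillis());  // fire it by hand
        }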
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        getIntervalMs

        +
        public long getIntervalMs()
        +
        +
      • +
      • +
        +

        getType

        +
        public PunctuationType getType()
        +
        +
      • +
      • +
        +

        getPunctuator

        +
        public Punctuator getPunctuator()
        +
        +
      • +
      • +
        +

        cancel

        +
        public void cancel()
        +
        +
      • +
      • +
        +

        cancelled

        +
        public boolean cancelled()
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/MockProcessorContext.html b/static/41/javadoc/org/apache/kafka/streams/processor/MockProcessorContext.html new file mode 100644 index 000000000..9b12bb2d3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/MockProcessorContext.html @@ -0,0 +1,1094 @@ + + + + +MockProcessorContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MockProcessorContext

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.MockProcessorContext
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    org.apache.kafka.streams.processor.internals.RecordCollector.Supplier, ProcessorContext
    +
    +
    +
    @Deprecated +public class MockProcessorContext +extends Object +implements ProcessorContext, org.apache.kafka.streams.processor.internals.RecordCollector.Supplier
    +
    Deprecated. +
    Since 4.0. Use MockProcessorContext instead.
    +
    +
    MockProcessorContext is a mock of ProcessorContext for users to test their + ValueTransformer implementations. +

    + The tests for this class (org.apache.kafka.streams.MockProcessorContextTest) include several behavioral + tests that serve as example usage. +

    + Note that this class does not take any automated actions (such as firing scheduled punctuators). + It simply captures any data it witnesses.
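    A minimal, self-contained sketch of the capture behaviour described above (no real topology involved; the topic name, metadata values, and forwarded key/value are invented for the example):

        import org.apache.kafka.streams.processor.MockProcessorContext;

        public class MockProcessorContextSketch {
            public static void main(final String[] args) {
                final MockProcessorContext context = new MockProcessorContext();

                // Simulate what the framework does before each record: expose record metadata.
                context.setRecordMetadata("input-topic", 0, 0L, null, 42L);

                // Whatever the code under test forwards is only captured, never sent anywhere.
                context.forward("key", "VALUE");

                for (final MockProcessorContext.CapturedForward forward : context.forwarded()) {
                    System.out.println(forward.keyValue() + " @ " + forward.timestamp());
                }

                // commit() requests are captured as well.
                context.commit();
                System.out.println("commit requested? " + context.committed());
            }
        }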

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        MockProcessorContext

        +
        public MockProcessorContext()
        +
        Deprecated.
        +
        Create a MockProcessorContext with dummy config and taskId and null stateDir. + Most unit tests using this mock won't need to know the taskId, + and most unit tests should be able to get by with the + InMemoryKeyValueStore, so the stateDir won't matter.
        +
        +
      • +
      • +
        +

        MockProcessorContext

        +
        public MockProcessorContext(Properties config)
        +
        Deprecated.
        +
        Create a MockProcessorContext with dummy taskId and null stateDir. + Most unit tests using this mock won't need to know the taskId, + and most unit tests should be able to get by with the + InMemoryKeyValueStore, so the stateDir won't matter.
        +
        +
        Parameters:
        +
        config - a Properties object, used to configure the context and the processor.
        +
        +
        +
      • +
      • +
        +

        MockProcessorContext

        +
        public MockProcessorContext(Properties config, + TaskId taskId, + File stateDir)
        +
        Deprecated.
        +
        Create a MockProcessorContext with a specified taskId and null stateDir.
        +
        +
        Parameters:
        +
        config - a Properties object, used to configure the context and the processor.
        +
        taskId - a TaskId, which the context makes available via taskId().
        +
        stateDir - a File, which the context makes available via stateDir().
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        applicationId

        +
        public String applicationId()
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Return the application id.
        +
        +
        Specified by:
        +
        applicationId in interface ProcessorContext
        +
        Returns:
        +
        the application id
        +
        +
        +
      • +
      • +
        +

        taskId

        +
        public TaskId taskId()
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Return the task id.
        +
        +
        Specified by:
        +
        taskId in interface ProcessorContext
        +
        Returns:
        +
        the task id
        +
        +
        +
      • +
      • +
        +

        appConfigs

        +
        public Map<String,Object> appConfigs()
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Return all the application config properties as key/value pairs. + +

        The config properties are defined in the StreamsConfig + object and associated to the ProcessorContext. + +

        The type of the values is dependent on the type of the property + (e.g. the value of DEFAULT_KEY_SERDE_CLASS_CONFIG + will be of type Class, even if it was specified as a String to + StreamsConfig(Map)).

        +
        +
        Specified by:
        +
        appConfigs in interface ProcessorContext
        +
        Returns:
        +
        all the key/values from the StreamsConfig properties
        +
        +
        +
      • +
      • +
        +

        appConfigsWithPrefix

        +
        public Map<String,Object> appConfigsWithPrefix(String prefix)
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Return all the application config properties with the given key prefix, as key/value pairs + stripping the prefix. + +

        The config properties are defined in the StreamsConfig + object and associated to the ProcessorContext.

        +
        +
        Specified by:
        +
        appConfigsWithPrefix in interface ProcessorContext
        +
        Parameters:
        +
        prefix - the properties prefix
        +
        Returns:
        +
        the key/values matching the given prefix from the StreamsConfig properties.
        +
        +
        +
      • +
      • +
        +

        currentSystemTimeMs

        +
        public long currentSystemTimeMs()
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Return the current system timestamp (also called wall-clock time) in milliseconds. + +

        Note: this method returns the internally cached system timestamp from the Kafka Stream runtime. + Thus, it may return a different value compared to System.currentTimeMillis().

        +
        +
        Specified by:
        +
        currentSystemTimeMs in interface ProcessorContext
        +
        Returns:
        +
        the current system timestamp in milliseconds
        +
        +
        +
      • +
      • +
        +

        currentStreamTimeMs

        +
        public long currentStreamTimeMs()
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Return the current stream-time in milliseconds. + +

        Stream-time is the maximum observed record timestamp so far + (including the currently processed record), i.e., it can be considered a high-watermark. + Stream-time is tracked on a per-task basis and is preserved across restarts and during task migration. + +

        Note: this method is not supported for global processors (cf. + Topology.addGlobalStore(...) + and StreamsBuilder.addGlobalStore(...)), + because there is no concept of stream-time for this case. + Calling this method in a global processor will result in an UnsupportedOperationException.

        +
        +
        Specified by:
        +
        currentStreamTimeMs in interface ProcessorContext
        +
        Returns:
        +
        the current stream-time in milliseconds
        +
        +
        +
      • +
      • +
        +

        keySerde

        +
        public Serde<?> keySerde()
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Return the default key serde.
        +
        +
        Specified by:
        +
        keySerde in interface ProcessorContext
        +
        Returns:
        +
        the key serializer
        +
        +
        +
      • +
      • +
        +

        valueSerde

        +
        public Serde<?> valueSerde()
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Return the default value serde.
        +
        +
        Specified by:
        +
        valueSerde in interface ProcessorContext
        +
        Returns:
        +
        the value serializer
        +
        +
        +
      • +
      • +
        +

        stateDir

        +
        public File stateDir()
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Return the state directory for the partition.
        +
        +
        Specified by:
        +
        stateDir in interface ProcessorContext
        +
        Returns:
        +
        the state directory
        +
        +
        +
      • +
      • +
        +

        metrics

        +
        public StreamsMetrics metrics()
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Return Metrics instance.
        +
        +
        Specified by:
        +
        metrics in interface ProcessorContext
        +
        Returns:
        +
        StreamsMetrics
        +
        +
        +
      • +
      • +
        +

        setRecordMetadata

        +
        public void setRecordMetadata(String topic, + int partition, + long offset, + Headers headers, + long timestamp)
        +
        Deprecated.
        +
        The context exposes these metadata for use in the processor. Normally, they are set by the Kafka Streams framework, + but for the purpose of driving unit tests, you can set them directly.
        +
        +
        Parameters:
        +
        topic - A topic name
        +
        partition - A partition number
        +
        offset - A record offset
        +
        headers - Record headers
        +
        timestamp - A record timestamp
        +
        +
        +
      • +
      • +
        +

        setTopic

        +
        public void setTopic(String topic)
        +
        Deprecated.
        +
        The context exposes this metadata for use in the processor. Normally, they are set by the Kafka Streams framework, + but for the purpose of driving unit tests, you can set it directly. Setting this attribute doesn't affect the others.
        +
        +
        Parameters:
        +
        topic - A topic name
        +
        +
        +
      • +
      • +
        +

        setPartition

        +
        public void setPartition(int partition)
        +
        Deprecated.
        +
        The context exposes this metadata for use in the processor. Normally, they are set by the Kafka Streams framework, + but for the purpose of driving unit tests, you can set it directly. Setting this attribute doesn't affect the others.
        +
        +
        Parameters:
        +
        partition - A partition number
        +
        +
        +
      • +
      • +
        +

        setOffset

        +
        public void setOffset(long offset)
        +
        Deprecated.
        +
        The context exposes this metadata for use in the processor. Normally, they are set by the Kafka Streams framework, + but for the purpose of driving unit tests, you can set it directly. Setting this attribute doesn't affect the others.
        +
        +
        Parameters:
        +
        offset - A record offset
        +
        +
        +
      • +
      • +
        +

        setHeaders

        +
        public void setHeaders(Headers headers)
        +
        Deprecated.
        +
        The context exposes this metadata for use in the processor. Normally, they are set by the Kafka Streams framework, + but for the purpose of driving unit tests, you can set it directly. Setting this attribute doesn't affect the others.
        +
        +
        Parameters:
        +
        headers - Record headers
        +
        +
        +
      • +
      • +
        +

        setRecordTimestamp

        +
        public void setRecordTimestamp(long recordTimestamp)
        +
        Deprecated.
        +
        The context exposes this metadata for use in the processor. Normally, they are set by the Kafka Streams framework, + but for the purpose of driving unit tests, you can set it directly. Setting this attribute doesn't affect the others.
        +
        +
        Parameters:
        +
        recordTimestamp - A record timestamp
        +
        +
        +
      • +
      • +
        +

        setCurrentSystemTimeMs

        +
        public void setCurrentSystemTimeMs(long currentSystemTimeMs)
        +
        Deprecated.
        +
        +
      • +
      • +
        +

        setCurrentStreamTimeMs

        +
        public void setCurrentStreamTimeMs(long currentStreamTimeMs)
        +
        Deprecated.
        +
        +
      • +
      • +
        +

        topic

        +
        public String topic()
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Return the topic name of the current input record; could be null if it is not + available. + +

        For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, the record won't have an associated topic. + Another example is + KTable.transformValues(ValueTransformerWithKeySupplier, String...) + (and siblings), that do not always guarantee to provide a valid topic name, as they might be + executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL.

        +
        +
        Specified by:
        +
        topic in interface ProcessorContext
        +
        Returns:
        +
        the topic name
        +
        +
        +
      • +
      • +
        +

        partition

        +
        public int partition()
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Return the partition id of the current input record; could be -1 if it is not + available. + +

        For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, the record won't have an associated partition id. + Another example is + KTable.transformValues(ValueTransformerWithKeySupplier, String...) + (and siblings), that do not always guarantee to provide a valid partition id, as they might be + executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL.

        +
        +
        Specified by:
        +
        partition in interface ProcessorContext
        +
        Returns:
        +
        the partition id
        +
        +
        +
      • +
      • +
        +

        offset

        +
        public long offset()
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Return the offset of the current input record; could be -1 if it is not + available. + +

        For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, the record won't have an associated offset. + Another example is + KTable.transformValues(ValueTransformerWithKeySupplier, String...) + (and siblings), that do not always guarantee to provide a valid offset, as they might be + executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL.

        +
        +
        Specified by:
        +
        offset in interface ProcessorContext
        +
        Returns:
        +
        the offset
        +
        +
        +
      • +
      • +
        +

        headers

        +
        public Headers headers()
        +
        Deprecated.
        +
        Returns the headers of the current input record; could be null if it is not + available. + +

        Note, that headers should never be null in the actual Kafka Streams runtime, + even if they could be empty. However, this mock does not guarantee non-null headers. + Thus, you either need to add a null check to your production code to use this mock + for testing, or you always need to set headers manually via setHeaders(Headers) to + avoid a NullPointerException from your ValueTransformer implementation.

        +
        +
        Specified by:
        +
        headers in interface ProcessorContext
        +
        Returns:
        +
        the headers
        +
        +
        +
      • +
      • +
        +

        timestamp

        +
        public long timestamp()
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Return the current timestamp. + +

        If it is triggered while processing a record streamed from the source processor, + timestamp is defined as the timestamp of the current input record; the timestamp is extracted from + ConsumerRecord by TimestampExtractor. + Note, that an upstream Processor might have set a new timestamp by calling + ProcessorContext.forward(org.apache.kafka.streams.processor.api.Record). + In particular, some Kafka Streams DSL operators set result record timestamps explicitly, + to guarantee deterministic results. + +

        If it is triggered while processing a record generated not from the source processor (for example, + if this method is invoked from the punctuate call), timestamp is defined as the current + task's stream time, which is defined as the largest timestamp of any record processed by the task.

        +
        +
        Specified by:
        +
        timestamp in interface ProcessorContext
        +
        Returns:
        +
        the timestamp
        +
        +
        +
      • +
      • +
        +

        register

        +
        public void register(StateStore store, + StateRestoreCallback stateRestoreCallbackIsIgnoredInMock)
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Register and possibly restores the specified storage engine.
        +
        +
        Specified by:
        +
        register in interface ProcessorContext
        +
        Parameters:
        +
        store - the storage engine
        +
        stateRestoreCallbackIsIgnoredInMock - the restoration callback logic for log-backed state stores upon restart
        +
        +
        +
      • +
      • +
        +

        getStateStore

        +
        public <S extends StateStore> S getStateStore(String name)
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Get the state store given the store name. + +

        The returned state store represent one shard of the overall state, which belongs to the current task. + The returned shard of the state store may only be used by the current + Transformer, ValueTransformer, + or ValueTransformerWithKey instance. + Sharing a shard across different transformers (ie, from different "sibling" tasks; same sub-topology but different + partition) may lead to data corruption and/or data loss.

        +
        +
        Specified by:
        +
        getStateStore in interface ProcessorContext
        +
        Type Parameters:
        +
        S - The type or interface of the store to return
        +
        Parameters:
        +
        name - The store name
        +
        Returns:
        +
        The state store instance
        +
        +
        +
      • +
      • +
        +

        schedule

        +
        public Cancellable schedule(Duration interval, + PunctuationType type, + Punctuator callback) + throws IllegalArgumentException
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Schedule a periodic operation for processors. A processor may call this method during a + KTable.transformValues(ValueTransformerWithKeySupplier, String...)'s + initialization or + processing to + schedule a periodic callback — called a punctuation — to Punctuator.punctuate(long). + The type parameter controls what notion of time is used for punctuation: +
          +
        • PunctuationType.STREAM_TIME — uses "stream time", which is advanced by the processing of messages + in accordance with the timestamp as extracted by the TimestampExtractor in use. + The first punctuation will be triggered by the first record that is processed. + NOTE: Only advanced if messages arrive
        • +
        • PunctuationType.WALL_CLOCK_TIME — uses system time (the wall-clock time), + which is advanced independent of whether new messages arrive. + The first punctuation will be triggered after interval has elapsed. + NOTE: This is best effort only as its granularity is limited by how long an iteration of the + processing loop takes to complete
        • +
        + + Skipping punctuations: Punctuations will not be triggered more than once at any given timestamp. + This means that "missed" punctuation will be skipped. + It's possible to "miss" a punctuation if: +
        +
        +
        Specified by:
        +
        schedule in interface ProcessorContext
        +
        Parameters:
        +
        interval - the time interval between punctuations (supported minimum is 1 millisecond)
        +
        type - one of: PunctuationType.STREAM_TIME, PunctuationType.WALL_CLOCK_TIME
        +
        callback - a function consuming timestamps representing the current stream or system time
        +
        Returns:
        +
        a handle allowing cancellation of the punctuation schedule established by this method
        +
        Throws:
        +
        IllegalArgumentException - if the interval is not representable in milliseconds
        +
        +
        +
      • +
      • +
        +

        scheduledPunctuators

        +
        public List<MockProcessorContext.CapturedPunctuator> scheduledPunctuators()
        +
        Deprecated.
        +
        Get the punctuators scheduled so far. The returned list is not affected by subsequent calls to schedule(...).
        +
        +
        Returns:
        +
        A list of captured punctuators.
        +
        +
        +
      • +
      • +
        +

        forward

        +
        public <K, +V> void forward(K key, + V value)
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Forward a key/value pair to all downstream processors. + Uses the input record's timestamp as the timestamp for the output record. + +

        If this method is called with Punctuator.punctuate(long) the record that + is sent downstream won't have any associated record metadata like topic, partition, or offset.

        +
        +
        Specified by:
        +
        forward in interface ProcessorContext
        +
        Parameters:
        +
        key - key
        +
        value - value
        +
        +
        +
      • +
      • +
        +

        forward

        +
        public <K, +V> void forward(K key, + V value, + To to)
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Forward a key/value pair to the specified downstream processors. + Can be used to set the timestamp of the output record. + +

        If this method is called with Punctuator.punctuate(long) the record that + is sent downstream won't have any associated record metadata like topic, partition, or offset.

        +
        +
        Specified by:
        +
        forward in interface ProcessorContext
        +
        Parameters:
        +
        key - key
        +
        value - value
        +
        to - the options to use when forwarding
        +
        +
        +
      • +
      • +
        +

        forwarded

        +
        public List<MockProcessorContext.CapturedForward> forwarded()
        +
        Deprecated.
        +
        Get all the forwarded data this context has observed. The returned list will not be + affected by subsequent interactions with the context. The data in the list is in the same order as the calls to + forward(...).
        +
        +
        Returns:
        +
        A list of key/value pairs that were previously passed to the context.
        +
        +
        +
      • +
      • +
        +

        forwarded

        +
        public List<MockProcessorContext.CapturedForward> forwarded(String childName)
        +
        Deprecated.
        +
        Get all the forwarded data this context has observed for a specific child by name. + The returned list will not be affected by subsequent interactions with the context. + The data in the list is in the same order as the calls to forward(...).
        +
        +
        Parameters:
        +
        childName - The child name to retrieve forwards for
        +
        Returns:
        +
        A list of key/value pairs that were previously passed to the context.
        +
        +
        +
      • +
      • +
        +

        resetForwards

        +
        public void resetForwards()
        +
        Deprecated.
        +
        Clear the captured forwarded data.
        +
        +
      • +
      • +
        +

        commit

        +
        public void commit()
        +
        Deprecated.
        +
        Description copied from interface: ProcessorContext
        +
        Request a commit.
        +
        +
        Specified by:
        +
        commit in interface ProcessorContext
        +
        +
        +
      • +
      • +
        +

        committed

        +
        public boolean committed()
        +
        Deprecated.
        +
        Whether ProcessorContext.commit() has been called in this context.
        +
        +
        Returns:
        +
        true iff ProcessorContext.commit() has been called in this context since construction or reset.
        +
        +
        +
      • +
      • +
        +

        resetCommit

        +
        public void resetCommit()
        +
        Deprecated.
        +
        Reset the commit capture to false (whether or not it was previously true).
        +
        +
      • +
      • +
        +

        recordCollector

        +
        public org.apache.kafka.streams.processor.internals.RecordCollector recordCollector()
        +
        Deprecated.
        +
        +
        Specified by:
        +
        recordCollector in interface org.apache.kafka.streams.processor.internals.RecordCollector.Supplier
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/ProcessorContext.html b/static/41/javadoc/org/apache/kafka/streams/processor/ProcessorContext.html new file mode 100644 index 000000000..70d2d05e9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/ProcessorContext.html @@ -0,0 +1,602 @@ + + + + +ProcessorContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ProcessorContext

    +
    +
    +
    +
    All Known Implementing Classes:
    +
    MockProcessorContext
    +
    +
    +
    public interface ProcessorContext
    +
    Processor context interface.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
      Return all the application config properties as key/value pairs.
      +
      + + +
      +
      Return all the application config properties with the given key prefix, as key/value pairs + stripping the prefix.
      +
      + + +
      +
      Return the application id.
      +
      +
      void
      + +
      +
      Request a commit.
      +
      +
      long
      + +
      +
      Return the current stream-time in milliseconds.
      +
      +
      long
      + +
      +
      Return the current system timestamp (also called wall-clock time) in milliseconds.
      +
      +
      <K, +V> void
      +
      forward(K key, + V value)
      +
      +
      Forward a key/value pair to all downstream processors.
      +
      +
      <K, +V> void
      +
      forward(K key, + V value, + To to)
      +
      +
      Forward a key/value pair to the specified downstream processors.
      +
      +
      <S extends StateStore>
      S
      + +
      +
      Get the state store given the store name.
      +
      + + +
      +
      Return the headers of the current input record; could be an empty header if it is not + available.
      +
      + + +
      +
      Return the default key serde.
      +
      + + +
      +
      Return Metrics instance.
      +
      +
      long
      + +
      +
      Return the offset of the current input record; could be -1 if it is not + available.
      +
      +
      int
      + +
      +
      Return the partition id of the current input record; could be -1 if it is not + available.
      +
      +
      void
      +
      register(StateStore store, + StateRestoreCallback stateRestoreCallback)
      +
      +
      Register and possibly restores the specified storage engine.
      +
      + +
      schedule(Duration interval, + PunctuationType type, + Punctuator callback)
      +
      +
      Schedule a periodic operation for processors.
      +
      + + +
      +
      Return the state directory for the partition.
      +
      + + +
      +
      Return the task id.
      +
      +
      long
      + +
      +
      Return the current timestamp.
      +
      + + +
      +
      Return the topic name of the current input record; could be null if it is not + available.
      +
      + + +
      +
      Return the default value serde.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        applicationId

        +
        String applicationId()
        +
        Return the application id.
        +
        +
        Returns:
        +
        the application id
        +
        +
        +
      • +
      • +
        +

        taskId

        +
        TaskId taskId()
        +
        Return the task id.
        +
        +
        Returns:
        +
        the task id
        +
        +
        +
      • +
      • +
        +

        keySerde

        +
        Serde<?> keySerde()
        +
        Return the default key serde.
        +
        +
        Returns:
        +
        the key serializer
        +
        +
        +
      • +
      • +
        +

        valueSerde

        +
        Serde<?> valueSerde()
        +
        Return the default value serde.
        +
        +
        Returns:
        +
        the value serializer
        +
        +
        +
      • +
      • +
        +

        stateDir

        +
        File stateDir()
        +
        Return the state directory for the partition.
        +
        +
        Returns:
        +
        the state directory
        +
        +
        +
      • +
      • +
        +

        metrics

        +
        StreamsMetrics metrics()
        +
        Return Metrics instance.
        +
        +
        Returns:
        +
        StreamsMetrics
        +
        +
        +
      • +
      • +
        +

        register

        +
        void register(StateStore store, + StateRestoreCallback stateRestoreCallback)
        +
        Register and possibly restores the specified storage engine.
        +
        +
        Parameters:
        +
        store - the storage engine
        +
        stateRestoreCallback - the restoration callback logic for log-backed state stores upon restart
        +
        Throws:
        +
        IllegalStateException - If store gets registered after initialized is already finished
        +
        StreamsException - if the store's change log does not contain the partition
        +
        +
        +
      • +
      • +
        +

        getStateStore

        +
        <S extends StateStore> S getStateStore(String name)
        +
        Get the state store given the store name. + +

        The returned state store represent one shard of the overall state, which belongs to the current task. + The returned shard of the state store may only be used by the current + Transformer, ValueTransformer, + or ValueTransformerWithKey instance. + Sharing a shard across different transformers (ie, from different "sibling" tasks; same sub-topology but different + partition) may lead to data corruption and/or data loss.

        +
        +
        Type Parameters:
        +
        S - The type or interface of the store to return
        +
        Parameters:
        +
        name - The store name
        +
        Returns:
        +
        The state store instance
        +
        Throws:
        +
        ClassCastException - if the return type isn't a type or interface of the actual returned store.
        +
        +
        +
      • +
      • +
        +

        schedule

        +
        Cancellable schedule(Duration interval, + PunctuationType type, + Punctuator callback)
        +
        Schedule a periodic operation for processors. A processor may call this method during a + KTable.transformValues(ValueTransformerWithKeySupplier, String...)'s + initialization or + processing to + schedule a periodic callback — called a punctuation — to Punctuator.punctuate(long). + The type parameter controls what notion of time is used for punctuation: +
          +
        • PunctuationType.STREAM_TIME — uses "stream time", which is advanced by the processing of messages + in accordance with the timestamp as extracted by the TimestampExtractor in use. + The first punctuation will be triggered by the first record that is processed. + NOTE: Only advanced if messages arrive
        • +
        • PunctuationType.WALL_CLOCK_TIME — uses system time (the wall-clock time), + which is advanced independent of whether new messages arrive. + The first punctuation will be triggered after interval has elapsed. + NOTE: This is best effort only as its granularity is limited by how long an iteration of the + processing loop takes to complete
        • +
        + + Skipping punctuations: Punctuations will not be triggered more than once at any given timestamp. + This means that "missed" punctuation will be skipped. + It's possible to "miss" a punctuation if: +
        +
        +
        Parameters:
        +
        interval - the time interval between punctuations (supported minimum is 1 millisecond)
        +
        type - one of: PunctuationType.STREAM_TIME, PunctuationType.WALL_CLOCK_TIME
        +
        callback - a function consuming timestamps representing the current stream or system time
        +
        Returns:
        +
        a handle allowing cancellation of the punctuation schedule established by this method
        +
        Throws:
        +
        IllegalArgumentException - if the interval is not representable in milliseconds
        +
        +
        +
      • +
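        A brief sketch of the scheduling pattern above (here, context is assumed to be the ProcessorContext handed to the code under test during initialization; the interval, forwarded values, and java.time.Duration import are assumptions of the example):

            // Sketch: register a wall-clock punctuation and keep the handle to cancel it later.
            Cancellable punctuation = context.schedule(
                Duration.ofSeconds(10),
                PunctuationType.WALL_CLOCK_TIME,
                timestamp -> context.forward("heartbeat", timestamp));

            // e.g. when shutting the processor down:
            punctuation.cancel();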
      • +
        +

        forward

        +
        <K, +V> void forward(K key, + V value)
        +
        Forward a key/value pair to all downstream processors. + Uses the input record's timestamp as the timestamp for the output record. + +

        If this method is called with Punctuator.punctuate(long) the record that + is sent downstream won't have any associated record metadata like topic, partition, or offset.

        +
        +
        Parameters:
        +
        key - key
        +
        value - value
        +
        +
        +
      • +
      • +
        +

        forward

        +
        <K, +V> void forward(K key, + V value, + To to)
        +
        Forward a key/value pair to the specified downstream processors. + Can be used to set the timestamp of the output record. + +

        If this method is called with Punctuator.punctuate(long) the record that + is sent downstream won't have any associated record metadata like topic, partition, or offset.

        +
        +
        Parameters:
        +
        key - key
        +
        value - value
        +
        to - the options to use when forwarding
        +
        +
        +
      • +
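        A one-line sketch of the targeted variant (the child name is a placeholder for a downstream processor registered in the topology; key and value are assumed variables):

            // Sketch: forward only to the named child and pin the output record's timestamp.
            context.forward(key, value, To.child("alerts-sink").withTimestamp(context.timestamp()));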
      • +
        +

        commit

        +
        void commit()
        +
        Request a commit.
        +
        +
      • +
      • +
        +

        topic

        +
        String topic()
        +
        Return the topic name of the current input record; could be null if it is not + available. + +

        For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, the record won't have an associated topic. + Another example is + KTable.transformValues(ValueTransformerWithKeySupplier, String...) + (and siblings), that do not always guarantee to provide a valid topic name, as they might be + executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL.

        +
        +
        Returns:
        +
        the topic name
        +
        +
        +
      • +
      • +
        +

        partition

        +
        int partition()
        +
        Return the partition id of the current input record; could be -1 if it is not + available. + +

        For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, the record won't have an associated partition id. + Another example is + KTable.transformValues(ValueTransformerWithKeySupplier, String...) + (and siblings), that do not always guarantee to provide a valid partition id, as they might be + executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL.

        +
        +
        Returns:
        +
        the partition id
        +
        +
        +
      • +
      • +
        +

        offset

        +
        long offset()
        +
        Return the offset of the current input record; could be -1 if it is not + available. + +

        For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, the record won't have an associated offset. + Another example is + KTable.transformValues(ValueTransformerWithKeySupplier, String...) + (and siblings), that do not always guarantee to provide a valid offset, as they might be + executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL.

        +
        +
        Returns:
        +
        the offset
        +
        +
        +
      • +
      • +
        +

        headers

        +
        Headers headers()
        +
        Return the headers of the current input record; could be an empty header if it is not + available. + +

        For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, the record might not have any associated headers. + Another example is + KTable.transformValues(ValueTransformerWithKeySupplier, String...) + (and siblings), that do not always guarantee to provide valid headers, as they might be + executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL.

        +
        +
        Returns:
        +
        the headers
        +
        +
        +
      • +
      • +
        +

        timestamp

        +
        long timestamp()
        +
        Return the current timestamp. + +

        If it is triggered while processing a record streamed from the source processor, + timestamp is defined as the timestamp of the current input record; the timestamp is extracted from + ConsumerRecord by TimestampExtractor. + Note, that an upstream Processor might have set a new timestamp by calling + ProcessorContext.forward(org.apache.kafka.streams.processor.api.Record). + In particular, some Kafka Streams DSL operators set result record timestamps explicitly, + to guarantee deterministic results. + +

        If it is triggered while processing a record generated not from the source processor (for example, + if this method is invoked from the punctuate call), timestamp is defined as the current + task's stream time, which is defined as the largest timestamp of any record processed by the task.

        +
        +
        Returns:
        +
        the timestamp
        +
        +
        +
      • +
      • +
        +

        appConfigs

        +
        Map<String,Object> appConfigs()
        +
        Return all the application config properties as key/value pairs. + +

        The config properties are defined in the StreamsConfig + object and associated to the ProcessorContext. + +

        The type of the values is dependent on the type of the property + (e.g. the value of DEFAULT_KEY_SERDE_CLASS_CONFIG + will be of type Class, even if it was specified as a String to + StreamsConfig(Map)).

        +
        +
        Returns:
        +
        all the key/values from the StreamsConfig properties
        +
        +
        +
      • +
      • +
        +

        appConfigsWithPrefix

        +
        Map<String,Object> appConfigsWithPrefix(String prefix)
        +
        Return all the application config properties with the given key prefix, as key/value pairs + stripping the prefix. + +

        The config properties are defined in the StreamsConfig + object and associated to the ProcessorContext.

        +
        +
        Parameters:
        +
        prefix - the properties prefix
        +
        Returns:
        +
        the key/values matching the given prefix from the StreamsConfig properties.
        +
        +
        +
      • +
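        For example (using the producer prefix constant defined in StreamsConfig), a processor can read back all producer-level overrides with the prefix already stripped:

            // Sketch: all configs that were set with the "producer." prefix.
            Map<String, Object> producerOverrides =
                context.appConfigsWithPrefix(StreamsConfig.PRODUCER_PREFIX);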
      • +
        +

        currentSystemTimeMs

        +
        long currentSystemTimeMs()
        +
        Return the current system timestamp (also called wall-clock time) in milliseconds. + +

        Note: this method returns the internally cached system timestamp from the Kafka Stream runtime. + Thus, it may return a different value compared to System.currentTimeMillis().

        +
        +
        Returns:
        +
        the current system timestamp in milliseconds
        +
        +
        +
      • +
      • +
        +

        currentStreamTimeMs

        +
        long currentStreamTimeMs()
        +
        Return the current stream-time in milliseconds. + +

        Stream-time is the maximum observed record timestamp so far + (including the currently processed record), i.e., it can be considered a high-watermark. + Stream-time is tracked on a per-task basis and is preserved across restarts and during task migration. + +

        Note: this method is not supported for global processors (cf. + Topology.addGlobalStore(...) + and StreamsBuilder.addGlobalStore(...)), + because there is no concept of stream-time for this case. + Calling this method in a global processor will result in an UnsupportedOperationException.

        +
        +
        Returns:
        +
        the current stream-time in milliseconds
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/PunctuationType.html b/static/41/javadoc/org/apache/kafka/streams/processor/PunctuationType.html new file mode 100644 index 000000000..e7e337141 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/PunctuationType.html @@ -0,0 +1,228 @@ + + + + +PunctuationType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class PunctuationType

    +
    +
    java.lang.Object +
    java.lang.Enum<PunctuationType> +
    org.apache.kafka.streams.processor.PunctuationType
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<PunctuationType>, Constable
    +
    +
    +
    public enum PunctuationType +extends Enum<PunctuationType>
    +
    Controls what notion of time is used for punctuation scheduled via + schedule: +
      +
    • STREAM_TIME - uses "stream time", which is advanced by the processing of messages + in accordance with the timestamp as extracted by the TimestampExtractor in use. + NOTE: Only advanced if messages arrive
    • +
    • WALL_CLOCK_TIME - uses system time (the wall-clock time), + which is advanced at the polling interval (StreamsConfig.POLL_MS_CONFIG) + independent of whether new messages arrive. NOTE: This is best effort only as its granularity is limited + by how long an iteration of the processing loop takes to complete
    • +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      +
        +
      • +
        +

        STREAM_TIME

        +
        public static final PunctuationType STREAM_TIME
        +
        +
      • +
      • +
        +

        WALL_CLOCK_TIME

        +
        public static final PunctuationType WALL_CLOCK_TIME
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static PunctuationType[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static PunctuationType valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/Punctuator.html b/static/41/javadoc/org/apache/kafka/streams/processor/Punctuator.html new file mode 100644 index 000000000..ffa4cf2de --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/Punctuator.html @@ -0,0 +1,158 @@ + + + + +Punctuator (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Punctuator

    +
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface Punctuator
    +
    A functional interface used as an argument to + ProcessingContext.schedule(Duration, PunctuationType, Punctuator).
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      +
      punctuate(long timestamp)
      +
      +
      Perform the scheduled periodic operation.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/RecordContext.html b/static/41/javadoc/org/apache/kafka/streams/processor/RecordContext.html new file mode 100644 index 000000000..e6b7223ad --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/RecordContext.html @@ -0,0 +1,290 @@ + + + + +RecordContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface RecordContext

    +
    +
    +
    +
    public interface RecordContext
    +
    The context associated with the current record being processed by + a Processor
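    One common place where a RecordContext reaches user code is KStream.to(TopicNameExtractor); a minimal sketch (the "-mirror" topic suffix is arbitrary, and stream is assumed to be a KStream<String, String>):

        import org.apache.kafka.streams.processor.TopicNameExtractor;

        // Sketch: derive the output topic name from the input record's metadata.
        TopicNameExtractor<String, String> mirrorTopic =
            (key, value, recordContext) -> recordContext.topic() + "-mirror";
        // usage: stream.to(mirrorTopic);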
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
      Return the headers of the current input record; could be an empty header if it is not + available.
      +
      +
      long
      + +
      +
      Return the offset of the current input record; could be -1 if it is not + available.
      +
      +
      int
      + +
      +
      Return the partition id of the current input record; could be -1 if it is not + available.
      +
      +
      byte[]
      + +
      +
      Return the non-deserialized byte[] of the input message key if the context has been triggered by a message.
      +
      +
      byte[]
      + +
      +
      Return the non-deserialized byte[] of the input message value if the context has been triggered by a message.
      +
      +
      long
      + +
      +
      Return the current timestamp.
      +
      + + +
      +
      Return the topic name of the current input record; could be null if it is not + available.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        topic

        +
        String topic()
        +
        Return the topic name of the current input record; could be null if it is not + available. + +

        For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, the record won't have an associated topic. + Another example is + KTable.transformValues(ValueTransformerWithKeySupplier, String...) + (and siblings), that do not always guarantee to provide a valid topic name, as they might be + executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL.

        +
        +
        Returns:
        +
        the topic name
        +
        +
        +
      • +
      • +
        +

        partition

        +
        int partition()
        +
        Return the partition id of the current input record; could be -1 if it is not + available. + +

        For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, the record won't have an associated partition id. + Another example is + KTable.transformValues(ValueTransformerWithKeySupplier, String...) + (and siblings), that do not always guarantee to provide a valid partition id, as they might be + executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL.

        +
        +
        Returns:
        +
        the partition id
        +
        +
        +
      • +
      • +
        +

        offset

        +
        long offset()
        +
        Return the offset of the current input record; could be -1 if it is not + available. + +

        For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, the record won't have an associated offset. + Another example is + KTable.transformValues(ValueTransformerWithKeySupplier, String...) + (and siblings), that do not always guarantee to provide a valid offset, as they might be + executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL.

        +
        +
        Returns:
        +
        the offset
        +
        +
        +
      • +
      • +
        +

        timestamp

        +
        long timestamp()
        +
        Return the current timestamp. + +

        If it is triggered while processing a record streamed from the source processor, + timestamp is defined as the timestamp of the current input record; the timestamp is extracted from + ConsumerRecord by TimestampExtractor. + Note, that an upstream Processor + might have set a new timestamp by calling + forward(..., To.all().withTimestamp(...)). + In particular, some Kafka Streams DSL operators set result record timestamps explicitly, + to guarantee deterministic results. + +

        If it is triggered while processing a record generated not from the source processor (for example, + if this method is invoked from the punctuate call), timestamp is defined as the current + task's stream time, which is defined as the largest timestamp of any record processed by the task.

        +
        +
        Returns:
        +
        the timestamp
        +
        +
        +
      • +
      • +
        +

        headers

        +
        Headers headers()
        +
        Return the headers of the current input record; could be an empty header if it is not + available. + +

For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, the record might not have any associated headers. + Another example is + KTable.transformValues(ValueTransformerWithKeySupplier, String...) + (and siblings), that do not always guarantee to provide valid headers, as they might be + executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL.

        +
        +
        Returns:
        +
        the headers
        +
        +
        +
      • +
      • +
        +

        sourceRawKey

        +
        byte[] sourceRawKey()
        +
        Return the non-deserialized byte[] of the input message key if the context has been triggered by a message. + +

        If this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, it will return null. + +

        If this method is invoked in a sub-topology due to a repartition, the returned key would be one sent + to the repartition topic.

        +
        +
        Returns:
        +
        the raw byte of the key of the source message
        +
        +
        +
      • +
      • +
        +

        sourceRawValue

        +
        byte[] sourceRawValue()
        +
        Return the non-deserialized byte[] of the input message value if the context has been triggered by a message. + +

        If this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, it will return null. + +

If this method is invoked in a sub-topology due to a repartition, the returned value would be the one sent + to the repartition topic.

        +
        +
        Returns:
        +
        the raw byte of the value of the source message
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/StandbyUpdateListener.SuspendReason.html b/static/41/javadoc/org/apache/kafka/streams/processor/StandbyUpdateListener.SuspendReason.html new file mode 100644 index 000000000..7d9b34d15 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/StandbyUpdateListener.SuspendReason.html @@ -0,0 +1,221 @@ + + + + +StandbyUpdateListener.SuspendReason (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class StandbyUpdateListener.SuspendReason

    +
    +
    java.lang.Object +
    java.lang.Enum<StandbyUpdateListener.SuspendReason> +
    org.apache.kafka.streams.processor.StandbyUpdateListener.SuspendReason
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<StandbyUpdateListener.SuspendReason>, Constable
    +
    +
    +
    Enclosing interface:
    +
    StandbyUpdateListener
    +
    +
    +
    public static enum StandbyUpdateListener.SuspendReason +extends Enum<StandbyUpdateListener.SuspendReason>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static StandbyUpdateListener.SuspendReason[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static StandbyUpdateListener.SuspendReason valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/StandbyUpdateListener.html b/static/41/javadoc/org/apache/kafka/streams/processor/StandbyUpdateListener.html new file mode 100644 index 000000000..4401e02cb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/StandbyUpdateListener.html @@ -0,0 +1,227 @@ + + + + +StandbyUpdateListener (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface StandbyUpdateListener

    +
    +
    +
    +
    public interface StandbyUpdateListener
    +
    +
    +
      + +
    • +
      +

      Nested Class Summary

      +
      Nested Classes
      +
      +
      Modifier and Type
      +
      Interface
      +
      Description
      +
      static enum 
      + +
       
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      +
      onBatchLoaded(TopicPartition topicPartition, + String storeName, + TaskId taskId, + long batchEndOffset, + long batchSize, + long currentEndOffset)
      +
      +
      Method called after loading a batch of records.
      +
      +
      void
      +
      onUpdateStart(TopicPartition topicPartition, + String storeName, + long startingOffset)
      +
      +
      A callback that will be invoked after registering the changelogs for each state store in a standby + task.
      +
      +
      void
      +
      onUpdateSuspended(TopicPartition topicPartition, + String storeName, + long storeOffset, + long currentEndOffset, + StandbyUpdateListener.SuspendReason reason)
      +
      +
      This method is called when the corresponding standby task stops updating, for the provided reason.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        onUpdateStart

        +
        void onUpdateStart(TopicPartition topicPartition, + String storeName, + long startingOffset)
        +
        A callback that will be invoked after registering the changelogs for each state store in a standby + task. It is guaranteed to always be invoked before any records are loaded into the standby store.
        +
        +
        Parameters:
        +
        topicPartition - the changelog TopicPartition for this standby task
        +
        storeName - the name of the store being loaded
        +
        startingOffset - the offset from which the standby task begins consuming from the changelog
        +
        +
        +
      • +
      • +
        +

        onBatchLoaded

        +
        void onBatchLoaded(TopicPartition topicPartition, + String storeName, + TaskId taskId, + long batchEndOffset, + long batchSize, + long currentEndOffset)
        +
        Method called after loading a batch of records. In this case the maximum size of the batch is whatever + the value of the MAX_POLL_RECORDS is set to. + + This method is called after loading each batch and it is advised to keep processing to a minimum. + Any heavy processing will block the state updater thread and slow down the rate of standby task + loading. Therefore, if you need to do any extended processing or connect to an external service, + consider doing so asynchronously.
        +
        +
        Parameters:
        +
        topicPartition - the changelog TopicPartition for this standby task
        +
        storeName - the name of the store being loaded
        +
        batchEndOffset - batchEndOffset the changelog end offset (inclusive) of the batch that was just loaded
        +
        batchSize - the total number of records in the batch that was just loaded
        +
        currentEndOffset - the current end offset of the changelog topic partition.
        +
        +
        +
      • +
      • +
        +

        onUpdateSuspended

        +
        void onUpdateSuspended(TopicPartition topicPartition, + String storeName, + long storeOffset, + long currentEndOffset, + StandbyUpdateListener.SuspendReason reason)
        +
        This method is called when the corresponding standby task stops updating, for the provided reason. +

+ If the task was MIGRATED to another instance, this callback will be invoked after this + state store (and the task itself) are closed (in which case the data will be cleaned up after + state.cleanup.delay.ms). + If the task was PROMOTED to an active task, the state store will not be closed, and the + callback will be invoked after unregistering it as a standby task but before re-registering it as an active task + and beginning restoration. In other words, this will always be called before the corresponding + StateRestoreListener.onRestoreStart(org.apache.kafka.common.TopicPartition, java.lang.String, long, long) call is made.

        +
        +
        Parameters:
        +
        topicPartition - the changelog TopicPartition for this standby task
        +
        storeName - the name of the store being loaded
        +
        storeOffset - is the offset of the last changelog record that was read and put into the store at the time + of suspension.
        +
        currentEndOffset - the current end offset of the changelog topic partition.
        +
        reason - is the reason why the standby task was suspended.
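Taken together, a minimal listener might just log progress, as in the sketch below; the callbacks should stay cheap, since heavy work blocks the state updater thread. Registering the listener via KafkaStreams#setStandbyUpdateListener is an assumption about the client API, not something stated on this page.

```java
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.processor.StandbyUpdateListener;
import org.apache.kafka.streams.processor.TaskId;

// Logs standby-update progress; keep these callbacks lightweight.
public class LoggingStandbyUpdateListener implements StandbyUpdateListener {

    @Override
    public void onUpdateStart(TopicPartition topicPartition, String storeName, long startingOffset) {
        System.out.printf("standby %s[%s] starts updating from offset %d%n",
            storeName, topicPartition, startingOffset);
    }

    @Override
    public void onBatchLoaded(TopicPartition topicPartition, String storeName, TaskId taskId,
                              long batchEndOffset, long batchSize, long currentEndOffset) {
        System.out.printf("standby %s[%s] task %s loaded %d records up to offset %d (end %d)%n",
            storeName, topicPartition, taskId, batchSize, batchEndOffset, currentEndOffset);
    }

    @Override
    public void onUpdateSuspended(TopicPartition topicPartition, String storeName, long storeOffset,
                                  long currentEndOffset, SuspendReason reason) {
        System.out.printf("standby %s[%s] suspended at offset %d (end %d), reason: %s%n",
            storeName, topicPartition, storeOffset, currentEndOffset, reason);
    }
}

// Assumed registration point: kafkaStreams.setStandbyUpdateListener(new LoggingStandbyUpdateListener());
```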
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/StateRestoreCallback.html b/static/41/javadoc/org/apache/kafka/streams/processor/StateRestoreCallback.html new file mode 100644 index 000000000..4b45fe878 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/StateRestoreCallback.html @@ -0,0 +1,137 @@ + + + + +StateRestoreCallback (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface StateRestoreCallback

    +
    +
    +
    +
    All Known Subinterfaces:
    +
    BatchingStateRestoreCallback
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface StateRestoreCallback
    +
    Restoration logic for log-backed state stores upon restart, + it takes one record at a time from the logs to apply to the restoring state.
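As a functional interface, a restore callback is often provided as a lambda that replays changelog records into the store's backing structure. A minimal sketch, assuming a hypothetical in-memory map-backed store and UTF-8 string keys/values:

```java
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.streams.processor.StateRestoreCallback;

// Hypothetical in-memory store being rebuilt from its changelog, one record at a time.
public class InMemoryRestoreExample {
    private final Map<String, String> data = new HashMap<>();

    public StateRestoreCallback restoreCallback() {
        return (key, value) -> {
            final String k = new String(key, StandardCharsets.UTF_8);
            if (value == null) {
                data.remove(k); // a null value is a tombstone: delete the key
            } else {
                data.put(k, new String(value, StandardCharsets.UTF_8));
            }
        };
    }
}
```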
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      +
      restore(byte[] key, + byte[] value)
      +
       
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        restore

        +
        void restore(byte[] key, + byte[] value)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/StateRestoreListener.html b/static/41/javadoc/org/apache/kafka/streams/processor/StateRestoreListener.html new file mode 100644 index 000000000..a35299783 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/StateRestoreListener.html @@ -0,0 +1,245 @@ + + + + +StateRestoreListener (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface StateRestoreListener

    +
    +
    +
    +
    public interface StateRestoreListener
    +
    Class for listening to various states of the restoration process of a StateStore. + +

    + When calling KafkaStreams.setGlobalStateRestoreListener(StateRestoreListener) + the passed instance is expected to be stateless since the StateRestoreListener is shared + across all StreamThread instances. + +

+ Users desiring stateful operations will need to provide synchronization internally in + the StateRestoreListener implementation. + +

+ Note that this listener is only registered at the per-client level and users can use the storeName + parameter to define specific monitoring for different StateStores. There is another + StateRestoreCallback interface which is registered via the + StateStoreContext.register(StateStore, StateRestoreCallback, CommitCallback) + function per-store, and it is used to apply the fetched changelog records into the local state store during restoration. + These two interfaces serve different restoration purposes and users should not try to implement both of them in a single + class during state store registration. + +

+ Also note that the update process of standby tasks is not monitored via this interface, since a standby task does + not actually restore state, but keeps updating its state from the changelogs written by the active task + which does not ever finish. + +

    + Incremental updates are exposed so users can estimate how much progress has been made.
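A stateless, logging-only implementation of this listener might look like the following sketch, registered via KafkaStreams.setGlobalStateRestoreListener as described above (log wording is illustrative):

```java
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.processor.StateRestoreListener;

// Stateless listener (as required, since it is shared across all stream threads)
// that reports restoration progress to stdout.
public class LoggingRestoreListener implements StateRestoreListener {

    @Override
    public void onRestoreStart(TopicPartition topicPartition, String storeName,
                               long startingOffset, long endingOffset) {
        System.out.printf("restoring %s[%s] offsets %d..%d%n",
            storeName, topicPartition, startingOffset, endingOffset);
    }

    @Override
    public void onBatchRestored(TopicPartition topicPartition, String storeName,
                                long batchEndOffset, long numRestored) {
        System.out.printf("restored %d records of %s[%s], now at offset %d%n",
            numRestored, storeName, topicPartition, batchEndOffset);
    }

    @Override
    public void onRestoreEnd(TopicPartition topicPartition, String storeName, long totalRestored) {
        System.out.printf("finished restoring %s[%s], %d records in total%n",
            storeName, topicPartition, totalRestored);
    }
}

// Registration: kafkaStreams.setGlobalStateRestoreListener(new LoggingRestoreListener());
```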

    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      +
      onBatchRestored(TopicPartition topicPartition, + String storeName, + long batchEndOffset, + long numRestored)
      +
      +
      Method called after restoring a batch of records.
      +
      +
      void
      +
      onRestoreEnd(TopicPartition topicPartition, + String storeName, + long totalRestored)
      +
      +
      Method called when restoring the StateStore is complete.
      +
      +
      void
      +
      onRestoreStart(TopicPartition topicPartition, + String storeName, + long startingOffset, + long endingOffset)
      +
      +
      Method called at the very beginning of StateStore restoration.
      +
      +
      default void
      +
      onRestoreSuspended(TopicPartition topicPartition, + String storeName, + long totalRestored)
      +
      +
      Method called when restoring the StateStore is suspended due to the task being migrated out of the host.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        onRestoreStart

        +
        void onRestoreStart(TopicPartition topicPartition, + String storeName, + long startingOffset, + long endingOffset)
        +
        Method called at the very beginning of StateStore restoration.
        +
        +
        Parameters:
        +
        topicPartition - the TopicPartition containing the values to restore
        +
        storeName - the name of the store undergoing restoration
        +
        startingOffset - the starting offset of the entire restoration process for this TopicPartition
        +
        endingOffset - the exclusive ending offset of the entire restoration process for this TopicPartition
        +
        +
        +
      • +
      • +
        +

        onBatchRestored

        +
        void onBatchRestored(TopicPartition topicPartition, + String storeName, + long batchEndOffset, + long numRestored)
        +
Method called after restoring a batch of records. In this case the maximum size of the batch is whatever + the value of the MAX_POLL_RECORDS is set to. + + This method is called after restoring each batch and it is advised to keep processing to a minimum. + Any heavy processing will hold up recovering the next batch, hence slowing down the restore process as a + whole. + + If you need to do any extended processing or connect to an external service, consider doing so asynchronously.
        +
        +
        Parameters:
        +
        topicPartition - the TopicPartition containing the values to restore
        +
        storeName - the name of the store undergoing restoration
        +
        batchEndOffset - the inclusive ending offset for the current restored batch for this TopicPartition
        +
        numRestored - the total number of records restored in this batch for this TopicPartition
        +
        +
        +
      • +
      • +
        +

        onRestoreEnd

        +
        void onRestoreEnd(TopicPartition topicPartition, + String storeName, + long totalRestored)
        +
        Method called when restoring the StateStore is complete.
        +
        +
        Parameters:
        +
        topicPartition - the TopicPartition containing the values to restore
        +
        storeName - the name of the store just restored
        +
        totalRestored - the total number of records restored for this TopicPartition
        +
        +
        +
      • +
      • +
        +

        onRestoreSuspended

        +
        default void onRestoreSuspended(TopicPartition topicPartition, + String storeName, + long totalRestored)
        +
        Method called when restoring the StateStore is suspended due to the task being migrated out of the host. + If the migrated task is recycled or re-assigned back to the current host, another + onRestoreStart(TopicPartition, String, long, long) would be called.
        +
        +
        Parameters:
        +
        topicPartition - the TopicPartition containing the values to restore
        +
        storeName - the name of the store just restored
        +
        totalRestored - the total number of records restored for this TopicPartition before being paused
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/StateStore.html b/static/41/javadoc/org/apache/kafka/streams/processor/StateStore.html new file mode 100644 index 000000000..92e27e267 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/StateStore.html @@ -0,0 +1,293 @@ + + + + +StateStore (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface StateStore

    +
    +
    +
    +
    All Known Subinterfaces:
    +
    KeyValueStore<K,V>, SessionStore<K,AGG>, TimestampedKeyValueStore<K,V>, TimestampedWindowStore<K,V>, VersionedBytesStore, VersionedKeyValueStore<K,V>, WindowStore<K,V>
    +
    +
    +
    public interface StateStore
    +
    A storage engine for managing state maintained by a stream processor. +

    + If the store is implemented as a persistent store, it must use the store name as directory name and write + all data into this store directory. + The store directory must be created with the state directory. + The state directory can be obtained via #stateDir() using the + ProcessorContext provided via init(...). +

+ Using nested store directories within the state directory isolates different state stores. + If a state store were to write into the state directory directly, it might conflict with other state stores and thus, + data might get corrupted and/or Streams might fail with an error. + Furthermore, Kafka Streams relies on using the store name as store directory name to perform internal cleanup tasks. +

    + This interface does not specify any query capabilities, which, of course, + would be query engine specific. Instead, it just specifies the minimum + functionality required to reload a storage engine from its changelog as well + as basic lifecycle management.
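The lifecycle contract can be illustrated with a bare-bones, non-persistent store; the sketch below is an assumption-laden example (a real store would also implement a query interface such as KeyValueStore and handle concurrency):

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.processor.StateStore;
import org.apache.kafka.streams.processor.StateStoreContext;

// Minimal in-memory store that only fulfils the lifecycle contract described above.
public class SimpleInMemoryStore implements StateStore {
    private final String name;
    private final Map<Bytes, byte[]> data = new HashMap<>();
    private volatile boolean open = false;

    public SimpleInMemoryStore(final String name) {
        this.name = name;
    }

    @Override
    public String name() {
        return name;
    }

    @Override
    public void init(final StateStoreContext stateStoreContext, final StateStore root) {
        // Register the root store with a restore callback that replays the changelog
        // into the in-memory map; null values are treated as deletes.
        stateStoreContext.register(root, (key, value) -> {
            if (value == null) {
                data.remove(Bytes.wrap(key));
            } else {
                data.put(Bytes.wrap(key), value);
            }
        });
        open = true;
    }

    @Override
    public void flush() {
        // nothing is cached outside the map, so there is nothing to flush
    }

    @Override
    public void close() {
        data.clear();
        open = false; // close() must be idempotent
    }

    @Override
    public boolean persistent() {
        return false;
    }

    @Override
    public boolean isOpen() {
        return open;
    }
}
```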

    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      Close the storage engine.
      +
      +
      void
      + +
      +
      Flush any cached data
      +
      +
      default Position
      + +
      +
      Returns the position the state store is at with respect to the input topic/partitions
      +
      +
      void
      +
      init(StateStoreContext stateStoreContext, + StateStore root)
      +
      +
      Initializes this state store.
      +
      +
      boolean
      + +
      +
      Is this store open for reading and writing
      +
      + + +
      +
      The name of this store.
      +
      +
      boolean
      + +
      +
      Return if the storage is persistent or not.
      +
      +
      default <R> QueryResult<R>
      +
      query(Query<R> query, + PositionBound positionBound, + QueryConfig config)
      +
      +
      Execute a query.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        name

        +
        String name()
        +
        The name of this store.
        +
        +
        Returns:
        +
        the storage name
        +
        +
        +
      • +
      • +
        +

        init

        +
        void init(StateStoreContext stateStoreContext, + StateStore root)
        +
        Initializes this state store. +

+ The implementation of this function must register the root store in the stateStoreContext via the + StateStoreContext.register(StateStore, StateRestoreCallback, CommitCallback) function, where the + first StateStore parameter should always be the passed-in root object, and + the second parameter should be an object of the user's implementation + of the StateRestoreCallback interface used for restoring the state store from the changelog. +

        + Note that if the state store engine itself supports bulk writes, users can implement another + interface BatchingStateRestoreCallback which extends StateRestoreCallback to + let users implement bulk-load restoration logic instead of restoring one record at a time.

        +
        +
        Throws:
        +
        IllegalStateException - If store gets registered after initialized is already finished
        +
        StreamsException - if the store's change log does not contain the partition
        +
        +
        +
      • +
      • +
        +

        flush

        +
        void flush()
        +
        Flush any cached data
        +
        +
      • +
      • +
        +

        close

        +
        void close()
        +
        Close the storage engine. + Note that this function needs to be idempotent since it may be called + several times on the same state store. +

+ Users only need to implement this function but should NEVER need to call this API explicitly, + as it will be called by the library automatically when necessary.

        +
        +
      • +
      • +
        +

        persistent

        +
        boolean persistent()
        +
        Return if the storage is persistent or not.
        +
        +
        Returns:
        +
        true if the storage is persistent—false otherwise
        +
        +
        +
      • +
      • +
        +

        isOpen

        +
        boolean isOpen()
        +
        Is this store open for reading and writing
        +
        +
        Returns:
        +
        true if the store is open
        +
        +
        +
      • +
      • +
        +

        query

        +
        @Evolving +default <R> QueryResult<R> query(Query<R> query, + PositionBound positionBound, + QueryConfig config)
        +
        Execute a query. Returns a QueryResult containing either result data or + a failure. +

        + If the store doesn't know how to handle the given query, the result + shall be a FailureReason.UNKNOWN_QUERY_TYPE. + If the store couldn't satisfy the given position bound, the result + shall be a FailureReason.NOT_UP_TO_BOUND. +

        + Note to store implementers: if your store does not support position tracking, + you can correctly respond FailureReason.NOT_UP_TO_BOUND if the argument is + anything but PositionBound.unbounded(). Be sure to explain in the failure message + that bounded positions are not supported. +

        +
        +
        Type Parameters:
        +
        R - The result type
        +
        Parameters:
        +
        query - The query to execute
        +
        positionBound - The position the store must be at or past
        +
        config - Per query configuration parameters, such as whether the store should collect detailed execution + info for the query
        +
        +
        +
      • +
      • +
        +

        getPosition

        +
        @Evolving +default Position getPosition()
        +
        Returns the position the state store is at with respect to the input topic/partitions
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/StateStoreContext.html b/static/41/javadoc/org/apache/kafka/streams/processor/StateStoreContext.html new file mode 100644 index 000000000..8232800bf --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/StateStoreContext.html @@ -0,0 +1,333 @@ + + + + +StateStoreContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface StateStoreContext

    +
    +
    +
    +
    public interface StateStoreContext
    +
    State store context interface.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        applicationId

        +
        String applicationId()
        +
        Returns the application id.
        +
        +
        Returns:
        +
        the application id
        +
        +
        +
      • +
      • +
        +

        taskId

        +
        TaskId taskId()
        +
        Returns the task id.
        +
        +
        Returns:
        +
        the task id
        +
        +
        +
      • +
      • +
        +

        recordMetadata

        +
        Optional<RecordMetadata> recordMetadata()
        +
        Return the metadata of the current topic/partition/offset if available. + This is defined as the metadata of the record that is currently being + processed (or was last processed) by the StreamTask that holds the store. +

        + Note that the metadata is not defined during all store interactions, for + example, while the StreamTask is running a punctuation.

        +
        +
        Returns:
        +
        metadata of the current record
        +
        +
        +
      • +
      • +
        +

        keySerde

        +
        Serde<?> keySerde()
        +
        Returns the default key serde.
        +
        +
        Returns:
        +
        the key serializer
        +
        +
        +
      • +
      • +
        +

        valueSerde

        +
        Serde<?> valueSerde()
        +
        Returns the default value serde.
        +
        +
        Returns:
        +
        the value serializer
        +
        +
        +
      • +
      • +
        +

        stateDir

        +
        File stateDir()
        +
        Returns the state directory for the partition.
        +
        +
        Returns:
        +
        the state directory
        +
        +
        +
      • +
      • +
        +

        metrics

        +
        StreamsMetrics metrics()
        +
        Returns Metrics instance.
        +
        +
        Returns:
        +
        StreamsMetrics
        +
        +
        +
      • +
      • +
        +

        register

        +
        void register(StateStore store, + StateRestoreCallback stateRestoreCallback)
        +
        Registers and possibly restores the specified storage engine.
        +
        +
        Parameters:
        +
        store - the storage engine
        +
        stateRestoreCallback - the restoration callback logic for log-backed state stores upon restart
        +
        Throws:
        +
        IllegalStateException - If store gets registered after initialized is already finished
        +
        StreamsException - if the store's change log does not contain the partition
        +
        +
        +
      • +
      • +
        +

        register

        +
        @Evolving +void register(StateStore store, + StateRestoreCallback stateRestoreCallback, + CommitCallback commitCallback)
        +
        Registers and possibly restores the specified storage engine.
        +
        +
        Parameters:
        +
        store - the storage engine
        +
        stateRestoreCallback - the restoration callback logic for log-backed state stores upon restart
        +
        commitCallback - a callback to be invoked upon successful task commit, in case the store + needs to perform any state tracking when the task is known to be in + a consistent state. If the store has no such state to track, it may + use register(StateStore, StateRestoreCallback) instead. + Persistent stores provided by Kafka Streams use this method to save + their Position information to local disk, for example.
        +
        Throws:
        +
        IllegalStateException - If store gets registered after initialized is already finished
        +
        StreamsException - if the store's change log does not contain the partition
        +
        +
        +
      • +
      • +
        +

        appConfigs

        +
        Map<String,Object> appConfigs()
        +
        Returns all the application config properties as key/value pairs. + +

        The config properties are defined in the StreamsConfig + object and associated to the StateStoreContext. + +

        The type of the values is dependent on the type of the property + (e.g. the value of DEFAULT_KEY_SERDE_CLASS_CONFIG + will be of type Class, even if it was specified as a String to + StreamsConfig(Map)).

        +
        +
        Returns:
        +
        all the key/values from the StreamsConfig properties
        +
        +
        +
      • +
      • +
        +

        appConfigsWithPrefix

        +
        Map<String,Object> appConfigsWithPrefix(String prefix)
        +
        Returns all the application config properties with the given key prefix, as key/value pairs + stripping the prefix. + +

        The config properties are defined in the StreamsConfig + object and associated to the StateStoreContext.

        +
        +
        Parameters:
        +
        prefix - the properties prefix
        +
        Returns:
        +
        the key/values matching the given prefix from the StreamsConfig properties.
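As a small sketch of the prefix behaviour, a store could read only the consumer-level overrides from the Streams configuration; StreamsConfig.CONSUMER_PREFIX ("consumer.") is used here, and the helper class and property name are illustrative assumptions:

```java
import java.util.Map;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.processor.StateStoreContext;

public final class PrefixedConfigExample {

    // Reads client overrides that were supplied as "consumer.<name>" in the Streams config;
    // the returned map has the "consumer." prefix stripped from each key.
    public static Object consumerOverride(final StateStoreContext context, final String name) {
        final Map<String, Object> consumerConfig =
            context.appConfigsWithPrefix(StreamsConfig.CONSUMER_PREFIX);
        return consumerConfig.get(name); // null if the property was not overridden
    }

    private PrefixedConfigExample() { }
}
```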
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/StreamPartitioner.html b/static/41/javadoc/org/apache/kafka/streams/processor/StreamPartitioner.html new file mode 100644 index 000000000..6e2a56d54 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/StreamPartitioner.html @@ -0,0 +1,187 @@ + + + + +StreamPartitioner (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface StreamPartitioner<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - the type of keys
    +
    V - the type of values
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface StreamPartitioner<K,V>
    +
    Determine how records are distributed among the partitions in a Kafka topic. If not specified, the underlying producer's + default partitioning strategy will be used to determine the partition. +

+ Kafka topics are divided into one or more partitions. Since each partition must fit on the servers that host it, + using multiple partitions allows the topic to scale beyond a size that will fit on a single machine. Partitions also enable you + to use multiple instances of your topology to process in parallel all of the records on the topology's source topics. +

    + When a topology is instantiated, each of its sources are assigned a subset of that topic's partitions. That means that only + those processors in that topology instance will consume the records from those partitions. In many cases, Kafka Streams will + automatically manage these instances, and adjust when new topology instances are added or removed. +

    + Some topologies, though, need more control over which records appear in each partition. For example, some topologies that have + stateful processors may want all records within a range of keys to always be delivered to and handled by the same topology instance. + An upstream topology producing records to that topic can use a custom stream partitioner to precisely and consistently + determine to which partition each record should be written. +

    + To do this, create a StreamPartitioner implementation, and when you build your topology specify that custom partitioner + when adding a sink + for that topic. +

    + All StreamPartitioner implementations should be stateless and a pure function so they can be shared across topic and sink nodes.
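A stateless partitioner might key a contiguous range of records to the same partition, as in the sketch below (the routing rule is purely illustrative); such an implementation is passed in when adding a sink, as described above:

```java
import java.util.Optional;
import java.util.Set;
import org.apache.kafka.streams.processor.StreamPartitioner;

// Routes records by the first character of the key so that records sharing that character
// always land in the same partition; falls back to the default partitioner for missing keys.
public class FirstCharPartitioner implements StreamPartitioner<String, String> {

    @Override
    public Optional<Set<Integer>> partitions(final String topic, final String key,
                                             final String value, final int numPartitions) {
        if (key == null || key.isEmpty()) {
            return Optional.empty(); // use the producer's default partitioning
        }
        return Optional.of(Set.of(key.charAt(0) % numPartitions));
    }
}
```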

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      partitions(String topic, + K key, + V value, + int numPartitions)
      +
      +
      Determine the number(s) of the partition(s) to which a record with the given key and value should be sent, + for the given topic and current partition count
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        partitions

        +
        Optional<Set<Integer>> partitions(String topic, + K key, + V value, + int numPartitions)
        +
        Determine the number(s) of the partition(s) to which a record with the given key and value should be sent, + for the given topic and current partition count
        +
        +
        Parameters:
        +
        topic - the topic name this record is sent to
        +
        key - the key of the record
        +
        value - the value of the record
        +
        numPartitions - the total number of partitions
        +
        Returns:
        +
an Optional of a Set of integers between 0 and numPartitions-1. + An empty Optional means the default partitioner is used. + An Optional of an empty set means the record won't be sent to any partition, i.e., it is dropped. + An Optional of a non-empty set gives the partitions to which the record should be sent.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/TaskId.html b/static/41/javadoc/org/apache/kafka/streams/processor/TaskId.html new file mode 100644 index 000000000..548bfd22b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/TaskId.html @@ -0,0 +1,301 @@ + + + + +TaskId (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TaskId

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.TaskId
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Comparable<TaskId>
    +
    +
    +
    public class TaskId +extends Object +implements Comparable<TaskId>
    +
    The task ID representation composed as subtopology plus the assigned partition ID.
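A short sketch of constructing and parsing task IDs; the "subtopology_partition" string form shown in the comment is an assumption about the toString() format:

```java
import org.apache.kafka.streams.processor.TaskId;

public class TaskIdExample {
    public static void main(String[] args) {
        // Construct a task id for subtopology 0, partition 2.
        TaskId id = new TaskId(0, 2);
        System.out.println(id);                 // e.g. "0_2" (assumed format)
        System.out.println(id.subtopology());   // 0
        System.out.println(id.partition());     // 2

        // Round-trip through the string representation.
        TaskId parsed = TaskId.parse(id.toString());
        System.out.println(id.equals(parsed));  // true
    }
}
```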
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        NAMED_TOPOLOGY_DELIMITER

        +
        public static final String NAMED_TOPOLOGY_DELIMITER
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        TaskId

        +
        public TaskId(int subtopology, + int partition)
        +
        +
      • +
      • +
        +

        TaskId

        +
        public TaskId(int subtopology, + int partition, + String topologyName)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        subtopology

        +
        public int subtopology()
        +
        +
      • +
      • +
        +

        partition

        +
        public int partition()
        +
        +
      • +
      • +
        +

        topologyName

        +
        public String topologyName()
        +
        Experimental feature -- will return null
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        parse

        +
        public static TaskId parse(String taskIdStr)
        +
        +
        Throws:
        +
        TaskIdFormatException - if the taskIdStr is not a valid TaskId
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        compareTo

        +
        public int compareTo(TaskId other)
        +
        +
        Specified by:
        +
        compareTo in interface Comparable<TaskId>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/TimestampExtractor.html b/static/41/javadoc/org/apache/kafka/streams/processor/TimestampExtractor.html new file mode 100644 index 000000000..58ea93a4d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/TimestampExtractor.html @@ -0,0 +1,161 @@ + + + + +TimestampExtractor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TimestampExtractor

    +
    +
    +
    +
    All Known Implementing Classes:
    +
    FailOnInvalidTimestamp, LogAndSkipOnInvalidTimestamp, UsePartitionTimeOnInvalidTimestamp, WallclockTimestampExtractor
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface TimestampExtractor
    +
    An interface that allows the Kafka Streams framework to extract a timestamp from an instance of ConsumerRecord. + The extracted timestamp is defined as milliseconds.
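A custom extractor typically pulls an event time out of the deserialized payload and falls back to the partition time when none is available. The Order payload type and its accessor below are hypothetical, used only to keep the sketch self-contained:

```java
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.processor.TimestampExtractor;

// Stateless extractor: use the event time embedded in the payload when present,
// otherwise fall back to the highest previously extracted timestamp of the partition.
public class OrderTimestampExtractor implements TimestampExtractor {

    @Override
    public long extract(final ConsumerRecord<Object, Object> record, final long partitionTime) {
        final Object value = record.value();
        if (value instanceof Order) {
            final long eventTime = ((Order) value).timestampMs(); // hypothetical accessor
            if (eventTime >= 0) {
                return eventTime;
            }
        }
        return partitionTime; // may be -1, in which case the record is skipped
    }

    // Hypothetical payload type, only here to make the example compile on its own.
    public interface Order {
        long timestampMs();
    }
}
```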
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      long
      +
      extract(ConsumerRecord<Object,Object> record, + long partitionTime)
      +
      +
      Extracts a timestamp from a record.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        extract

        +
        long extract(ConsumerRecord<Object,Object> record, + long partitionTime)
        +
Extracts a timestamp from a record. The timestamp must be positive to be considered a valid timestamp. + Returning a negative timestamp will cause the record not to be processed but rather silently skipped. + In case the record contains a negative timestamp and this is considered a fatal error for the application, + throwing a RuntimeException instead of returning the timestamp is a valid option too. + For this case, Streams will stop processing and shut down to allow you to investigate the root cause of the + negative timestamp.

        + The timestamp extractor implementation must be stateless. +

        + The extracted timestamp MUST represent the milliseconds since midnight, January 1, 1970 UTC. +

        + It is important to note that this timestamp may become the message timestamp for any messages sent to changelogs + updated by KTables and joins. + The message timestamp is used for log retention and log rolling, so using nonsensical values may result in + excessive log rolling and therefore broker performance degradation.

        +
        +
        Parameters:
        +
        record - a data record
        +
        partitionTime - the highest extracted valid timestamp of the current record's partition˙ (could be -1 if unknown)
        +
        Returns:
        +
        the timestamp of the record
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/To.html b/static/41/javadoc/org/apache/kafka/streams/processor/To.html new file mode 100644 index 000000000..2fe4bc73b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/To.html @@ -0,0 +1,218 @@ + + + + +To (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class To

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.To
    +
    +
    +
    +
    public class To +extends Object
    +
    This class is used to provide the optional parameters when sending output records to downstream processor + using ProcessorContext.forward(Object, Object, To).
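A brief sketch of both forwarding styles; the child name "audit-sink" and the surrounding helper class are assumptions, and the context is the classic org.apache.kafka.streams.processor.ProcessorContext referenced above:

```java
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.To;

public final class ForwardExamples {

    // Forward a key/value pair only to the child processor named "audit-sink"
    // (hypothetical name) and stamp it with an explicit timestamp.
    static void forwardToAudit(final ProcessorContext context,
                               final String key, final String value, final long eventTimeMs) {
        context.forward(key, value, To.child("audit-sink").withTimestamp(eventTimeMs));
    }

    // Forward to all downstream processors while overriding the record timestamp.
    static void broadcast(final ProcessorContext context,
                          final String key, final String value, final long eventTimeMs) {
        context.forward(key, value, To.all().withTimestamp(eventTimeMs));
    }

    private ForwardExamples() { }
}
```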
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      static To
      +
      all()
      +
      +
      Forward the key/value pair to all downstream processors
      +
      +
      static To
      +
      child(String childName)
      +
      +
      Forward the key/value pair to one of the downstream processors designated by the downstream processor name.
      +
      +
      boolean
      + +
       
      +
      int
      + +
      +
      Equality is implemented in support of tests, *not* for use in Hash collections, since this class is mutable.
      +
      + + +
       
      + +
      withTimestamp(long timestamp)
      +
      +
      Set the timestamp of the output record.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        child

        +
        public static To child(String childName)
        +
        Forward the key/value pair to one of the downstream processors designated by the downstream processor name.
        +
        +
        Parameters:
        +
        childName - name of downstream processor
        +
        Returns:
        +
        a new To instance configured with childName
        +
        +
        +
      • +
      • +
        +

        all

        +
        public static To all()
        +
        Forward the key/value pair to all downstream processors
        +
        +
        Returns:
        +
a new To instance configured for all downstream processors
        +
        +
        +
      • +
      • +
        +

        withTimestamp

        +
        public To withTimestamp(long timestamp)
        +
        Set the timestamp of the output record.
        +
        +
        Parameters:
        +
        timestamp - the output record timestamp
        +
        Returns:
        +
        itself (i.e., this)
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        Equality is implemented in support of tests, *not* for use in Hash collections, since this class is mutable.
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/TopicNameExtractor.html b/static/41/javadoc/org/apache/kafka/streams/processor/TopicNameExtractor.html new file mode 100644 index 000000000..62da786d2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/TopicNameExtractor.html @@ -0,0 +1,146 @@ + + + + +TopicNameExtractor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TopicNameExtractor<K,V>

    +
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface TopicNameExtractor<K,V>
    +
An interface for dynamically determining the name of the Kafka topic to which records are sent at the sink node of the topology.
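For example, an extractor might route records to a per-tenant topic derived from the key, as in the sketch below (the "orders-" prefix and fallback topic are hypothetical, and every target topic must already exist); such an extractor can then be supplied when adding a sink to the topology:

```java
import org.apache.kafka.streams.processor.RecordContext;
import org.apache.kafka.streams.processor.TopicNameExtractor;

// Routes records to a per-tenant topic derived from the key.
public class TenantTopicExtractor implements TopicNameExtractor<String, String> {

    @Override
    public String extract(final String key, final String value, final RecordContext recordContext) {
        if (key == null || key.isEmpty()) {
            return "orders-unrouted"; // fallback topic for records without a usable key
        }
        return "orders-" + key.toLowerCase();
    }
}
```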
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      extract(K key, + V value, + RecordContext recordContext)
      +
      +
      Extracts the topic name to send to.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        extract

        +
        String extract(K key, + V value, + RecordContext recordContext)
        +
        Extracts the topic name to send to. The topic name must already exist, since the Kafka Streams library will not + try to automatically create the topic with the extracted name.
        +
        +
        Parameters:
        +
        key - the record key
        +
        value - the record value
        +
        recordContext - current context metadata of the record
        +
        Returns:
        +
        the topic name this record should be sent to
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/UsePartitionTimeOnInvalidTimestamp.html b/static/41/javadoc/org/apache/kafka/streams/processor/UsePartitionTimeOnInvalidTimestamp.html new file mode 100644 index 000000000..63615b80a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/UsePartitionTimeOnInvalidTimestamp.html @@ -0,0 +1,230 @@ + + + + +UsePartitionTimeOnInvalidTimestamp (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class UsePartitionTimeOnInvalidTimestamp

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.UsePartitionTimeOnInvalidTimestamp
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    TimestampExtractor
    +
    +
    +
    public class UsePartitionTimeOnInvalidTimestamp +extends Object
    +
    Retrieves embedded metadata timestamps from Kafka messages. + If a record has a negative (invalid) timestamp, a new timestamp will be inferred from the current stream-time. +

    + Embedded metadata timestamp was introduced in "KIP-32: Add timestamps to Kafka message" for the new + 0.10+ Kafka message format. +

    + Here, "embedded metadata" refers to the fact that compatible Kafka producer clients automatically and + transparently embed such timestamps into message metadata they send to Kafka, which can then be retrieved + via this timestamp extractor. +

    + If the embedded metadata timestamp represents CreateTime (cf. Kafka broker setting + message.timestamp.type and Kafka topic setting log.message.timestamp.type), + this extractor effectively provides event-time semantics. + If LogAppendTime is used as broker/topic setting to define the embedded metadata timestamps, + using this extractor effectively provides ingestion-time semantics. +

    + If you need processing-time semantics, use WallclockTimestampExtractor.

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        UsePartitionTimeOnInvalidTimestamp

        +
        public UsePartitionTimeOnInvalidTimestamp()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        onInvalidTimestamp

        +
        public long onInvalidTimestamp(ConsumerRecord<Object,Object> record, + long recordTimestamp, + long partitionTime) + throws StreamsException
        +
        Returns the current stream-time as new timestamp for the record.
        +
        +
        Parameters:
        +
        record - a data record
        +
        recordTimestamp - the timestamp extractor from the record
        +
        partitionTime - the highest extracted valid timestamp of the current record's partition˙ (could be -1 if unknown)
        +
        Returns:
        +
        the provided highest extracted valid timestamp as new timestamp for the record
        +
        Throws:
        +
        StreamsException - if highest extracted valid timestamp is unknown
        +
        +
        +
      • +
      • +
        +

        extract

        +
        public long extract(ConsumerRecord<Object,Object> record, + long partitionTime)
        +
        Extracts the embedded metadata timestamp from the given ConsumerRecord.
        +
        +
        Specified by:
        +
        extract in interface TimestampExtractor
        +
        Parameters:
        +
        record - a data record
        +
        partitionTime - the highest extracted valid timestamp of the current record's partition˙ (could be -1 if unknown)
        +
        Returns:
        +
        the embedded metadata timestamp of the given ConsumerRecord
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/WallclockTimestampExtractor.html b/static/41/javadoc/org/apache/kafka/streams/processor/WallclockTimestampExtractor.html new file mode 100644 index 000000000..cc31b3620 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/WallclockTimestampExtractor.html @@ -0,0 +1,193 @@ + + + + +WallclockTimestampExtractor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class WallclockTimestampExtractor

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.WallclockTimestampExtractor
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    TimestampExtractor
    +
    +
    +
    public class WallclockTimestampExtractor +extends Object +implements TimestampExtractor
    +
    Retrieves current wall clock timestamps as System.currentTimeMillis(). +

    + Using this extractor effectively provides processing-time semantics. +

    + If you need event-time semantics, use FailOnInvalidTimestamp with + built-in CreateTime or LogAppendTime timestamp (see KIP-32: Add timestamps to Kafka message for details).
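Switching an application to processing-time semantics is a matter of configuring this extractor as the default; a minimal sketch, with placeholder application id and bootstrap servers:

```java
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.processor.WallclockTimestampExtractor;

public class ProcessingTimeConfigExample {
    public static void main(String[] args) {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wallclock-demo");    // hypothetical id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
        props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG,
                  WallclockTimestampExtractor.class);
        // props is then passed to new KafkaStreams(topology, props).
    }
}
```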

    +
    +
    See Also:
    +
    + +
    +
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        WallclockTimestampExtractor

        +
        public WallclockTimestampExtractor()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        extract

        +
        public long extract(ConsumerRecord<Object,Object> record, + long partitionTime)
        +
        Return the current wall clock time as timestamp.
        +
        +
        Specified by:
        +
        extract in interface TimestampExtractor
        +
        Parameters:
        +
        record - a data record
        +
        partitionTime - the highest extracted valid timestamp of the current record's partition (could be -1 if unknown)
        +
        Returns:
        +
        the current wall clock time, expressed in milliseconds since midnight, January 1, 1970 UTC
        +
        +
        +
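        For comparison, a minimal custom TimestampExtractor is sketched below (an illustration, not part of this Javadoc): it keeps the embedded event-time when the record timestamp is valid and otherwise falls back to wall-clock time.

            import org.apache.kafka.clients.consumer.ConsumerRecord;
            import org.apache.kafka.streams.processor.TimestampExtractor;

            // Hypothetical extractor: prefer the embedded record timestamp, fall back to wall-clock time.
            public class WallclockFallbackTimestampExtractor implements TimestampExtractor {
                @Override
                public long extract(final ConsumerRecord<Object, Object> record, final long partitionTime) {
                    final long embedded = record.timestamp();
                    return embedded >= 0 ? embedded : System.currentTimeMillis();
                }
            }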
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/ContextualFixedKeyProcessor.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/ContextualFixedKeyProcessor.html new file mode 100644 index 000000000..5b625ab5c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/ContextualFixedKeyProcessor.html @@ -0,0 +1,162 @@ + + + + +ContextualFixedKeyProcessor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ContextualFixedKeyProcessor<KIn,VIn,VOut>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.api.ContextualFixedKeyProcessor<KIn,VIn,VOut>
    +
    +
    +
    +
    Type Parameters:
    +
    KIn - the type of input keys
    +
    VIn - the type of input values
    +
    VOut - the type of output values
    +
    +
    +
    All Implemented Interfaces:
    +
    FixedKeyProcessor<KIn,VIn,VOut>
    +
    +
    +
    public abstract class ContextualFixedKeyProcessor<KIn,VIn,VOut> +extends Object +implements FixedKeyProcessor<KIn,VIn,VOut>
    +
    An abstract implementation of FixedKeyProcessor that manages the FixedKeyProcessorContext instance.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        init

        +
        public void init(FixedKeyProcessorContext<KIn,VOut> context)
        +
        Description copied from interface: FixedKeyProcessor
        +
        Initialize this processor with the given context. The framework ensures this is called once per processor when the topology + that contains it is initialized. When the framework is done with the processor, FixedKeyProcessor.close() will be called on it; the + framework may later re-use the processor by calling #init() again. +

        + The provided context can be used to access topology and record metadata, to + schedule a method to be + called periodically and to access attached StateStores.

        +
        +
        Specified by:
        +
        init in interface FixedKeyProcessor<KIn,VIn,VOut>
        +
        Parameters:
        +
        context - the context; may not be null
        +
        +
        +
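        A minimal sketch of extending this class (illustrative only; it assumes the protected context() accessor that the base class exposes to subclasses):

            import org.apache.kafka.streams.processor.api.ContextualFixedKeyProcessor;
            import org.apache.kafka.streams.processor.api.FixedKeyRecord;

            // Hypothetical processor: upper-cases String values; the base class stores the
            // context passed to init(), so only process() needs to be implemented.
            public class UpperCaseValueProcessor extends ContextualFixedKeyProcessor<String, String, String> {
                @Override
                public void process(final FixedKeyRecord<String, String> record) {
                    final String value = record.value();
                    context().forward(record.withValue(value == null ? null : value.toUpperCase()));
                }
            }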
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/ContextualProcessor.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/ContextualProcessor.html new file mode 100644 index 000000000..631659ad8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/ContextualProcessor.html @@ -0,0 +1,163 @@ + + + + +ContextualProcessor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ContextualProcessor<KIn,VIn,KOut,VOut>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.api.ContextualProcessor<KIn,VIn,KOut,VOut>
    +
    +
    +
    +
    Type Parameters:
    +
    KIn - the type of input keys
    +
    VIn - the type of input values
    +
    KOut - the type of output keys
    +
    VOut - the type of output values
    +
    +
    +
    All Implemented Interfaces:
    +
    Processor<KIn,VIn,KOut,VOut>
    +
    +
    +
    public abstract class ContextualProcessor<KIn,VIn,KOut,VOut> +extends Object +implements Processor<KIn,VIn,KOut,VOut>
    +
    An abstract implementation of Processor that manages the ProcessorContext instance.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        init

        +
        public void init(ProcessorContext<KOut,VOut> context)
        +
        Description copied from interface: Processor
        +
        Initialize this processor with the given context. The framework ensures this is called once per processor when the topology + that contains it is initialized. When the framework is done with the processor, Processor.close() will be called on it; the + framework may later re-use the processor by calling #init() again. +

        + The provided context can be used to access topology and record meta data, to + schedule a method to be + called periodically and to access attached StateStores.

        +
        +
        Specified by:
        +
        init in interface Processor<KIn,VIn,KOut,VOut>
        +
        Parameters:
        +
        context - the context; may not be null
        +
        +
        +
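        A minimal sketch of extending this class (illustrative only; it assumes the protected context() accessor provided to subclasses):

            import org.apache.kafka.streams.processor.api.ContextualProcessor;
            import org.apache.kafka.streams.processor.api.Record;

            // Hypothetical processor: re-keys each record by the length of its String value.
            public class RekeyByValueLengthProcessor extends ContextualProcessor<String, String, Integer, String> {
                @Override
                public void process(final Record<String, String> record) {
                    final int newKey = record.value() == null ? 0 : record.value().length();
                    context().forward(record.withKey(newKey));
                }
            }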
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/FixedKeyProcessor.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/FixedKeyProcessor.html new file mode 100644 index 000000000..c4d176c89 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/FixedKeyProcessor.html @@ -0,0 +1,185 @@ + + + + +FixedKeyProcessor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface FixedKeyProcessor<KIn,VIn,VOut>

    +
    +
    +
    +
    Type Parameters:
    +
    KIn - the type of input keys
    +
    VIn - the type of input values
    +
    VOut - the type of output values
    +
    +
    +
    All Known Implementing Classes:
    +
    ContextualFixedKeyProcessor
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface FixedKeyProcessor<KIn,VIn,VOut>
    +
    A processor of key-value pair records where keys are immutable.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      default void
      + +
      +
      Close this processor and clean up any resources.
      +
      +
      default void
      + +
      +
      Initialize this processor with the given context.
      +
      +
      void
      + +
      +
      Process the record.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        init

        +
        default void init(FixedKeyProcessorContext<KIn,VOut> context)
        +
        Initialize this processor with the given context. The framework ensures this is called once per processor when the topology + that contains it is initialized. When the framework is done with the processor, close() will be called on it; the + framework may later re-use the processor by calling #init() again. +

        + The provided context can be used to access topology and record metadata, to + schedule a method to be + called periodically and to access attached StateStores.

        +
        +
        Parameters:
        +
        context - the context; may not be null
        +
        +
        +
      • +
      • +
        +

        process

        +
        void process(FixedKeyRecord<KIn,VIn> record)
        +
        Process the record. Note that record metadata is undefined in cases such as a forward call from a punctuator.
        +
        +
        Parameters:
        +
        record - the record to process
        +
        +
        +
      • +
      • +
        +

        close

        +
        default void close()
        +
        Close this processor and clean up any resources. Be aware that #close() is called after an internal cleanup. + Thus, it is not possible to write anything to Kafka as underlying clients are already closed. The framework may + later re-use this processor by calling #init() on it again. +

        + Note: Do not close any streams managed resources, like StateStores here, as they are managed by the library.

        +
        +
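        A stateful implementation sketch (illustrative; the store name "counts" is an assumption, and the store must be connected to this processor in the topology):

            import org.apache.kafka.streams.processor.api.FixedKeyProcessor;
            import org.apache.kafka.streams.processor.api.FixedKeyProcessorContext;
            import org.apache.kafka.streams.processor.api.FixedKeyRecord;
            import org.apache.kafka.streams.state.KeyValueStore;

            // Hypothetical processor: counts values per key and forwards the running count.
            public class CountingValueProcessor implements FixedKeyProcessor<String, String, Long> {
                private FixedKeyProcessorContext<String, Long> context;
                private KeyValueStore<String, Long> counts;

                @Override
                public void init(final FixedKeyProcessorContext<String, Long> context) {
                    this.context = context;
                    this.counts = context.getStateStore("counts"); // assumed store name
                }

                @Override
                public void process(final FixedKeyRecord<String, String> record) {
                    final Long previous = counts.get(record.key());
                    final long updated = (previous == null ? 0L : previous) + 1L;
                    counts.put(record.key(), updated);
                    context.forward(record.withValue(updated));
                }

                @Override
                public void close() {
                    // State stores are managed by the Streams runtime; nothing to close here.
                }
            }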
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/FixedKeyProcessorContext.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/FixedKeyProcessorContext.html new file mode 100644 index 000000000..37a769bc7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/FixedKeyProcessorContext.html @@ -0,0 +1,227 @@ + + + + +FixedKeyProcessorContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface FixedKeyProcessorContext<KForward,VForward>

    +
    +
    +
    +
    Type Parameters:
    +
    KForward - a bound on the types of keys that may be forwarded
    +
    VForward - a bound on the types of values that may be forwarded
    +
    +
    +
    All Superinterfaces:
    +
    ProcessingContext
    +
    +
    +
    public interface FixedKeyProcessorContext<KForward,VForward> +extends ProcessingContext
    +
    Processor context interface for FixedKeyRecord.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        forward

        +
        <K extends KForward, +V extends VForward> void forward(FixedKeyRecord<K,V> record)
        +
        Forward a record to all child processors. +

        + Note that the forwarded FixedKeyRecord is shared between the parent and child + processors. And of course, the parent may forward the same object to multiple children, + and the child may forward it to grandchildren, etc. Therefore, you should be mindful + of mutability. +

        + The FixedKeyRecord class itself is immutable (all the setter-style methods return an + independent copy of the instance). However, the value and headers referenced by + the Record may themselves be mutable. +

        + Some programs may opt to make use of this mutability for high performance, in which case + the input record may be mutated and then forwarded by each FixedKeyProcessor. However, + most applications should instead favor safety. +

        + Forwarding records safely simply means to make a copy of the record before you mutate it. + This is trivial when using the FixedKeyRecord.withValue(Object), + and FixedKeyRecord.withTimestamp(long) methods, as each of these methods make a copy of the + record as a matter of course. But a little extra care must be taken with headers, since + the Header class is mutable. The easiest way to + safely handle headers is to use the FixedKeyRecord constructors to make a copy before + modifying headers. +

        + In other words, this would be considered unsafe: + + process(FixedKeyRecord inputRecord) { + inputRecord.headers().add(...); + context.forward(inputRecord); + } + + This is unsafe because the parent, and potentially siblings, grandparents, etc., + all will see this modification to their shared Headers reference. This is a violation + of causality and could lead to undefined behavior. +

        + A safe usage would look like this: + + process(FixedKeyRecord inputRecord) { + // makes a copy of the headers + FixedKeyRecord toForward = inputRecord.withHeaders(inputRecord.headers()); + // Other options to create a safe copy are: + // * use any copy-on-write method, which makes a copy of all fields: + // toForward = inputRecord.withValue(); + // * explicitly copy all fields: + // toForward = new FixedKeyRecord(inputRecord.key(), inputRecord.value(), inputRecord.timestamp(), inputRecord.headers()); + // * create a fresh, empty Headers: + // toForward = new FixedKeyRecord(inputRecord.key(), inputRecord.value(), inputRecord.timestamp()); + // * etc. + + // now, we are modifying our own independent copy of the headers. + toForward.headers().add(...); + context.forward(toForward); + } +

        +
        +
        Parameters:
        +
        record - The record to forward to all children
        +
        +
        +
      • +
      • +
        +

        forward

        +
        <K extends KForward, +V extends VForward> void forward(FixedKeyRecord<K,V> record, + String childName)
        +
        Forward a record to the specified child processor. + See forward(FixedKeyRecord) for considerations.
        +
        +
        Parameters:
        +
        record - The record to forward
        +
        childName - The name of the child processor to receive the record
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/FixedKeyProcessorSupplier.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/FixedKeyProcessorSupplier.html new file mode 100644 index 000000000..3d7b36d1d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/FixedKeyProcessorSupplier.html @@ -0,0 +1,165 @@ + + + + +FixedKeyProcessorSupplier (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface FixedKeyProcessorSupplier<KIn,VIn,VOut>

    +
    +
    +
    +
    Type Parameters:
    +
    KIn - the type of input keys
    +
    VIn - the type of input values
    +
    VOut - the type of output values
    +
    +
    +
    All Superinterfaces:
    +
    ConnectedStoreProvider, Supplier<FixedKeyProcessor<KIn,VIn,VOut>>
    +
    +
    +
    All Known Subinterfaces:
    +
    WrappedFixedKeyProcessorSupplier<KIn,VIn,VOut>
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface FixedKeyProcessorSupplier<KIn,VIn,VOut> +extends ConnectedStoreProvider, Supplier<FixedKeyProcessor<KIn,VIn,VOut>>
    +
    A processor supplier that can create one or more FixedKeyProcessor instances. +

    + The supplier should always generate a new instance each time get() gets called. Creating + a single FixedKeyProcessor object and returning the same object reference in get() would be + a violation of the supplier pattern and leads to runtime exceptions.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        get

        + +
        Return a newly constructed FixedKeyProcessor instance. + The supplier should always generate a new instance each time FixedKeyProcessorSupplier#get() gets called. +

        + Creating a single FixedKeyProcessor object and returning the same object reference in FixedKeyProcessorSupplier#get() + is a violation of the supplier pattern and leads to runtime exceptions.

        +
        +
        Specified by:
        +
        get in interface Supplier<FixedKeyProcessor<KIn,VIn,VOut>>
        +
        Returns:
        +
        a new FixedKeyProcessor instance
        +
        +
        +
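        A sketch of a conforming supplier (illustrative names): every get() call constructs a fresh processor instance rather than reusing one.

            import org.apache.kafka.streams.processor.api.ContextualFixedKeyProcessor;
            import org.apache.kafka.streams.processor.api.FixedKeyProcessor;
            import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier;
            import org.apache.kafka.streams.processor.api.FixedKeyRecord;

            // Hypothetical supplier: never caches or shares a processor instance across get() calls.
            public class TrimValueProcessorSupplier implements FixedKeyProcessorSupplier<String, String, String> {
                @Override
                public FixedKeyProcessor<String, String, String> get() {
                    return new ContextualFixedKeyProcessor<String, String, String>() {
                        @Override
                        public void process(final FixedKeyRecord<String, String> record) {
                            final String value = record.value();
                            context().forward(record.withValue(value == null ? null : value.trim()));
                        }
                    };
                }
            }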
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/FixedKeyRecord.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/FixedKeyRecord.html new file mode 100644 index 000000000..f98e59a17 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/FixedKeyRecord.html @@ -0,0 +1,289 @@ + + + + +FixedKeyRecord (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class FixedKeyRecord<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.api.FixedKeyRecord<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - The type of the fixed key
    +
    V - The type of the value
    +
    +
    +
    public final class FixedKeyRecord<K,V> +extends Object
    +
    A data class representing an incoming record with fixed key for processing in a FixedKeyProcessor + or a record to forward to downstream processors via FixedKeyProcessorContext. + + This class encapsulates all the data attributes of a record: the key and value, but + also the timestamp of the record and any record headers. + Though the key is not allowed to be changed. + + This class is immutable, though the objects referenced in the attributes of this class + may themselves be mutable.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        key

        +
        public K key()
        +
        The key of the record. May be null.
        +
        +
      • +
      • +
        +

        value

        +
        public V value()
        +
        The value of the record. May be null.
        +
        +
      • +
      • +
        +

        timestamp

        +
        public long timestamp()
        +
        The timestamp of the record. Will never be negative.
        +
        +
      • +
      • +
        +

        headers

        +
        public Headers headers()
        +
        The headers of the record. Never null.
        +
        +
      • +
      • +
        +

        withValue

        +
        public <NewV> FixedKeyRecord<K,NewV> withValue(NewV value)
        +
        A convenient way to produce a new record if you only need to change the value. + + Copies the attributes of this record with the value replaced.
        +
        +
        Type Parameters:
        +
        NewV - The type of the new record's value.
        +
        Parameters:
        +
        value - The value of the result record.
        +
        Returns:
        +
        A new Record instance with all the same attributes (except that the value is replaced).
        +
        +
        +
      • +
      • +
        +

        withTimestamp

        +
        public FixedKeyRecord<K,V> withTimestamp(long timestamp)
        +
        A convenient way to produce a new record if you only need to change the timestamp. + + Copies the attributes of this record with the timestamp replaced.
        +
        +
        Parameters:
        +
        timestamp - The timestamp of the result record.
        +
        Returns:
        +
        A new Record instance with all the same attributes (except that the timestamp is replaced).
        +
        +
        +
      • +
      • +
        +

        withHeaders

        +
        public FixedKeyRecord<K,V> withHeaders(Headers headers)
        +
        A convenient way to produce a new record if you only need to change the headers. + + Copies the attributes of this record with the headers replaced. + Also makes a copy of the provided headers. + + See FixedKeyProcessorContext.forward(FixedKeyRecord) for + considerations around mutability of keys, values, and headers.
        +
        +
        Parameters:
        +
        headers - The headers of the result record.
        +
        Returns:
        +
        A new Record instance with all the same attributes (except that the headers are replaced).
        +
        +
        +
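        An illustrative helper (hypothetical names) showing the copy-on-write style: each with*() call returns an independent copy, leaving the input record untouched.

            import org.apache.kafka.streams.processor.api.FixedKeyProcessorContext;
            import org.apache.kafka.streams.processor.api.FixedKeyRecord;

            public final class FixedKeyRecordCopyExample {
                private FixedKeyRecordCopyExample() { }

                // Intended to be called from within a FixedKeyProcessor, which receives both arguments.
                public static void forwardShiftedCopy(final FixedKeyRecord<String, String> input,
                                                      final FixedKeyProcessorContext<String, String> context) {
                    final FixedKeyRecord<String, String> copy = input
                            .withValue(input.value())                // copy with a (here unchanged) value
                            .withTimestamp(input.timestamp() + 1L);  // copy again with a shifted timestamp
                    context.forward(copy);                           // the original record is not mutated
                }
            }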
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/InternalFixedKeyRecordFactory.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/InternalFixedKeyRecordFactory.html new file mode 100644 index 000000000..dfe7f3ee6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/InternalFixedKeyRecordFactory.html @@ -0,0 +1,147 @@ + + + + +InternalFixedKeyRecordFactory (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class InternalFixedKeyRecordFactory

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.api.InternalFixedKeyRecordFactory
    +
    +
    +
    +
    public final class InternalFixedKeyRecordFactory +extends Object
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        create

        +
        public static <KIn, +VIn> FixedKeyRecord<KIn,VIn> create(Record<KIn,VIn> record)
        +
        The only allowed way to create FixedKeyRecords. +

        + DO NOT USE THIS FACTORY OUTSIDE THE FRAMEWORK. + This could produce undesired results by not partitioning the record properly.

        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/MockProcessorContext.CapturedForward.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/MockProcessorContext.CapturedForward.html new file mode 100644 index 000000000..16349e8fc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/MockProcessorContext.CapturedForward.html @@ -0,0 +1,235 @@ + + + + +MockProcessorContext.CapturedForward (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MockProcessorContext.CapturedForward<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedForward<K,V>
    +
    +
    +
    +
    Enclosing class:
    +
    MockProcessorContext<KForward,VForward>
    +
    +
    +
    public static final class MockProcessorContext.CapturedForward<K,V> +extends Object
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        CapturedForward

        +
        public CapturedForward(Record<K,V> record)
        +
        +
      • +
      • +
        +

        CapturedForward

        +
        public CapturedForward(Record<K,V> record, + Optional<String> childName)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        childName

        +
        public Optional<String> childName()
        +
        The child this data was forwarded to.
        +
        +
        Returns:
        +
        If present, the child name the record was forwarded to. + If empty, the forward was a broadcast.
        +
        +
        +
      • +
      • +
        +

        record

        +
        public Record<K,V> record()
        +
        The record that was forwarded.
        +
        +
        Returns:
        +
        The forwarded record. Not null.
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/MockProcessorContext.CapturedPunctuator.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/MockProcessorContext.CapturedPunctuator.html new file mode 100644 index 000000000..83e26d1ef --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/MockProcessorContext.CapturedPunctuator.html @@ -0,0 +1,172 @@ + + + + +MockProcessorContext.CapturedPunctuator (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MockProcessorContext.CapturedPunctuator

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedPunctuator
    +
    +
    +
    +
    Enclosing class:
    +
    MockProcessorContext<KForward,VForward>
    +
    +
    +
    public static final class MockProcessorContext.CapturedPunctuator +extends Object
    +
    MockProcessorContext.CapturedPunctuator holds captured punctuators, along with their scheduling information.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        getInterval

        +
        public Duration getInterval()
        +
        +
      • +
      • +
        +

        getType

        +
        public PunctuationType getType()
        +
        +
      • +
      • +
        +

        getPunctuator

        +
        public Punctuator getPunctuator()
        +
        +
      • +
      • +
        +

        cancel

        +
        public void cancel()
        +
        +
      • +
      • +
        +

        cancelled

        +
        public boolean cancelled()
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/MockProcessorContext.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/MockProcessorContext.html new file mode 100644 index 000000000..287e552a8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/MockProcessorContext.html @@ -0,0 +1,862 @@ + + + + +MockProcessorContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MockProcessorContext<KForward,VForward>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.api.MockProcessorContext<KForward,VForward>
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    ProcessingContext, ProcessorContext<KForward,VForward>, org.apache.kafka.streams.processor.internals.RecordCollector.Supplier
    +
    +
    +
    public class MockProcessorContext<KForward,VForward> +extends Object +implements ProcessorContext<KForward,VForward>, org.apache.kafka.streams.processor.internals.RecordCollector.Supplier
    +
    MockProcessorContext is a mock of ProcessorContext for users to test their Processor + implementations. +

    + The tests for this class (org.apache.kafka.streams.MockProcessorContextTest) include several behavioral + tests that serve as example usage. +

    + Note that this class does not take any automated actions (such as firing scheduled punctuators). + It simply captures any data it witnesses. + If you require more automated tests, we recommend wrapping your Processor in a minimal source-processor-sink + Topology and using the TopologyTestDriver.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        MockProcessorContext

        +
        public MockProcessorContext()
        +
        Create a MockProcessorContext with dummy config and taskId and null stateDir. + Most unit tests using this mock won't need to know the taskId, + and most unit tests should be able to get by with the + InMemoryKeyValueStore, so the stateDir won't matter.
        +
        +
      • +
      • +
        +

        MockProcessorContext

        +
        public MockProcessorContext(Properties config)
        +
        Create a MockProcessorContext with dummy taskId and null stateDir. + Most unit tests using this mock won't need to know the taskId, + and most unit tests should be able to get by with the + InMemoryKeyValueStore, so the stateDir won't matter.
        +
        +
        Parameters:
        +
        config - a Properties object, used to configure the context and the processor.
        +
        +
        +
      • +
      • +
        +

        MockProcessorContext

        +
        public MockProcessorContext(Properties config, + TaskId taskId, + File stateDir)
        +
        Create a MockProcessorContext with a specified taskId and stateDir.
        +
        +
        Parameters:
        +
        config - a Properties object, used to configure the context and the processor.
        +
        taskId - a TaskId, which the context makes available via taskId().
        +
        stateDir - a File, which the context makes available via stateDir().
        +
        +
        +
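        A minimal test-style sketch (no test framework shown; the processor and record contents are illustrative): drive a Processor directly and inspect what it forwarded.

            import org.apache.kafka.streams.processor.api.MockProcessorContext;
            import org.apache.kafka.streams.processor.api.Processor;
            import org.apache.kafka.streams.processor.api.Record;

            public class MockProcessorContextUsageExample {
                public static void main(final String[] args) {
                    final MockProcessorContext<Integer, String> context = new MockProcessorContext<>();

                    // A trivial processor (lambda over the functional process() method) that
                    // re-keys each record by the length of its String value.
                    final Processor<String, String, Integer, String> processor =
                            record -> context.forward(record.withKey(record.value().length()));

                    processor.init(context);
                    processor.process(new Record<>("ignored", "hello", 0L));

                    // forwarded() captures everything emitted through the mock context.
                    System.out.println(context.forwarded().get(0).record()); // key should be 5
                    processor.close();
                }
            }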
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        applicationId

        +
        public String applicationId()
        +
        Description copied from interface: ProcessingContext
        +
        Return the application id.
        +
        +
        Specified by:
        +
        applicationId in interface ProcessingContext
        +
        Returns:
        +
        the application id
        +
        +
        +
      • +
      • +
        +

        taskId

        +
        public TaskId taskId()
        +
        Description copied from interface: ProcessingContext
        +
        Return the task id.
        +
        +
        Specified by:
        +
        taskId in interface ProcessingContext
        +
        Returns:
        +
        the task id
        +
        +
        +
      • +
      • +
        +

        appConfigs

        +
        public Map<String,Object> appConfigs()
        +
        Description copied from interface: ProcessingContext
        +
        Returns all the application config properties as key/value pairs. + +

        The config properties are defined in the StreamsConfig + object and associated to the ProcessorContext. + +

        The type of the values is dependent on the type of the property + (e.g. the value of DEFAULT_KEY_SERDE_CLASS_CONFIG + will be of type Class, even if it was specified as a String to + StreamsConfig(Map)).

        +
        +
        Specified by:
        +
        appConfigs in interface ProcessingContext
        +
        Returns:
        +
        all the key/values from the StreamsConfig properties
        +
        +
        +
      • +
      • +
        +

        appConfigsWithPrefix

        +
        public Map<String,Object> appConfigsWithPrefix(String prefix)
        +
        Description copied from interface: ProcessingContext
        +
        Return all the application config properties with the given key prefix, as key/value pairs + stripping the prefix. + +

        The config properties are defined in the StreamsConfig + object and associated to the ProcessorContext.

        +
        +
        Specified by:
        +
        appConfigsWithPrefix in interface ProcessingContext
        +
        Parameters:
        +
        prefix - the properties prefix
        +
        Returns:
        +
        the key/values matching the given prefix from the StreamsConfig properties.
        +
        +
        +
      • +
      • +
        +

        currentSystemTimeMs

        +
        public long currentSystemTimeMs()
        +
        Description copied from interface: ProcessingContext
        +
        Return the current system timestamp (also called wall-clock time) in milliseconds. + +

        Note: this method returns the internally cached system timestamp from the Kafka Stream runtime. + Thus, it may return a different value compared to System.currentTimeMillis().

        +
        +
        Specified by:
        +
        currentSystemTimeMs in interface ProcessingContext
        +
        Returns:
        +
        the current system timestamp in milliseconds
        +
        +
        +
      • +
      • +
        +

        currentStreamTimeMs

        +
        public long currentStreamTimeMs()
        +
        Description copied from interface: ProcessingContext
        +
        Return the current stream-time in milliseconds. + +

        Stream-time is the maximum observed record timestamp so far + (including the currently processed record), i.e., it can be considered a high-watermark. + Stream-time is tracked on a per-task basis and is preserved across restarts and during task migration. + +

        Note: this method is not supported for global processors (cf. + Topology#addGlobalStore(...) + and StreamsBuilder.addGlobalStore(...)), + because there is no concept of stream-time for this case. + Calling this method in a global processor will result in an UnsupportedOperationException.

        +
        +
        Specified by:
        +
        currentStreamTimeMs in interface ProcessingContext
        +
        Returns:
        +
        the current stream-time in milliseconds
        +
        +
        +
      • +
      • +
        +

        keySerde

        +
        public Serde<?> keySerde()
        +
        Description copied from interface: ProcessingContext
        +
        Return the default key serde.
        +
        +
        Specified by:
        +
        keySerde in interface ProcessingContext
        +
        Returns:
        +
        the key serializer
        +
        +
        +
      • +
      • +
        +

        valueSerde

        +
        public Serde<?> valueSerde()
        +
        Description copied from interface: ProcessingContext
        +
        Return the default value serde.
        +
        +
        Specified by:
        +
        valueSerde in interface ProcessingContext
        +
        Returns:
        +
        the value serializer
        +
        +
        +
      • +
      • +
        +

        stateDir

        +
        public File stateDir()
        +
        Description copied from interface: ProcessingContext
        +
        Return the state directory for the partition.
        +
        +
        Specified by:
        +
        stateDir in interface ProcessingContext
        +
        Returns:
        +
        the state directory
        +
        +
        +
      • +
      • +
        +

        metrics

        +
        public StreamsMetrics metrics()
        +
        Description copied from interface: ProcessingContext
        +
        Return Metrics instance.
        +
        +
        Specified by:
        +
        metrics in interface ProcessingContext
        +
        Returns:
        +
        StreamsMetrics
        +
        +
        +
      • +
      • +
        +

        setRecordMetadata

        +
        public void setRecordMetadata(String topic, + int partition, + long offset)
        +
        The context exposes these metadata for use in the processor. Normally, they are set by the Kafka Streams framework, + but for the purpose of driving unit tests, you can set them directly.
        +
        +
        Parameters:
        +
        topic - A topic name
        +
        partition - A partition number
        +
        offset - A record offset
        +
        +
        +
      • +
      • +
        +

        setCurrentSystemTimeMs

        +
        public void setCurrentSystemTimeMs(long currentSystemTimeMs)
        +
        +
      • +
      • +
        +

        setCurrentStreamTimeMs

        +
        public void setCurrentStreamTimeMs(long currentStreamTimeMs)
        +
        +
      • +
      • +
        +

        recordMetadata

        +
        public Optional<RecordMetadata> recordMetadata()
        +
        Description copied from interface: ProcessingContext
        +
        Return the metadata of the current record if available. Processors may be invoked to + process a source record from an input topic, to run a scheduled punctuation + (see ProcessingContext.schedule(Duration, PunctuationType, Punctuator)), + or because a parent processor called forward(Record). +

        + In the case of a punctuation, there is no source record, so this metadata would be + undefined. Note that when a punctuator invokes forward(Record), + downstream processors will receive the forwarded record as a regular + Processor.process(Record) or FixedKeyProcessor.process(FixedKeyRecord) invocation. + In other words, it wouldn't be apparent to + downstream processors whether the record being processed came from an input topic + or punctuation and therefore whether this metadata is defined. This is why + the return type of this method is Optional. +

        + If there is any possibility of punctuators upstream, any access + to this field should consider the case of + "recordMetadata().isPresent() == false". + Of course, it would be safest to always guard this condition.

        +
        +
        Specified by:
        +
        recordMetadata in interface ProcessingContext
        +
        +
        +
      • +
      • +
        +

        getStateStore

        +
        public <S extends StateStore> S getStateStore(String name)
        +
        Description copied from interface: ProcessingContext
        +
        Get the state store given the store name. + +

        The returned state store represent one shard of the overall state, which belongs to the current task. + The returned shard of the state store may only be used by the current Processor or + FixedKeyProcessor instance. + Sharing a shard across different processors (ie, from different "sibling" tasks; same sub-topology but different + partition) may lead to data corruption and/or data loss.

        +
        +
        Specified by:
        +
        getStateStore in interface ProcessingContext
        +
        Type Parameters:
        +
        S - The type or interface of the store to return
        +
        Parameters:
        +
        name - The store name
        +
        Returns:
        +
        The state store instance
        +
        +
        +
      • +
      • +
        +

        addStateStore

        +
        public <S extends StateStore> void addStateStore(S stateStore)
        +
        +
      • +
      • +
        +

        schedule

        +
        public Cancellable schedule(Duration interval, + PunctuationType type, + Punctuator callback)
        +
        Description copied from interface: ProcessingContext
        +
        Schedule a periodic operation for processors. A processor may call this method during + initialization or + processing to + schedule a periodic callback — called a punctuation — to Punctuator.punctuate(long). + The type parameter controls what notion of time is used for punctuation: +
          +
        • PunctuationType.STREAM_TIME — uses "stream time", which is advanced by the processing of messages + in accordance with the timestamp as extracted by the TimestampExtractor in use. + The first punctuation will be triggered by the first record that is processed. + NOTE: Only advanced if messages arrive
        • +
        • PunctuationType.WALL_CLOCK_TIME — uses system time (the wall-clock time), + which is advanced independent of whether new messages arrive. + The first punctuation will be triggered after interval has elapsed. + NOTE: This is best effort only as its granularity is limited by how long an iteration of the + processing loop takes to complete
        • +
        + + Skipping punctuations: Punctuations will not be triggered more than once at any given timestamp. + This means that "missed" punctuation will be skipped. + It's possible to "miss" a punctuation if: +
        +
        +
        Specified by:
        +
        schedule in interface ProcessingContext
        +
        Parameters:
        +
        interval - the time interval between punctuations (supported minimum is 1 millisecond)
        +
        type - one of: PunctuationType.STREAM_TIME, PunctuationType.WALL_CLOCK_TIME
        +
        callback - a function consuming timestamps representing the current stream or system time
        +
        Returns:
        +
        a handle allowing cancellation of the punctuation schedule established by this method
        +
        +
        +
      • +
      • +
        +

        scheduledPunctuators

        +
        public List<MockProcessorContext.CapturedPunctuator> scheduledPunctuators()
        +
        Get the punctuators scheduled so far. The returned list is not affected by subsequent calls to schedule(...).
        +
        +
        Returns:
        +
        A list of captured punctuators.
        +
        +
        +
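        Since the mock never fires punctuators on its own, a test can retrieve the captured punctuators and trigger them manually. A sketch (it assumes a processor has already registered a wall-clock punctuator via init()):

            import org.apache.kafka.streams.processor.PunctuationType;
            import org.apache.kafka.streams.processor.api.MockProcessorContext;

            public class PunctuatorTriggerExample {
                // Hypothetical helper: fire every captured, still-active wall-clock punctuator at the given timestamp.
                public static void fireWallClockPunctuators(final MockProcessorContext<?, ?> context,
                                                            final long timestampMs) {
                    for (final MockProcessorContext.CapturedPunctuator captured : context.scheduledPunctuators()) {
                        if (captured.getType() == PunctuationType.WALL_CLOCK_TIME && !captured.cancelled()) {
                            captured.getPunctuator().punctuate(timestampMs);
                        }
                    }
                }
            }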
      • +
      • +
        +

        forward

        +
        public <K extends KForward, +V extends VForward> void forward(Record<K,V> record)
        +
        Description copied from interface: ProcessorContext
        +
        Forward a record to all child processors. +

        + Note that the forwarded Record is shared between the parent and child + processors. And of course, the parent may forward the same object to multiple children, + and the child may forward it to grandchildren, etc. Therefore, you should be mindful + of mutability. +

        + The Record class itself is immutable (all the setter-style methods return an + independent copy of the instance). However, the key, value, and headers referenced by + the Record may themselves be mutable. +

        + Some programs may opt to make use of this mutability for high performance, in which case + the input record may be mutated and then forwarded by each Processor. However, + most applications should instead favor safety. +

        + Forwarding records safely simply means to make a copy of the record before you mutate it. + This is trivial when using the Record.withKey(Object), Record.withValue(Object), + and Record.withTimestamp(long) methods, as each of these methods make a copy of the + record as a matter of course. But a little extra care must be taken with headers, since + the Header class is mutable. The easiest way to + safely handle headers is to use the Record constructors to make a copy before + modifying headers. +

        + In other words, this would be considered unsafe: + + process(Record inputRecord) { + inputRecord.headers().add(...); + context.forward(inputRecord); + } + + This is unsafe because the parent, and potentially siblings, grandparents, etc., + all will see this modification to their shared Headers reference. This is a violation + of causality and could lead to undefined behavior. +

        + A safe usage would look like this: +

        
        +     process(Record inputRecord) {
        +         // makes a copy of the headers
        +         Record toForward = inputRecord.withHeaders(inputRecord.headers());
        +         // Other options to create a safe copy are:
        +         // * use any copy-on-write method, which makes a copy of all fields:
        +         //   toForward = inputRecord.withValue();
        +         // * explicitly copy all fields:
        +         //   toForward = new Record(inputRecord.key(), inputRecord.value(), inputRecord.timestamp(), inputRecord.headers());
        +         // * create a fresh, empty Headers:
        +         //   toForward = new Record(inputRecord.key(), inputRecord.value(), inputRecord.timestamp());
        +         // * etc.
        +
        +         // now, we are modifying our own independent copy of the headers.
        +         toForward.headers().add(...);
        +         context.forward(toForward);
        +     }
        + 
        +
        +
        Specified by:
        +
        forward in interface ProcessorContext<KForward,VForward>
        +
        Parameters:
        +
        record - The record to forward to all children
        +
        +
        +
      • +
      • +
        +

        forward

        +
        public <K extends KForward, +V extends VForward> void forward(Record<K,V> record, + String childName)
        +
        Description copied from interface: ProcessorContext
        +
        Forward a record to the specified child processor. + See ProcessorContext.forward(Record) for considerations.
        +
        +
        Specified by:
        +
        forward in interface ProcessorContext<KForward,VForward>
        +
        Parameters:
        +
        record - The record to forward
        +
        childName - The name of the child processor to receive the record
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        forwarded

        +
        public List<MockProcessorContext.CapturedForward<? extends KForward,? extends VForward>> forwarded()
        +
        Get all the forwarded data this context has observed. The returned list will not be + affected by subsequent interactions with the context. The data in the list is in the same order as the calls to + forward(...).
        +
        +
        Returns:
        +
        A list of records that were previously passed to the context.
        +
        +
        +
      • +
      • +
        +

        forwarded

        +
        public List<MockProcessorContext.CapturedForward<? extends KForward,? extends VForward>> forwarded(String childName)
        +
        Get all the forwarded data this context has observed for a specific child by name. + The returned list will not be affected by subsequent interactions with the context. + The data in the list is in the same order as the calls to forward(...).
        +
        +
        Parameters:
        +
        childName - The child name to retrieve forwards for
        +
        Returns:
        +
        A list of records that were previously passed to the context.
        +
        +
        +
      • +
      • +
        +

        resetForwards

        +
        public void resetForwards()
        +
        Clear the captured forwarded data.
        +
        +
      • +
      • +
        +

        commit

        +
        public void commit()
        +
        Description copied from interface: ProcessingContext
        +
        Request a commit. Note that calling commit() is only a request for a commit, but it does not execute one. + Hence, when commit() returns, no commit was executed yet. However, Kafka Streams will commit as soon + as possible, instead of waiting for next commit.interval.ms to pass.
        +
        +
        Specified by:
        +
        commit in interface ProcessingContext
        +
        +
        +
      • +
      • +
        +

        committed

        +
        public boolean committed()
        +
        Whether ProcessingContext.commit() has been called in this context.
        +
        +
        Returns:
        +
        true iff ProcessingContext.commit() has been called in this context since construction or reset.
        +
        +
        +
      • +
      • +
        +

        resetCommit

        +
        public void resetCommit()
        +
        Reset the commit capture to false (whether or not it was previously true).
        +
        +
      • +
      • +
        +

        recordCollector

        +
        public org.apache.kafka.streams.processor.internals.RecordCollector recordCollector()
        +
        +
        Specified by:
        +
        recordCollector in interface org.apache.kafka.streams.processor.internals.RecordCollector.Supplier
        +
        +
        +
      • +
      • +
        +

        getStateStoreContext

        +
        public StateStoreContext getStateStoreContext()
        +
        Used to get a StateStoreContext for use with + StateStore.init(StateStoreContext, StateStore) + if you need to initialize a store for your tests.
        +
        +
        Returns:
        +
        a StateStoreContext that delegates to this ProcessorContext.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/ProcessingContext.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/ProcessingContext.html new file mode 100644 index 000000000..40070051e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/ProcessingContext.html @@ -0,0 +1,434 @@ + + + + +ProcessingContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ProcessingContext

    +
    +
    +
    +
    All Known Subinterfaces:
    +
    FixedKeyProcessorContext<KForward,VForward>, ProcessorContext<KForward,VForward>
    +
    +
    +
    All Known Implementing Classes:
    +
    MockProcessorContext
    +
    +
    +
    public interface ProcessingContext
    +
    Processor context interface.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        applicationId

        +
        String applicationId()
        +
        Return the application id.
        +
        +
        Returns:
        +
        the application id
        +
        +
        +
      • +
      • +
        +

        taskId

        +
        TaskId taskId()
        +
        Return the task id.
        +
        +
        Returns:
        +
        the task id
        +
        +
        +
      • +
      • +
        +

        recordMetadata

        +
        Optional<RecordMetadata> recordMetadata()
        +
        Return the metadata of the current record if available. Processors may be invoked to + process a source record from an input topic, to run a scheduled punctuation + (see schedule(Duration, PunctuationType, Punctuator)), + or because a parent processor called forward(Record). +

        + In the case of a punctuation, there is no source record, so this metadata would be + undefined. Note that when a punctuator invokes forward(Record), + downstream processors will receive the forwarded record as a regular + Processor.process(Record) or FixedKeyProcessor.process(FixedKeyRecord) invocation. + In other words, it wouldn't be apparent to + downstream processors whether the record being processed came from an input topic + or punctuation and therefore whether this metadata is defined. This is why + the return type of this method is Optional. +

        + If there is any possibility of punctuators upstream, any access + to this field should consider the case of + "recordMetadata().isPresent() == false". + Of course, it would be safest to always guard this condition.

        +
        +
      • +
      • +
        +

        keySerde

        +
        Serde<?> keySerde()
        +
        Return the default key serde.
        +
        +
        Returns:
        +
        the key serializer
        +
        +
        +
      • +
      • +
        +

        valueSerde

        +
        Serde<?> valueSerde()
        +
        Return the default value serde.
        +
        +
        Returns:
        +
        the value serializer
        +
        +
        +
      • +
      • +
        +

        stateDir

        +
        File stateDir()
        +
        Return the state directory for the partition.
        +
        +
        Returns:
        +
        the state directory
        +
        +
        +
      • +
      • +
        +

        metrics

        +
        StreamsMetrics metrics()
        +
        Return Metrics instance.
        +
        +
        Returns:
        +
        StreamsMetrics
        +
        +
        +
      • +
      • +
        +

        getStateStore

        +
        <S extends StateStore> S getStateStore(String name)
        +
        Get the state store given the store name. + +

        The returned state store represent one shard of the overall state, which belongs to the current task. + The returned shard of the state store may only be used by the current Processor or + FixedKeyProcessor instance. + Sharing a shard across different processors (ie, from different "sibling" tasks; same sub-topology but different + partition) may lead to data corruption and/or data loss.

        +
        +
        Type Parameters:
        +
        S - The type or interface of the store to return
        +
        Parameters:
        +
        name - The store name
        +
        Returns:
        +
        The state store instance
        +
        Throws:
        +
        ClassCastException - if the return type isn't a type or interface of the actual returned store.
        +
        +
        +
      • +
      • +
        +

        schedule

        +
        Cancellable schedule(Duration interval, + PunctuationType type, + Punctuator callback)
        +
        Schedule a periodic operation for processors. A processor may call this method during + initialization or + processing to + schedule a periodic callback — called a punctuation — to Punctuator.punctuate(long). + The type parameter controls what notion of time is used for punctuation: +
          +
        • PunctuationType.STREAM_TIME — uses "stream time", which is advanced by the processing of messages + in accordance with the timestamp as extracted by the TimestampExtractor in use. + The first punctuation will be triggered by the first record that is processed. + NOTE: Only advanced if messages arrive
        • +
        • PunctuationType.WALL_CLOCK_TIME — uses system time (the wall-clock time), + which is advanced independent of whether new messages arrive. + The first punctuation will be triggered after interval has elapsed. + NOTE: This is best effort only as its granularity is limited by how long an iteration of the + processing loop takes to complete
        • +
        + + Skipping punctuations: Punctuations will not be triggered more than once at any given timestamp. + This means that "missed" punctuation will be skipped. + It's possible to "miss" a punctuation if: +
        +
        +
        Parameters:
        +
        interval - the time interval between punctuations (supported minimum is 1 millisecond)
        +
        type - one of: PunctuationType.STREAM_TIME, PunctuationType.WALL_CLOCK_TIME
        +
        callback - a function consuming timestamps representing the current stream or system time
        +
        Returns:
        +
        a handle allowing cancellation of the punctuation schedule established by this method
        +
        Throws:
        +
        IllegalArgumentException - if the interval is not representable in milliseconds
        +
        +
        +
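        A sketch of scheduling a punctuation from within a processor's init() (illustrative; it extends ContextualProcessor so the context is also available to process()):

            import java.time.Duration;
            import org.apache.kafka.streams.processor.PunctuationType;
            import org.apache.kafka.streams.processor.api.ContextualProcessor;
            import org.apache.kafka.streams.processor.api.ProcessorContext;
            import org.apache.kafka.streams.processor.api.Record;

            // Hypothetical processor: requests a commit every 30 seconds of wall-clock time,
            // independent of whether new records arrive.
            public class PeriodicCommitProcessor extends ContextualProcessor<String, String, String, String> {
                @Override
                public void init(final ProcessorContext<String, String> context) {
                    super.init(context);
                    context.schedule(Duration.ofSeconds(30), PunctuationType.WALL_CLOCK_TIME,
                            timestamp -> context.commit());
                }

                @Override
                public void process(final Record<String, String> record) {
                    context().forward(record); // pass records through unchanged
                }
            }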
      • +
      • +
        +

        commit

        +
        void commit()
        +
        Request a commit. Note that calling commit() is only a request for a commit, but it does not execute one. + Hence, when commit() returns, no commit was executed yet. However, Kafka Streams will commit as soon + as possible, instead of waiting for next commit.interval.ms to pass.
        +
        +
      • +
      • +
        +

        appConfigs

        +
        Map<String,Object> appConfigs()
        +
        Returns all the application config properties as key/value pairs. + +

        The config properties are defined in the StreamsConfig + object and associated to the ProcessorContext. + +

        The type of the values is dependent on the type of the property + (e.g. the value of DEFAULT_KEY_SERDE_CLASS_CONFIG + will be of type Class, even if it was specified as a String to + StreamsConfig(Map)).

        +
        +
        Returns:
        +
        all the key/values from the StreamsConfig properties
        +
        +
        +
      • +
      • +
        +

        appConfigsWithPrefix

        +
        Map<String,Object> appConfigsWithPrefix(String prefix)
        +
        Return all the application config properties with the given key prefix, as key/value pairs + stripping the prefix. + +

        The config properties are defined in the StreamsConfig + object and associated to the ProcessorContext.

        +
        +
        Parameters:
        +
        prefix - the properties prefix
        +
        Returns:
        +
        the key/values matching the given prefix from the StreamsConfig properties.
        +
        +
        +
      • +
      • +
        +

        currentSystemTimeMs

        +
        long currentSystemTimeMs()
        +
        Return the current system timestamp (also called wall-clock time) in milliseconds. + +

        Note: this method returns the internally cached system timestamp from the Kafka Stream runtime. + Thus, it may return a different value compared to System.currentTimeMillis().

        +
        +
        Returns:
        +
        the current system timestamp in milliseconds
        +
        +
        +
      • +
      • +
        +

        currentStreamTimeMs

        +
        long currentStreamTimeMs()
        +
        Return the current stream-time in milliseconds. + +

        Stream-time is the maximum observed record timestamp so far + (including the currently processed record), i.e., it can be considered a high-watermark. + Stream-time is tracked on a per-task basis and is preserved across restarts and during task migration. + +

        Note: this method is not supported for global processors (cf. + Topology#addGlobalStore(...) + and StreamsBuilder.addGlobalStore(...)), + because there is no concept of stream-time for this case. + Calling this method in a global processor will result in an UnsupportedOperationException.

        +
        +
        Returns:
        +
        the current stream-time in milliseconds
        +
        +
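The two clocks can be compared, for instance, inside a wall-clock punctuator (a sketch only; it assumes at least one record has already been processed so that stream-time is defined):

    import java.time.Duration;
    import org.apache.kafka.streams.processor.PunctuationType;
    import org.apache.kafka.streams.processor.api.Processor;
    import org.apache.kafka.streams.processor.api.ProcessorContext;
    import org.apache.kafka.streams.processor.api.Record;

    public class ClockComparingProcessor implements Processor<String, String, String, String> {

        @Override
        public void init(final ProcessorContext<String, String> context) {
            context.schedule(Duration.ofSeconds(30), PunctuationType.WALL_CLOCK_TIME, timestamp -> {
                // cached wall-clock time vs. high-watermark of observed record timestamps
                final long lagMs = context.currentSystemTimeMs() - context.currentStreamTimeMs();
                System.out.println("stream time lags wall-clock time by roughly " + lagMs + " ms");
            });
        }

        @Override
        public void process(final Record<String, String> record) { }
    }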
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/Processor.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/Processor.html new file mode 100644 index 000000000..7aef9efd6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/Processor.html @@ -0,0 +1,186 @@ + + + + +Processor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Processor<KIn,VIn,KOut,VOut>

    +
    +
    +
    +
    Type Parameters:
    +
    KIn - the type of input keys
    +
    VIn - the type of input values
    +
    KOut - the type of output keys
    +
    VOut - the type of output values
    +
    +
    +
    All Known Implementing Classes:
    +
    ContextualProcessor, ForeachProcessor
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface Processor<KIn,VIn,KOut,VOut>
    +
    A processor of key-value pair records.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      default void
      + +
      +
      Close this processor and clean up any resources.
      +
      +
      default void
      + +
      +
      Initialize this processor with the given context.
      +
      +
      void
      +
      process(Record<KIn,VIn> record)
      +
      +
      Process the record.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        init

        +
        default void init(ProcessorContext<KOut,VOut> context)
        +
Initialize this processor with the given context. The framework ensures this is called once per processor when the topology + that contains it is initialized. When the framework is done with the processor, close() will be called on it; the + framework may later re-use the processor by calling init() again. +

        + The provided context can be used to access topology and record meta data, to + schedule a method to be + called periodically and to access attached StateStores.

        +
        +
        Parameters:
        +
        context - the context; may not be null
        +
        +
        +
      • +
      • +
        +

        process

        +
        void process(Record<KIn,VIn> record)
        +
        Process the record. Note that record metadata is undefined in cases such as a forward call from a punctuator.
        +
        +
        Parameters:
        +
        record - the record to process
        +
        +
        +
      • +
      • +
        +

        close

        +
        default void close()
        +
Close this processor and clean up any resources. Be aware that close() is called after an internal cleanup. + Thus, it is not possible to write anything to Kafka as underlying clients are already closed. The framework may + later re-use this processor by calling init() on it again. +

        + Note: Do not close any streams managed resources, like StateStores here, as they are managed by the library.

        +
        +
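Putting the lifecycle together, a minimal stateful processor might look like the sketch below (not from these docs; it assumes a key-value store named "counts" has been added to the Topology and connected to this processor):

    import org.apache.kafka.streams.processor.api.Processor;
    import org.apache.kafka.streams.processor.api.ProcessorContext;
    import org.apache.kafka.streams.processor.api.Record;
    import org.apache.kafka.streams.state.KeyValueStore;

    public class CountingProcessor implements Processor<String, String, String, Long> {
        private ProcessorContext<String, Long> context;
        private KeyValueStore<String, Long> counts;

        @Override
        public void init(final ProcessorContext<String, Long> context) {
            this.context = context;
            // The store must have been added to the Topology and connected to this processor.
            this.counts = context.getStateStore("counts");
        }

        @Override
        public void process(final Record<String, String> record) {
            final Long previous = counts.get(record.key());
            final long updated = (previous == null ? 0L : previous) + 1;
            counts.put(record.key(), updated);
            context.forward(new Record<>(record.key(), updated, record.timestamp()));
        }

        @Override
        public void close() {
            // Do not close the store here; state stores are managed by Kafka Streams.
        }
    }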
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/ProcessorContext.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/ProcessorContext.html new file mode 100644 index 000000000..dde4398fb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/ProcessorContext.html @@ -0,0 +1,231 @@ + + + + +ProcessorContext (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ProcessorContext<KForward,VForward>

    +
    +
    +
    +
    Type Parameters:
    +
    KForward - a bound on the types of keys that may be forwarded
    +
    VForward - a bound on the types of values that may be forwarded
    +
    +
    +
    All Superinterfaces:
    +
    ProcessingContext
    +
    +
    +
    All Known Implementing Classes:
    +
    MockProcessorContext
    +
    +
    +
    public interface ProcessorContext<KForward,VForward> +extends ProcessingContext
    +
    Processor context interface for Record.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        forward

        +
        <K extends KForward, +V extends VForward> void forward(Record<K,V> record)
        +
        Forward a record to all child processors. +

        + Note that the forwarded Record is shared between the parent and child + processors. And of course, the parent may forward the same object to multiple children, + and the child may forward it to grandchildren, etc. Therefore, you should be mindful + of mutability. +

        + The Record class itself is immutable (all the setter-style methods return an + independent copy of the instance). However, the key, value, and headers referenced by + the Record may themselves be mutable. +

        + Some programs may opt to make use of this mutability for high performance, in which case + the input record may be mutated and then forwarded by each Processor. However, + most applications should instead favor safety. +

        + Forwarding records safely simply means to make a copy of the record before you mutate it. + This is trivial when using the Record.withKey(Object), Record.withValue(Object), + and Record.withTimestamp(long) methods, as each of these methods make a copy of the + record as a matter of course. But a little extra care must be taken with headers, since + the Header class is mutable. The easiest way to + safely handle headers is to use the Record constructors to make a copy before + modifying headers. +

+ In other words, this would be considered unsafe:
+
+     process(Record inputRecord) {
+         inputRecord.headers().add(...);
+         context.forward(inputRecord);
+     }
+
+ This is unsafe because the parent, and potentially siblings, grandparents, etc.,
+ all will see this modification to their shared Headers reference. This is a violation
+ of causality and could lead to undefined behavior.
+

        + A safe usage would look like this: +

        
        +     process(Record inputRecord) {
        +         // makes a copy of the headers
        +         Record toForward = inputRecord.withHeaders(inputRecord.headers());
        +         // Other options to create a safe copy are:
        +         // * use any copy-on-write method, which makes a copy of all fields:
        +         //   toForward = inputRecord.withValue();
        +         // * explicitly copy all fields:
        +         //   toForward = new Record(inputRecord.key(), inputRecord.value(), inputRecord.timestamp(), inputRecord.headers());
        +         // * create a fresh, empty Headers:
        +         //   toForward = new Record(inputRecord.key(), inputRecord.value(), inputRecord.timestamp());
        +         // * etc.
        +
        +         // now, we are modifying our own independent copy of the headers.
        +         toForward.headers().add(...);
        +         context.forward(toForward);
        +     }
        + 
        +
        +
        Parameters:
        +
        record - The record to forward to all children
        +
        +
        +
      • +
      • +
        +

        forward

        +
        <K extends KForward, +V extends VForward> void forward(Record<K,V> record, + String childName)
        +
        Forward a record to the specified child processor. + See forward(Record) for considerations.
        +
        +
        Parameters:
        +
        record - The record to forward
        +
        childName - The name of the child processor to receive the record
        +
        See Also:
        +
        + +
        +
        +
        +
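As an illustration of forwarding to a named child (a sketch; the sink names "valid-sink" and "invalid-sink" are made up and would have to match the names used when wiring the Topology):

    import org.apache.kafka.streams.processor.api.Processor;
    import org.apache.kafka.streams.processor.api.ProcessorContext;
    import org.apache.kafka.streams.processor.api.Record;

    public class RoutingProcessor implements Processor<String, String, String, String> {
        private ProcessorContext<String, String> context;

        @Override
        public void init(final ProcessorContext<String, String> context) {
            this.context = context;
        }

        @Override
        public void process(final Record<String, String> record) {
            // Route to exactly one of the two children added as
            // topology.addSink("valid-sink", ...) / topology.addSink("invalid-sink", ...).
            if (record.value() != null && !record.value().isEmpty()) {
                context.forward(record, "valid-sink");
            } else {
                context.forward(record, "invalid-sink");
            }
        }
    }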
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/ProcessorSupplier.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/ProcessorSupplier.html new file mode 100644 index 000000000..bdb9e1468 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/ProcessorSupplier.html @@ -0,0 +1,170 @@ + + + + +ProcessorSupplier (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ProcessorSupplier<KIn,VIn,KOut,VOut>

    +
    +
    +
    +
    Type Parameters:
    +
    KIn - the type of input keys
    +
    VIn - the type of input values
    +
    KOut - the type of output keys
    +
    VOut - the type of output values
    +
    +
    +
    All Superinterfaces:
    +
    ConnectedStoreProvider, Supplier<Processor<KIn,VIn,KOut,VOut>>
    +
    +
    +
    All Known Subinterfaces:
    +
    WrappedProcessorSupplier<KIn,VIn,KOut,VOut>
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface ProcessorSupplier<KIn,VIn,KOut,VOut> +extends ConnectedStoreProvider, Supplier<Processor<KIn,VIn,KOut,VOut>>
    +
    A processor supplier that can create one or more Processor instances. +

+ It is used in Topology for adding new processor operators, whose generated + topology can then be replicated (thus creating one or more Processor instances) + and distributed to multiple stream threads. + + The supplier should always generate a new instance each time get() gets called. Creating + a single Processor object and returning the same object reference in get() would be + a violation of the supplier pattern and lead to runtime exceptions.
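A sketch of a supplier that returns a fresh instance per call and also declares its own state store via stores() (it reuses the hypothetical CountingProcessor sketched above, and the store name "counts" is an assumption):

    import java.util.Set;
    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.processor.api.Processor;
    import org.apache.kafka.streams.processor.api.ProcessorSupplier;
    import org.apache.kafka.streams.state.StoreBuilder;
    import org.apache.kafka.streams.state.Stores;

    public class CountingProcessorSupplier implements ProcessorSupplier<String, String, String, Long> {

        @Override
        public Processor<String, String, String, Long> get() {
            // A fresh instance on every call: each task/thread gets its own Processor.
            return new CountingProcessor();
        }

        @Override
        public Set<StoreBuilder<?>> stores() {
            // The "counts" store is created and connected automatically for this processor.
            return Set.of(Stores.keyValueStoreBuilder(
                Stores.persistentKeyValueStore("counts"),
                Serdes.String(),
                Serdes.Long()));
        }
    }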

    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      get()
      +
      +
      Return a newly constructed Processor instance.
      +
      +
      +
      +
      +
      +

      Methods inherited from interface org.apache.kafka.streams.processor.ConnectedStoreProvider

      +stores
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        get

        + +
        Return a newly constructed Processor instance. + The supplier should always generate a new instance each time get() gets called. +

        + Creating a single Processor object and returning the same object reference in get() + is a violation of the supplier pattern and leads to runtime exceptions.

        +
        +
        Specified by:
        +
get in interface Supplier<Processor<KIn,VIn,KOut,VOut>>
        +
        Returns:
        +
        a new Processor instance
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/ProcessorWrapper.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/ProcessorWrapper.html new file mode 100644 index 000000000..77a895163 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/ProcessorWrapper.html @@ -0,0 +1,234 @@ + + + + +ProcessorWrapper (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ProcessorWrapper

    +
    +
    +
    +
    All Superinterfaces:
    +
    Configurable
    +
    +
    +
    public interface ProcessorWrapper +extends Configurable
    +
Wrapper class that can be used to inject custom wrappers around the processors of an application topology. + The returned instance should wrap the supplied ProcessorSupplier and the Processor it supplies + to avoid disrupting the regular processing of the application, although this is not required and any processor + implementation can be substituted in to replace the original processor entirely (which may be useful, for example, + while testing or debugging an application topology). +

+ NOTE: in order to use this feature, you must set the StreamsConfig.PROCESSOR_WRAPPER_CLASS_CONFIG config and pass it + in as a TopologyConfig when creating the StreamsBuilder or Topology by using the + appropriate constructor (i.e., StreamsBuilder(TopologyConfig) or Topology(TopologyConfig)). +

    + Can be configured, if desired, by implementing the configure(Map) method. This will be invoked when + the ProcessorWrapper is instantiated, and will provide it with the TopologyConfigs that were passed in + to the StreamsBuilder or Topology constructor.
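A rough sketch of a pass-through wrapper and its registration. The names of the wrap hooks (wrapProcessorSupplier / wrapFixedKeyProcessorSupplier) and the TopologyConfig(StreamsConfig) constructor are assumptions not shown in this excerpt; only the asWrapped/asWrappedFixedKey helpers and PROCESSOR_WRAPPER_CLASS_CONFIG are named in these pages.

    import java.util.Map;
    import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier;
    import org.apache.kafka.streams.processor.api.ProcessorSupplier;
    import org.apache.kafka.streams.processor.api.ProcessorWrapper;
    import org.apache.kafka.streams.processor.api.WrappedFixedKeyProcessorSupplier;
    import org.apache.kafka.streams.processor.api.WrappedProcessorSupplier;

    // Pass-through wrapper: logs each processor name and delegates unchanged.
    public class LoggingProcessorWrapper implements ProcessorWrapper {

        @Override
        public void configure(final Map<String, ?> configs) { }

        @Override
        public <KIn, VIn, KOut, VOut> WrappedProcessorSupplier<KIn, VIn, KOut, VOut> wrapProcessorSupplier(
                final String processorName,
                final ProcessorSupplier<KIn, VIn, KOut, VOut> processorSupplier) {
            System.out.println("wrapping processor " + processorName);
            return ProcessorWrapper.asWrapped(processorSupplier);
        }

        @Override
        public <KIn, VIn, VOut> WrappedFixedKeyProcessorSupplier<KIn, VIn, VOut> wrapFixedKeyProcessorSupplier(
                final String processorName,
                final FixedKeyProcessorSupplier<KIn, VIn, VOut> processorSupplier) {
            return ProcessorWrapper.asWrappedFixedKey(processorSupplier);
        }
    }

Registering the wrapper would then look roughly like this:

    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wrapped-app");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.PROCESSOR_WRAPPER_CLASS_CONFIG, LoggingProcessorWrapper.class);
    // The wrapper only takes effect when the config is passed in via TopologyConfig.
    StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props)));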

    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/Record.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/Record.html new file mode 100644 index 000000000..821bb63ed --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/Record.html @@ -0,0 +1,390 @@ + + + + +Record (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Record<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.api.Record<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - The type of the key
    +
    V - The type of the value
    +
    +
    +
    public class Record<K,V> +extends Object
    +
    A data class representing an incoming record for processing in a Processor + or a record to forward to downstream processors via ProcessorContext. + + This class encapsulates all the data attributes of a record: the key and value, but + also the timestamp of the record and any record headers. + + This class is immutable, though the objects referenced in the attributes of this class + may themselves be mutable.
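A brief sketch of the copy-on-write style this implies (class name and values are invented for illustration):

    import org.apache.kafka.streams.processor.api.Record;

    public class RecordCopyExample {
        public static void main(final String[] args) {
            // Headers default to a non-null, empty collection when omitted.
            final Record<String, Long> original = new Record<>("user-1", 42L, 1_700_000_000_000L);

            // Each with* call returns an independent copy; 'original' is left untouched.
            final Record<String, String> mapped = original
                    .withValue("42 clicks")
                    .withTimestamp(original.timestamp() + 1);

            System.out.println(original);   // still carries the Long value 42
            System.out.println(mapped);     // same key, new value and timestamp
        }
    }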
    +
    +
    +
      + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      +
      Record(K key, + V value, + long timestamp)
      +
      +
      Convenience constructor in case you do not wish to specify any headers.
      +
      +
      Record(K key, + V value, + long timestamp, + Headers headers)
      +
      +
      The full constructor, specifying all the attributes of the record.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      + +
       
      +
      int
      + +
       
      + + +
      +
      The headers of the record.
      +
      + +
      key()
      +
      +
      The key of the record.
      +
      +
      long
      + +
      +
      The timestamp of the record.
      +
      + + +
       
      + + +
      +
      The value of the record.
      +
      + + +
      +
      A convenient way to produce a new record if you only need to change the headers.
      +
      +
      <NewK> Record<NewK,V>
      +
      withKey(NewK key)
      +
      +
      A convenient way to produce a new record if you only need to change the key.
      +
      + +
      withTimestamp(long timestamp)
      +
      +
      A convenient way to produce a new record if you only need to change the timestamp.
      +
      +
      <NewV> Record<K,NewV>
      +
      withValue(NewV value)
      +
      +
      A convenient way to produce a new record if you only need to change the value.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        Record

        +
        public Record(K key, + V value, + long timestamp, + Headers headers)
        +
        The full constructor, specifying all the attributes of the record. + + Note: this constructor makes a copy of the headers argument. + See ProcessorContext.forward(Record) for + considerations around mutability of keys, values, and headers.
        +
        +
        Parameters:
        +
        key - The key of the record. May be null.
        +
        value - The value of the record. May be null.
        +
        timestamp - The timestamp of the record. May not be negative.
        +
        headers - The headers of the record. May be null, which will cause subsequent calls + to headers() to return a non-null, empty, Headers collection.
        +
        Throws:
        +
        IllegalArgumentException - if the timestamp is negative.
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        Record

        +
        public Record(K key, + V value, + long timestamp)
        +
        Convenience constructor in case you do not wish to specify any headers. + Subsequent calls to headers() will return a non-null, empty, Headers collection.
        +
        +
        Parameters:
        +
        key - The key of the record. May be null.
        +
        value - The value of the record. May be null.
        +
        timestamp - The timestamp of the record. May not be negative.
        +
        Throws:
        +
        IllegalArgumentException - if the timestamp is negative.
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        key

        +
        public K key()
        +
        The key of the record. May be null.
        +
        +
      • +
      • +
        +

        value

        +
        public V value()
        +
        The value of the record. May be null.
        +
        +
      • +
      • +
        +

        timestamp

        +
        public long timestamp()
        +
        The timestamp of the record. Will never be negative.
        +
        +
      • +
      • +
        +

        headers

        +
        public Headers headers()
        +
        The headers of the record. Never null.
        +
        +
      • +
      • +
        +

        withKey

        +
        public <NewK> Record<NewK,V> withKey(NewK key)
        +
        A convenient way to produce a new record if you only need to change the key. + + Copies the attributes of this record with the key replaced.
        +
        +
        Type Parameters:
        +
        NewK - The type of the new record's key.
        +
        Parameters:
        +
        key - The key of the result record. May be null.
        +
        Returns:
        +
        A new Record instance with all the same attributes (except that the key is replaced).
        +
        +
        +
      • +
      • +
        +

        withValue

        +
        public <NewV> Record<K,NewV> withValue(NewV value)
        +
        A convenient way to produce a new record if you only need to change the value. + + Copies the attributes of this record with the value replaced.
        +
        +
        Type Parameters:
        +
        NewV - The type of the new record's value.
        +
        Parameters:
        +
        value - The value of the result record.
        +
        Returns:
        +
        A new Record instance with all the same attributes (except that the value is replaced).
        +
        +
        +
      • +
      • +
        +

        withTimestamp

        +
        public Record<K,V> withTimestamp(long timestamp)
        +
        A convenient way to produce a new record if you only need to change the timestamp. + + Copies the attributes of this record with the timestamp replaced.
        +
        +
        Parameters:
        +
        timestamp - The timestamp of the result record.
        +
        Returns:
        +
        A new Record instance with all the same attributes (except that the timestamp is replaced).
        +
        +
        +
      • +
      • +
        +

        withHeaders

        +
        public Record<K,V> withHeaders(Headers headers)
        +
        A convenient way to produce a new record if you only need to change the headers. + + Copies the attributes of this record with the headers replaced. + Also makes a copy of the provided headers. + + See ProcessorContext.forward(Record) for + considerations around mutability of keys, values, and headers.
        +
        +
        Parameters:
        +
        headers - The headers of the result record.
        +
        Returns:
        +
        A new Record instance with all the same attributes (except that the headers are replaced).
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/RecordMetadata.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/RecordMetadata.html new file mode 100644 index 000000000..594628de1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/RecordMetadata.html @@ -0,0 +1,190 @@ + + + + +RecordMetadata (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface RecordMetadata

    +
    +
    +
    +
    public interface RecordMetadata
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      long
      + +
      +
      Return the offset of the current input record; could be -1 if it is not + available.
      +
      +
      int
      + +
      +
      Return the partition id of the current input record; could be -1 if it is not + available.
      +
      + + +
      +
      Return the topic name of the current input record; could be null if it is not + available.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        topic

        +
        String topic()
        +
        Return the topic name of the current input record; could be null if it is not + available. + +

        For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, the record won't have an associated topic. + Another example is + KTable.transformValues(ValueTransformerWithKeySupplier, String...) + (and siblings), that do not always guarantee to provide a valid topic name, as they might be + executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL.

        +
        +
        Returns:
        +
        the topic name
        +
        +
        +
      • +
      • +
        +

        partition

        +
        int partition()
        +
        Return the partition id of the current input record; could be -1 if it is not + available. + +

        For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, the record won't have an associated partition id. + Another example is + KTable.transformValues(ValueTransformerWithKeySupplier, String...) + (and siblings), that do not always guarantee to provide a valid partition id, as they might be + executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL.

        +
        +
        Returns:
        +
        the partition id
        +
        +
        +
      • +
      • +
        +

        offset

        +
        long offset()
        +
        Return the offset of the current input record; could be -1 if it is not + available. + +

        For example, if this method is invoked within a punctuation callback, or while processing a record that was forwarded by a punctuation + callback, the record won't have an associated offset. + Another example is + KTable.transformValues(ValueTransformerWithKeySupplier, String...) + (and siblings), that do not always guarantee to provide a valid offset, as they might be + executed "out-of-band" due to some internal optimizations applied by the Kafka Streams DSL.

        +
        +
        Returns:
        +
        the offset
        +
        +
        +
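Because all three fields may be unavailable, the metadata is typically consumed defensively; a sketch (assuming access via the context's recordMetadata() accessor, which returns an Optional):

    import org.apache.kafka.streams.processor.api.Processor;
    import org.apache.kafka.streams.processor.api.ProcessorContext;
    import org.apache.kafka.streams.processor.api.Record;

    public class MetadataLoggingProcessor implements Processor<String, String, String, String> {
        private ProcessorContext<String, String> context;

        @Override
        public void init(final ProcessorContext<String, String> context) {
            this.context = context;
        }

        @Override
        public void process(final Record<String, String> record) {
            // Metadata is absent e.g. for records forwarded from a punctuator,
            // so it is exposed as an Optional rather than as dummy values.
            context.recordMetadata().ifPresent(metadata ->
                System.out.printf("record from %s[%d]@%d%n",
                    metadata.topic(), metadata.partition(), metadata.offset()));
            context.forward(record);
        }
    }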
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/WrappedFixedKeyProcessorSupplier.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/WrappedFixedKeyProcessorSupplier.html new file mode 100644 index 000000000..5fbb1a712 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/WrappedFixedKeyProcessorSupplier.html @@ -0,0 +1,110 @@ + + + + +WrappedFixedKeyProcessorSupplier (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface WrappedFixedKeyProcessorSupplier<KIn,VIn,VOut>

    +
    +
    +
    +
    All Superinterfaces:
    +
    ConnectedStoreProvider, FixedKeyProcessorSupplier<KIn,VIn,VOut>, Supplier<FixedKeyProcessor<KIn,VIn,VOut>>
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface WrappedFixedKeyProcessorSupplier<KIn,VIn,VOut> +extends FixedKeyProcessorSupplier<KIn,VIn,VOut>
    +
    Marker interface for classes implementing FixedKeyProcessorSupplier + that have been wrapped via a ProcessorWrapper. +

    + To convert a FixedKeyProcessorSupplier instance into a WrappedFixedKeyProcessorSupplier, + use the ProcessorWrapper.asWrappedFixedKey(FixedKeyProcessorSupplier) method

    +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/WrappedProcessorSupplier.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/WrappedProcessorSupplier.html new file mode 100644 index 000000000..cda635735 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/WrappedProcessorSupplier.html @@ -0,0 +1,110 @@ + + + + +WrappedProcessorSupplier (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface WrappedProcessorSupplier<KIn,VIn,KOut,VOut>

    +
    +
    +
    +
    All Superinterfaces:
    +
    ConnectedStoreProvider, ProcessorSupplier<KIn,VIn,KOut,VOut>, Supplier<Processor<KIn,VIn,KOut,VOut>>
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public interface WrappedProcessorSupplier<KIn,VIn,KOut,VOut> +extends ProcessorSupplier<KIn,VIn,KOut,VOut>
    +
    Marker interface for classes implementing ProcessorSupplier + that have been wrapped via a ProcessorWrapper. +

    + To convert a ProcessorSupplier instance into a WrappedProcessorSupplier, + use the ProcessorWrapper.asWrapped(ProcessorSupplier) method

    +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/package-summary.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/package-summary.html new file mode 100644 index 000000000..53d9f832c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/package-summary.html @@ -0,0 +1,182 @@ + + + + +org.apache.kafka.streams.processor.api (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.streams.processor.api

    +
    +
    +
    package org.apache.kafka.streams.processor.api
    +
    +
    Provides a low-level programming model (Processor API, aka, PAPI) to express a (stateful) data flow computation over input topics. + Use Topology as the entry point for your program.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/api/package-tree.html b/static/41/javadoc/org/apache/kafka/streams/processor/api/package-tree.html new file mode 100644 index 000000000..758c6370f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/api/package-tree.html @@ -0,0 +1,125 @@ + + + + +org.apache.kafka.streams.processor.api Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.streams.processor.api

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/ApplicationState.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/ApplicationState.html new file mode 100644 index 000000000..dfab44cf6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/ApplicationState.html @@ -0,0 +1,166 @@ + + + + +ApplicationState (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface ApplicationState

    +
    +
    +
    +
    public interface ApplicationState
    +
    A read-only metadata class representing the state of the application and the current rebalance. + This class wraps all the input parameters to the task assignment, including the current state + of each KafkaStreams client with at least one StreamThread participating in this rebalance, the + assignment-related configs, and the tasks to be assigned.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        kafkaStreamsStates

        +
        Map<ProcessId,KafkaStreamsState> kafkaStreamsStates(boolean computeTaskLags)
        +
        +
        Parameters:
        +
        computeTaskLags - whether to include task lag information in the returned metadata. Note that passing + in "true" will result in a remote call to fetch changelog topic end offsets, and you should pass in "false" unless + you specifically need the task lag information.
        +
        Returns:
        +
        a map from the processId to KafkaStreamsState for all KafkaStreams clients in this app
        +
        Throws:
        +
        TaskAssignmentException - if a retriable error occurs while computing KafkaStreamsState metadata. Re-throw + this exception to have Kafka Streams retry the rebalance by returning the same + assignment and scheduling an immediate followup rebalance
        +
        +
        +
      • +
      • +
        +

        assignmentConfigs

        +
        AssignmentConfigs assignmentConfigs()
        +
        +
        Returns:
        +
        a simple container class with the Streams configs relevant to assignment
        +
        +
        +
      • +
      • +
        +

        allTasks

        +
        Map<TaskId,TaskInfo> allTasks()
        +
        +
        Returns:
        +
        a map of task ids to all tasks in this topology to be assigned
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/AssignmentConfigs.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/AssignmentConfigs.html new file mode 100644 index 000000000..e7395b9a1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/AssignmentConfigs.html @@ -0,0 +1,336 @@ + + + + +AssignmentConfigs (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class AssignmentConfigs

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.assignment.AssignmentConfigs
    +
    +
    +
    +
    public class AssignmentConfigs +extends Object
    +
    Assignment related configs for the Kafka Streams TaskAssignor.
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/KafkaStreamsAssignment.AssignedTask.Type.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/KafkaStreamsAssignment.AssignedTask.Type.html new file mode 100644 index 000000000..c2880646f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/KafkaStreamsAssignment.AssignedTask.Type.html @@ -0,0 +1,221 @@ + + + + +KafkaStreamsAssignment.AssignedTask.Type (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class KafkaStreamsAssignment.AssignedTask.Type

    +
    +
    java.lang.Object +
    java.lang.Enum<KafkaStreamsAssignment.AssignedTask.Type> +
    org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask.Type
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<KafkaStreamsAssignment.AssignedTask.Type>, Constable
    +
    +
    +
    Enclosing class:
    +
    KafkaStreamsAssignment.AssignedTask
    +
    +
    +
    public static enum KafkaStreamsAssignment.AssignedTask.Type +extends Enum<KafkaStreamsAssignment.AssignedTask.Type>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static KafkaStreamsAssignment.AssignedTask.Type[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static KafkaStreamsAssignment.AssignedTask.Type valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/KafkaStreamsAssignment.AssignedTask.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/KafkaStreamsAssignment.AssignedTask.html new file mode 100644 index 000000000..246a30eb2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/KafkaStreamsAssignment.AssignedTask.html @@ -0,0 +1,235 @@ + + + + +KafkaStreamsAssignment.AssignedTask (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class KafkaStreamsAssignment.AssignedTask

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment.AssignedTask
    +
    +
    +
    +
    Enclosing class:
    +
    KafkaStreamsAssignment
    +
    +
    +
    public static class KafkaStreamsAssignment.AssignedTask +extends Object
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/KafkaStreamsAssignment.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/KafkaStreamsAssignment.html new file mode 100644 index 000000000..087415574 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/KafkaStreamsAssignment.html @@ -0,0 +1,259 @@ + + + + +KafkaStreamsAssignment (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class KafkaStreamsAssignment

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.assignment.KafkaStreamsAssignment
    +
    +
    +
    +
    public class KafkaStreamsAssignment +extends Object
    +
    A simple container class for the assignor to return the desired placement of active and standby tasks on + KafkaStreams clients.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        of

        +
        public static KafkaStreamsAssignment of(ProcessId processId, + Set<KafkaStreamsAssignment.AssignedTask> assignment)
        +
        Construct an instance of KafkaStreamsAssignment with this processId and the given set of + assigned tasks. If you want this KafkaStreams client to request a followup rebalance, you + can set the followupRebalanceDeadline via the withFollowupRebalance(Instant) API.
        +
        +
        Parameters:
        +
        processId - the processId for the KafkaStreams client that should receive this assignment
        +
        assignment - the set of tasks to be assigned to this KafkaStreams client
        +
        Returns:
        +
        a new KafkaStreamsAssignment object with the given processId and assignment
        +
        +
        +
      • +
      • +
        +

        withFollowupRebalance

        +
        public KafkaStreamsAssignment withFollowupRebalance(Instant rebalanceDeadline)
        +
        This API can be used to request that a followup rebalance be triggered by the KafkaStreams client + receiving this assignment. The followup rebalance will be initiated after the provided deadline + has passed, although it will always wait until it has finished the current rebalance before + triggering a new one. This request will last until the new rebalance, and will be erased if a + new rebalance begins before the scheduled followup rebalance deadline has elapsed. The next + assignment must request the followup rebalance again if it still wants to schedule one for + the given instant, otherwise no additional rebalance will be triggered after that.
        +
        +
        Parameters:
        +
        rebalanceDeadline - the instant after which this KafkaStreams client will trigger a followup rebalance
        +
        Returns:
        +
        a new KafkaStreamsAssignment object with the same processId and assignment but with the given rebalanceDeadline
        +
        +
        +
      • +
      • +
        +

        processId

        +
        public ProcessId processId()
        +
        +
        Returns:
        +
        the ProcessID associated with this KafkaStreamsAssignment
        +
        +
        +
      • +
      • +
        +

        tasks

        + +
        +
        Returns:
        +
        a read-only set of assigned tasks that are part of this KafkaStreamsAssignment
        +
        +
        +
      • +
      • +
        +

        assignTask

        +
        public void assignTask(KafkaStreamsAssignment.AssignedTask newTask)
        +
        +
      • +
      • +
        +

        removeTask

        +
        public void removeTask(KafkaStreamsAssignment.AssignedTask removedTask)
        +
        +
      • +
      • +
        +

        followupRebalanceDeadline

        +
        public Optional<Instant> followupRebalanceDeadline()
        +
        +
        Returns:
        +
        the followup rebalance deadline in epoch time, after which this KafkaStreams + client will trigger a new rebalance.
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/KafkaStreamsState.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/KafkaStreamsState.html new file mode 100644 index 000000000..1c333cb0b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/KafkaStreamsState.html @@ -0,0 +1,293 @@ + + + + +KafkaStreamsState (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface KafkaStreamsState

    +
    +
    +
    +
    public interface KafkaStreamsState
    +
    A read-only metadata class representing the current state of each KafkaStreams client with at least one StreamThread participating in this rebalance
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        processId

        +
        ProcessId processId()
        +
        +
        Returns:
        +
        the processId of the application instance running on this KafkaStreams client
        +
        +
        +
      • +
      • +
        +

        numProcessingThreads

        +
        int numProcessingThreads()
        +
        Returns the number of processing threads available to work on tasks for this KafkaStreams client, + which represents its overall capacity for work relative to other KafkaStreams clients.
        +
        +
        Returns:
        +
        the number of processing threads on this KafkaStreams client
        +
        +
        +
      • +
      • +
        +

        consumerClientIds

        +
        SortedSet<String> consumerClientIds()
        +
        +
        Returns:
        +
        the set of consumer client ids for this KafkaStreams client
        +
        +
        +
      • +
      • +
        +

        previousActiveTasks

        +
        SortedSet<TaskId> previousActiveTasks()
        +
        +
        Returns:
        +
        the set of all active tasks owned by consumers on this KafkaStreams client since the previous rebalance
        +
        +
        +
      • +
      • +
        +

        previousStandbyTasks

        +
        SortedSet<TaskId> previousStandbyTasks()
        +
        +
        Returns:
        +
        the set of all standby tasks owned by consumers on this KafkaStreams client since the previous rebalance
        +
        +
        +
      • +
      • +
        +

        lagFor

        +
        long lagFor(TaskId task)
        +
        Returns the total lag across all logged stores in the task. Equal to the end offset sum if this client + did not have any state for this task on disk.
        +
        +
        Returns:
        +
end offset sum - offset sum, or + Task.LATEST_OFFSET if this was previously an active running task on this client
        +
        Throws:
        +
        UnsupportedOperationException - if the user did not request task lags be computed.
        +
        +
        +
      • +
      • +
        +

        prevTasksByLag

        +
        SortedSet<TaskId> prevTasksByLag(String consumerClientId)
        +
        +
        Returns:
        +
        the previous tasks assigned to this consumer ordered by lag, filtered for any tasks that don't exist in this assignment
        +
        Throws:
        +
        UnsupportedOperationException - if the user did not request task lags be computed.
        +
        +
        +
      • +
      • +
        +

        statefulTasksToLagSums

        +
        Map<TaskId,Long> statefulTasksToLagSums()
        +
        Returns a collection containing all (and only) stateful tasks in the topology by TaskId, + mapped to its "offset lag sum". This is computed as the difference between the changelog end offset + and the current offset, summed across all logged state stores in the task.
        +
        +
        Returns:
        +
        a map from all stateful tasks to their lag sum
        +
        Throws:
        +
        UnsupportedOperationException - if the user did not request task lags be computed.
        +
        +
        +
      • +
      • +
        +

        hostInfo

        +
        Optional<HostInfo> hostInfo()
        +
        The HostInfo of this KafkaStreams client, if set via the + application.server config
        +
        +
        Returns:
        +
        the host info for this KafkaStreams client if configured, else Optional.empty()
        +
        +
        +
      • +
      • +
        +

        clientTags

        +
        Map<String,String> clientTags()
        +
The client tags for this KafkaStreams client, if any have been set via configs using the + StreamsConfig.clientTagPrefix(java.lang.String) +

        + Can be used however you want, or passed in to enable the rack-aware standby task assignor.

        +
        +
        Returns:
        +
        all the client tags found in this KafkaStreams client's StreamsConfig
        +
        +
        +
      • +
      • +
        +

        rackId

        +
        Optional<String> rackId()
        +
        +
        Returns:
        +
        the rackId for this KafkaStreams client, or Optional.empty() if none was configured
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/ProcessId.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/ProcessId.html new file mode 100644 index 000000000..f7461eecb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/ProcessId.html @@ -0,0 +1,233 @@ + + + + +ProcessId (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ProcessId

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.assignment.ProcessId
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Comparable<ProcessId>
    +
    +
    +
    public class ProcessId +extends Object +implements Comparable<ProcessId>
    +
    A simple wrapper around UUID that abstracts a Process ID
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ProcessId

        +
        public ProcessId(UUID id)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        id

        +
        public UUID id()
        +
        +
        Returns:
        +
        the underlying UUID that this ProcessID is wrapping.
        +
        +
        +
      • +
      • +
        +

        randomProcessId

        +
        public static ProcessId randomProcessId()
        +
        +
        Returns:
        +
        a randomly generated process id
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object obj)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        compareTo

        +
        public int compareTo(ProcessId o)
        +
        +
        Specified by:
        +
        compareTo in interface Comparable<ProcessId>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignmentUtils.MoveStandbyTaskPredicate.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignmentUtils.MoveStandbyTaskPredicate.html new file mode 100644 index 000000000..cbea8923d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignmentUtils.MoveStandbyTaskPredicate.html @@ -0,0 +1,139 @@ + + + + +TaskAssignmentUtils.MoveStandbyTaskPredicate (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TaskAssignmentUtils.MoveStandbyTaskPredicate

    +
    +
    +
    +
    Enclosing class:
    +
    TaskAssignmentUtils
    +
    +
    +
    Functional Interface:
    +
    This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.
    +
    +
    +
    @FunctionalInterface +public static interface TaskAssignmentUtils.MoveStandbyTaskPredicate
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignmentUtils.RackAwareOptimizationParams.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignmentUtils.RackAwareOptimizationParams.html new file mode 100644 index 000000000..8bfefd6b9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignmentUtils.RackAwareOptimizationParams.html @@ -0,0 +1,200 @@ + + + + +TaskAssignmentUtils.RackAwareOptimizationParams (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TaskAssignmentUtils.RackAwareOptimizationParams

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils.RackAwareOptimizationParams
    +
    +
    +
    +
    Enclosing class:
    +
    TaskAssignmentUtils
    +
    +
    +
    public static final class TaskAssignmentUtils.RackAwareOptimizationParams +extends Object
    +
    A simple config container for necessary parameters and optional overrides to apply when + running the active or standby task rack-aware optimizations.
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignmentUtils.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignmentUtils.html new file mode 100644 index 000000000..6e8f7de23 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignmentUtils.html @@ -0,0 +1,293 @@ + + + + +TaskAssignmentUtils (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TaskAssignmentUtils

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.assignment.TaskAssignmentUtils
    +
    +
    +
    +
    public final class TaskAssignmentUtils +extends Object
    +
    A set of utilities to help implement task assignment via the TaskAssignor
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignor.AssignmentError.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignor.AssignmentError.html new file mode 100644 index 000000000..4e37936c3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignor.AssignmentError.html @@ -0,0 +1,259 @@ + + + + +TaskAssignor.AssignmentError (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class TaskAssignor.AssignmentError

    +
    +
    java.lang.Object +
    java.lang.Enum<TaskAssignor.AssignmentError> +
    org.apache.kafka.streams.processor.assignment.TaskAssignor.AssignmentError
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<TaskAssignor.AssignmentError>, Constable
    +
    +
    +
    Enclosing interface:
    +
    TaskAssignor
    +
    +
    +
    public static enum TaskAssignor.AssignmentError +extends Enum<TaskAssignor.AssignmentError>
    +
NONE: no error detected
+ ACTIVE_TASK_ASSIGNED_MULTIPLE_TIMES: multiple KafkaStreams clients assigned with the same active task
+ INVALID_STANDBY_TASK: stateless task assigned as a standby task
+ MISSING_PROCESS_ID: ProcessId present in the input ApplicationState was not present in the output TaskAssignment
+ UNKNOWN_PROCESS_ID: unrecognized ProcessId not matching any of the participating consumers
+ UNKNOWN_TASK_ID: unrecognized TaskId not matching any of the tasks to be assigned
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      + +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static TaskAssignor.AssignmentError[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static TaskAssignor.AssignmentError valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignor.TaskAssignment.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignor.TaskAssignment.html new file mode 100644 index 000000000..7823415df --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignor.TaskAssignment.html @@ -0,0 +1,168 @@ + + + + +TaskAssignor.TaskAssignment (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TaskAssignor.TaskAssignment

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.assignment.TaskAssignor.TaskAssignment
    +
    +
    +
    +
    Enclosing interface:
    +
    TaskAssignor
    +
    +
    +
    public static class TaskAssignor.TaskAssignment +extends Object
    +
    Wrapper class for the final assignment of active and standbys tasks to individual + KafkaStreams clients.
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignor.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignor.html new file mode 100644 index 000000000..096dc0b7f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskAssignor.html @@ -0,0 +1,225 @@ + + + + +TaskAssignor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TaskAssignor

    +
    +
    +
    +
    All Superinterfaces:
    +
    Configurable
    +
    +
    +
    All Known Implementing Classes:
    +
    StickyTaskAssignor
    +
    +
    +
    public interface TaskAssignor +extends Configurable
    +
A TaskAssignor is responsible for creating a TaskAssignment from a given + ApplicationState. + The implementation may also override the onAssignmentComputed callback for insight into + the result of the computed assignment.
    +
    +
    +
      + +
    • +
      +

      Nested Class Summary

      +
      Nested Classes
      +
      +
      Modifier and Type
      +
      Interface
      +
      Description
      +
      static enum 
      + +
      +
      NONE: no error detected + ACTIVE_TASK_ASSIGNED_MULTIPLE_TIMES: multiple KafkaStreams clients assigned with the same active task + INVALID_STANDBY_TASK: stateless task assigned as a standby task + MISSING_PROCESS_ID: ProcessId present in the input ApplicationState was not present in the output TaskAssignment + UNKNOWN_PROCESS_ID: unrecognized ProcessId not matching any of the participating consumers + UNKNOWN_TASK_ID: unrecognized TaskId not matching any of the tasks to be assigned
      +
      +
      static class 
      + +
      +
Wrapper class for the final assignment of active and standby tasks to individual + KafkaStreams clients.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      assign(ApplicationState applicationState)
      +
       
      +
      default void
      +
      configure(Map<String,?> configs)
      +
      +
      Configure this class with the given key-value pairs
      +
      +
      default void
      + +
      +
      This callback can be used to observe the final assignment returned to the brokers and check for any errors that + were detected while processing the returned assignment.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        assign

        + +
        +
        Parameters:
        +
        applicationState - the metadata for this Kafka Streams application
        +
        Returns:
        +
        the assignment of active and standby tasks to KafkaStreams clients
        +
        Throws:
        +
        TaskAssignmentException - If an error occurs during assignment, and you wish for the rebalance to be retried, + you can throw this exception to keep the assignment unchanged and automatically + schedule an immediate followup rebalance.
        +
        +
        +
      • +
      • +
        +

        onAssignmentComputed

        +
        default void onAssignmentComputed(ConsumerPartitionAssignor.GroupAssignment assignment, + ConsumerPartitionAssignor.GroupSubscription subscription, + TaskAssignor.AssignmentError error)
        +
This callback can be used to observe the final assignment returned to the brokers and check for any errors that + were detected while processing the returned assignment. If any errors were found, the corresponding AssignmentError will be returned and a StreamsException will be thrown after this callback returns. The StreamsException will + be thrown up to kill the StreamThread and can be handled as any other uncaught exception would if the application + has registered a StreamsUncaughtExceptionHandler. +

        + Note: some kinds of errors will make it impossible for the StreamsPartitionAssignor to parse the TaskAssignment + that was returned from the TaskAssignor's assign(org.apache.kafka.streams.processor.assignment.ApplicationState). If this occurs, the ConsumerPartitionAssignor.GroupAssignment passed + in to this callback will contain an empty map instead of the consumer assignments.

        +
        +
        Parameters:
        +
        assignment - the final consumer assignments returned to the kafka broker, or an empty assignment map if + an error prevented the assignor from converting the TaskAssignment into a GroupAssignment
        +
        subscription - the original consumer subscriptions passed into the assignor
        +
        error - the corresponding error type if one was detected while processing the returned assignment, + or AssignmentError.NONE if the returned assignment was valid
        +
        +
        +
      • +
      • +
        +

        configure

        +
        default void configure(Map<String,?> configs)
        +
        Description copied from interface: Configurable
        +
        Configure this class with the given key-value pairs
        +
        +
        Specified by:
        +
        configure in interface Configurable
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
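A minimal sketch of how a custom TaskAssignor might be implemented against the signatures documented above. The class name LoggingTaskAssignor is illustrative only; the actual task placement is delegated to the built-in StickyTaskAssignor, and onAssignmentComputed is used purely for observability.

```java
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor;
import org.apache.kafka.streams.processor.assignment.ApplicationState;
import org.apache.kafka.streams.processor.assignment.TaskAssignor;
import org.apache.kafka.streams.processor.assignment.assignors.StickyTaskAssignor;

public class LoggingTaskAssignor implements TaskAssignor {

    // Delegate the actual active/standby placement to the built-in assignor.
    private final TaskAssignor delegate = new StickyTaskAssignor();

    @Override
    public void configure(final Map<String, ?> configs) {
        // no custom configs in this sketch
    }

    @Override
    public TaskAssignment assign(final ApplicationState applicationState) {
        return delegate.assign(applicationState);
    }

    @Override
    public void onAssignmentComputed(final ConsumerPartitionAssignor.GroupAssignment assignment,
                                     final ConsumerPartitionAssignor.GroupSubscription subscription,
                                     final AssignmentError error) {
        if (error != AssignmentError.NONE) {
            // A StreamsException will be thrown after this callback returns.
            System.err.println("Task assignment failed with error: " + error);
        }
    }
}
```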
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskInfo.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskInfo.html new file mode 100644 index 000000000..bc99a5628 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskInfo.html @@ -0,0 +1,173 @@ + + + + +TaskInfo (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TaskInfo

    +
    +
    +
    +
    public interface TaskInfo
    +
    A simple container class corresponding to a given TaskId. + Includes metadata such as whether it's stateful and the names of all state stores + belonging to this task, the set of input topic partitions and changelog topic partitions + for all logged state stores, and the rack ids of all replicas of each topic partition + in the task.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        id

        +
        TaskId id()
        +
        +
        Returns:
        +
        The TaskId of the underlying task.
        +
        +
        +
      • +
      • +
        +

        isStateful

        +
        boolean isStateful()
        +
        +
        Returns:
        +
        true if the underlying task is stateful, and false otherwise.
        +
        +
        +
      • +
      • +
        +

        stateStoreNames

        +
        Set<String> stateStoreNames()
        +
        +
        Returns:
        +
        the set of state store names that this task makes use of. In the case of stateless tasks, + this set will be empty as no state stores are used.
        +
        +
        +
      • +
      • +
        +

        topicPartitions

        +
        Set<TaskTopicPartition> topicPartitions()
        +
        +
        Returns:
        +
        the set of topic partitions in use for this task.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskTopicPartition.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskTopicPartition.html new file mode 100644 index 000000000..6c1ca152a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/TaskTopicPartition.html @@ -0,0 +1,174 @@ + + + + +TaskTopicPartition (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TaskTopicPartition

    +
    +
    +
    +
    public interface TaskTopicPartition
    +
This is a simple container class used during the assignment process to distinguish + the type of each TopicPartition. Since the assignment logic can depend on the type of topic we're + looking at, and on the rack information of the partition, this container class should have + everything necessary to make informed task assignment decisions.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        topicPartition

        +
        TopicPartition topicPartition()
        +
        +
        Returns:
        +
        the TopicPartition for this task.
        +
        +
        +
      • +
      • +
        +

        isSource

        +
        boolean isSource()
        +
        +
        Returns:
        +
        whether the underlying topic is a source topic or not. Source changelog topics + are both source topics and changelog topics.
        +
        +
        +
      • +
      • +
        +

        isChangelog

        +
        boolean isChangelog()
        +
        +
        Returns:
        +
        whether the underlying topic is a changelog topic or not. Source changelog topics + are both source topics and changelog topics.
        +
        +
        +
      • +
      • +
        +

        rackIds

        +
        Optional<Set<String>> rackIds()
        +
        +
        Returns:
        +
        the broker rack ids on which this topic partition resides. If no information could + be found, this will return an empty optional value.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
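As a small illustration of how these read-only containers might be consulted inside a custom assignor, the sketch below uses only the TaskInfo and TaskTopicPartition accessors documented on these pages; the class name and the standby heuristic are hypothetical, not part of the API.

```java
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.kafka.streams.processor.assignment.TaskInfo;
import org.apache.kafka.streams.processor.assignment.TaskTopicPartition;

public final class TaskInspection {

    // Hypothetical heuristic: only stateful tasks with at least one changelog
    // partition are worth backing with a standby replica.
    public static boolean standbyCandidate(final TaskInfo task) {
        return task.isStateful()
            && task.topicPartitions().stream().anyMatch(TaskTopicPartition::isChangelog);
    }

    // Collect the rack ids of the task's source partitions, where the broker exposed them.
    public static Set<String> sourceRacks(final TaskInfo task) {
        return task.topicPartitions().stream()
            .filter(TaskTopicPartition::isSource)
            .flatMap(tp -> tp.rackIds().stream().flatMap(Set::stream))
            .collect(Collectors.toSet());
    }
}
```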
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/assignors/StickyTaskAssignor.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/assignors/StickyTaskAssignor.html new file mode 100644 index 000000000..2665341ad --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/assignors/StickyTaskAssignor.html @@ -0,0 +1,245 @@ + + + + +StickyTaskAssignor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StickyTaskAssignor

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.processor.assignment.assignors.StickyTaskAssignor
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Configurable, TaskAssignor
    +
    +
    +
    public class StickyTaskAssignor +extends Object +implements TaskAssignor
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        DEFAULT_STICKY_TRAFFIC_COST

        +
        public static final int DEFAULT_STICKY_TRAFFIC_COST
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        DEFAULT_STICKY_NON_OVERLAP_COST

        +
        public static final int DEFAULT_STICKY_NON_OVERLAP_COST
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        StickyTaskAssignor

        +
        public StickyTaskAssignor()
        +
        +
      • +
      • +
        +

        StickyTaskAssignor

        +
        public StickyTaskAssignor(boolean mustPreserveActiveTaskAssignment)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        assign

        +
        public TaskAssignor.TaskAssignment assign(ApplicationState applicationState)
        +
        +
        Specified by:
        +
        assign in interface TaskAssignor
        +
        Parameters:
        +
        applicationState - the metadata for this Kafka Streams application
        +
        Returns:
        +
        the assignment of active and standby tasks to KafkaStreams clients
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
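A short sketch of selecting the built-in sticky assignor for an application. The "task.assignor.class" config key shown here is an assumption based on KIP-924-style configuration and should be verified against the released StreamsConfig; the application id and bootstrap servers are illustrative.

```java
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.processor.assignment.assignors.StickyTaskAssignor;

public final class AssignorConfigExample {
    public static Properties streamsProps() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "sticky-assignment-demo");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Assumed config key for plugging in a task assignor (verify against StreamsConfig).
        props.put("task.assignor.class", StickyTaskAssignor.class.getName());
        return props;
    }
}
```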
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/assignors/package-summary.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/assignors/package-summary.html new file mode 100644 index 000000000..fbcc92bcc --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/assignors/package-summary.html @@ -0,0 +1,98 @@ + + + + +org.apache.kafka.streams.processor.assignment.assignors (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.streams.processor.assignment.assignors

    +
    +
    +
    package org.apache.kafka.streams.processor.assignment.assignors
    +
    +
    Provides classes for assigning tasks to stream threads.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/assignors/package-tree.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/assignors/package-tree.html new file mode 100644 index 000000000..29a3b1ff7 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/assignors/package-tree.html @@ -0,0 +1,71 @@ + + + + +org.apache.kafka.streams.processor.assignment.assignors Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.streams.processor.assignment.assignors

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/package-summary.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/package-summary.html new file mode 100644 index 000000000..c82443b14 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/package-summary.html @@ -0,0 +1,174 @@ + + + + +org.apache.kafka.streams.processor.assignment (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.streams.processor.assignment

    +
    +
    +
    package org.apache.kafka.streams.processor.assignment
    +
    +
    Provides classes and interfaces used to manage and assign tasks within Kafka Streams applications.
    +
    +
    +
      +
    • + +
    • +
    • +
      +
      +
      +
      +
      Class
      +
      Description
      + +
      +
      A read-only metadata class representing the state of the application and the current rebalance.
      +
      + +
      +
      Assignment related configs for the Kafka Streams TaskAssignor.
      +
      + +
      +
      A simple container class for the assignor to return the desired placement of active and standby tasks on + KafkaStreams clients.
      +
      + +
       
      + +
       
      + +
      +
      A read-only metadata class representing the current state of each KafkaStreams client with at least one StreamThread participating in this rebalance
      +
      + +
      +
      A simple wrapper around UUID that abstracts a Process ID
      +
      + +
      +
      A set of utilities to help implement task assignment via the TaskAssignor
      +
      + +
       
      + +
      +
      A simple config container for necessary parameters and optional overrides to apply when + running the active or standby task rack-aware optimizations.
      +
      + +
      +
      A TaskAssignor is responsible for creating a TaskAssignment from a given + ApplicationState.
      +
      + +
      +
      NONE: no error detected + ACTIVE_TASK_ASSIGNED_MULTIPLE_TIMES: multiple KafkaStreams clients assigned with the same active task + INVALID_STANDBY_TASK: stateless task assigned as a standby task + MISSING_PROCESS_ID: ProcessId present in the input ApplicationState was not present in the output TaskAssignment + UNKNOWN_PROCESS_ID: unrecognized ProcessId not matching any of the participating consumers + UNKNOWN_TASK_ID: unrecognized TaskId not matching any of the tasks to be assigned
      +
      + +
      +
Wrapper class for the final assignment of active and standby tasks to individual + KafkaStreams clients.
      +
      + +
      +
      A simple container class corresponding to a given TaskId.
      +
      + +
      +
      This is a simple container class used during the assignment process to distinguish + TopicPartitions type.
      +
      +
      +
      +
      +
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/assignment/package-tree.html b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/package-tree.html new file mode 100644 index 000000000..862f33be0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/assignment/package-tree.html @@ -0,0 +1,107 @@ + + + + +org.apache.kafka.streams.processor.assignment Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.streams.processor.assignment

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +

    Enum Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/package-summary.html b/static/41/javadoc/org/apache/kafka/streams/processor/package-summary.html new file mode 100644 index 000000000..2a05b01b0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/package-summary.html @@ -0,0 +1,240 @@ + + + + +org.apache.kafka.streams.processor (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.streams.processor

    +
    +
    +
    package org.apache.kafka.streams.processor
    +
    +
Provides a low-level programming model (Processor API, a.k.a. PAPI) to express a (stateful) data flow computation over input topics. + Use Topology as the entry point for your program.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/processor/package-tree.html b/static/41/javadoc/org/apache/kafka/streams/processor/package-tree.html new file mode 100644 index 000000000..c34cbc93b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/processor/package-tree.html @@ -0,0 +1,117 @@ + + + + +org.apache.kafka.streams.processor Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.streams.processor

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +

    Enum Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/FailureReason.html b/static/41/javadoc/org/apache/kafka/streams/query/FailureReason.html new file mode 100644 index 000000000..5a2fd3e9c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/FailureReason.html @@ -0,0 +1,286 @@ + + + + +FailureReason (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class FailureReason

    +
    +
    java.lang.Object +
    java.lang.Enum<FailureReason> +
    org.apache.kafka.streams.query.FailureReason
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<FailureReason>, Constable
    +
    +
    +
    @Evolving +public enum FailureReason +extends Enum<FailureReason>
    +
    This enumeration type captures the various top-level reasons that a particular + partition of a store would fail to execute a query. Stores should generally + respond with a failure message instead of throwing an exception. +

    + Intended to be used in QueryResult.forFailure(FailureReason, String).

    +
    +
    +
      + +
    • +
      +

      Nested Class Summary

      +
      +

      Nested classes/interfaces inherited from class java.lang.Enum

      +Enum.EnumDesc<E extends Enum<E>>
      +
      +
    • + +
    • +
      +

      Enum Constant Summary

      +
      Enum Constants
      +
      +
      Enum Constant
      +
      Description
      + +
      +
      The requested store partition does not exist at all.
      +
      + +
      +
The query was required to execute on an active task (via StateQueryRequest.requireActive()), + but while executing the query, the task was either a Standby task or an Active task + not in the RUNNING state.
      +
      + +
      +
      Failure indicating that the requested store partition is not present on the local + KafkaStreams instance.
      +
      + +
      +
      Failure indicating that the store partition is not (yet) up to the desired bound.
      +
      + +
      +
      The store that handled the query got an exception during query execution.
      +
      + +
      +
      Failure indicating that the store doesn't know how to handle the given query.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
      Returns the enum constant of this class with the specified name.
      +
      +
      static FailureReason[]
      + +
      +
      Returns an array containing the constants of this enum class, in +the order they are declared.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Enum

      +compareTo, describeConstable, equals, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
      +
      +

      Methods inherited from class java.lang.Object

      +getClass, notify, notifyAll, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      +
        +
      • +
        +

        UNKNOWN_QUERY_TYPE

        +
        public static final FailureReason UNKNOWN_QUERY_TYPE
        +
        Failure indicating that the store doesn't know how to handle the given query.
        +
        +
      • +
      • +
        +

        NOT_ACTIVE

        +
        public static final FailureReason NOT_ACTIVE
        +
The query was required to execute on an active task (via StateQueryRequest.requireActive()), + but while executing the query, the task was either a Standby task or an Active task + not in the RUNNING state. The failure message will contain the reason for the failure. +

        + The caller should either try again later or try a different replica.

        +
        +
      • +
      • +
        +

        NOT_UP_TO_BOUND

        +
        public static final FailureReason NOT_UP_TO_BOUND
        +
        Failure indicating that the store partition is not (yet) up to the desired bound. The caller + should either try again later or try a different replica.
        +
        +
      • +
      • +
        +

        NOT_PRESENT

        +
        public static final FailureReason NOT_PRESENT
        +
        Failure indicating that the requested store partition is not present on the local + KafkaStreams instance. It may have been migrated to another instance during a rebalance. The + caller is recommended to try a different replica.
        +
        +
      • +
      • +
        +

        DOES_NOT_EXIST

        +
        public static final FailureReason DOES_NOT_EXIST
        +
        The requested store partition does not exist at all. For example, partition 4 was requested, + but the store in question only has 4 partitions (0 through 3).
        +
        +
      • +
      • +
        +

        STORE_EXCEPTION

        +
        public static final FailureReason STORE_EXCEPTION
        +
        The store that handled the query got an exception during query execution. The message + will contain the exception details. Depending on the nature of the exception, the caller + may be able to retry this instance or may need to try a different instance.
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static FailureReason[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static FailureReason valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
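As an illustration of how a caller might react to these failure reasons, the sketch below is a hypothetical helper (the class and method names are not part of the API) that decides whether a failed partition result is worth retrying on another replica, using only the constants above and the QueryResult accessors documented later in this package.

```java
import org.apache.kafka.streams.query.FailureReason;
import org.apache.kafka.streams.query.QueryResult;

public final class FailureHandling {

    // Returns true if retrying the query on a different replica could succeed.
    public static boolean retryOnOtherReplica(final QueryResult<?> result) {
        if (result.isSuccess()) {
            return false;
        }
        final FailureReason reason = result.getFailureReason();
        switch (reason) {
            case NOT_ACTIVE:
            case NOT_UP_TO_BOUND:
            case NOT_PRESENT:
                return true;   // try again later or on another replica
            case DOES_NOT_EXIST:
            case UNKNOWN_QUERY_TYPE:
                return false;  // retrying elsewhere will not help
            case STORE_EXCEPTION:
            default:
                return false;  // inspect getFailureMessage() before deciding
        }
    }
}
```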
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/KeyQuery.html b/static/41/javadoc/org/apache/kafka/streams/query/KeyQuery.html new file mode 100644 index 000000000..31ee727a1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/KeyQuery.html @@ -0,0 +1,198 @@ + + + + +KeyQuery (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class KeyQuery<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.query.KeyQuery<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - Type of keys
    +
    V - Type of values
    +
    +
    +
    All Implemented Interfaces:
    +
    Query<V>
    +
    +
    +
    @Evolving +public final class KeyQuery<K,V> +extends Object +implements Query<V>
    +
    Interactive query for retrieving a single record based on its key.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
      Return the key that was specified for this query.
      +
      +
      boolean
      + +
      +
      The flag whether to skip the cache or not during query evaluation.
      +
      + + +
      +
      Specifies that the cache should be skipped during query evaluation.
      +
      +
      static <K, +V> KeyQuery<K,V>
      +
      withKey(K key)
      +
      +
      Creates a query that will retrieve the record identified by key if it exists + (or null otherwise).
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        withKey

        +
        public static <K, +V> KeyQuery<K,V> withKey(K key)
        +
        Creates a query that will retrieve the record identified by key if it exists + (or null otherwise).
        +
        +
        Type Parameters:
        +
        K - The type of the key
        +
        V - The type of the value that will be retrieved
        +
        Parameters:
        +
        key - The key to retrieve
        +
        +
        +
      • +
      • +
        +

        skipCache

        +
        public KeyQuery<K,V> skipCache()
        +
Specifies that the cache should be skipped during query evaluation. This means that the query will always + be forwarded to the underlying store.
        +
        +
      • +
      • +
        +

        getKey

        +
        public K getKey()
        +
        Return the key that was specified for this query.
        +
        +
        Returns:
        +
        The key that was specified for this query.
        +
        +
        +
      • +
      • +
        +

        isSkipCache

        +
        public boolean isSkipCache()
        +
        The flag whether to skip the cache or not during query evaluation.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
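A minimal sketch of building a KeyQuery with the methods documented above. The store name "word-counts" and the key are illustrative; running the query end to end additionally requires passing the request to KafkaStreams.query(StateQueryRequest), as documented elsewhere in this package.

```java
import org.apache.kafka.streams.query.KeyQuery;
import org.apache.kafka.streams.query.StateQueryRequest;

public final class KeyQueryExample {
    public static StateQueryRequest<Long> latestCountRequest(final String word) {
        // Look up a single value by key, bypassing the record cache.
        final KeyQuery<String, Long> query = KeyQuery.<String, Long>withKey(word).skipCache();
        return StateQueryRequest.inStore("word-counts").withQuery(query);
    }
}
```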
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/MultiVersionedKeyQuery.html b/static/41/javadoc/org/apache/kafka/streams/query/MultiVersionedKeyQuery.html new file mode 100644 index 000000000..a078e5797 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/MultiVersionedKeyQuery.html @@ -0,0 +1,293 @@ + + + + +MultiVersionedKeyQuery (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class MultiVersionedKeyQuery<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.query.MultiVersionedKeyQuery<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - The type of the key.
    +
    V - The type of the result returned by this query.
    +
    +
    +
    All Implemented Interfaces:
    +
    Query<VersionedRecordIterator<V>>
    +
    +
    +
    @Evolving +public final class MultiVersionedKeyQuery<K,V> +extends Object +implements Query<VersionedRecordIterator<V>>
    +
    Interactive query for retrieving a set of records with the same specified key and different timestamps within the specified time range. + No ordering is guaranteed for the results, but the results can be sorted by timestamp (in ascending or descending order) by calling the corresponding defined methods.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        withKey

        +
        public static <K, +V> MultiVersionedKeyQuery<K,V> withKey(K key)
        +
        Creates a query that will retrieve the set of records identified by key if any exists + (or null otherwise). + +

+ While the query by default returns all the record versions of the specified key, setting + the fromTimestamp (by calling the fromTime(Instant) method) and the toTimestamp + (by calling the toTime(Instant) method) makes the query return the record versions associated + with the specified time range.

        +
        +
        Type Parameters:
        +
        K - The type of the key
        +
        V - The type of the value that will be retrieved
        +
        Parameters:
        +
        key - The specified key by the query
        +
        Throws:
        +
        NullPointerException - if key is null
        +
        +
        +
      • +
      • +
        +

        fromTime

        +
        public MultiVersionedKeyQuery<K,V> fromTime(Instant fromTime)
        +
        Specifies the starting time point for the key query. +

+ The key query returns all the records that still exist in the time range starting from the timestamp fromTime. There can + be records that were inserted before fromTime and are still valid in the query's specified time range (fully + or even partially). The key query in fact returns all the records that have NOT become tombstones at or after fromTime.

        +
        +
        Parameters:
        +
fromTime - The starting time point. + If fromTime is null, it will be considered as negative infinity, i.e., no lower bound
        +
        +
        +
      • +
      • +
        +

        toTime

        +
        public MultiVersionedKeyQuery<K,V> toTime(Instant toTime)
        +
        Specifies the ending time point for the key query. + The key query returns all the records that have timestamp <= toTime.
        +
        +
        Parameters:
        +
toTime - The ending time point. + If toTime is null, it will be considered as positive infinity, i.e., no upper bound
        +
        +
        +
      • +
      • +
        +

        withDescendingTimestamps

        +
        public MultiVersionedKeyQuery<K,V> withDescendingTimestamps()
        +
        Specifies the order of the returned records by the query as descending by timestamp.
        +
        +
      • +
      • +
        +

        withAscendingTimestamps

        +
        public MultiVersionedKeyQuery<K,V> withAscendingTimestamps()
        +
        Specifies the order of the returned records by the query as ascending by timestamp.
        +
        +
      • +
      • +
        +

        key

        +
        public K key()
        +
        The key that was specified for this query.
        +
        +
        Returns:
        +
        The specified key of the query.
        +
        +
        +
      • +
      • +
        +

        fromTime

        +
        public Optional<Instant> fromTime()
        +
        The starting time point of the query, if specified
        +
        +
        Returns:
        +
        The specified fromTime of the query.
        +
        +
        +
      • +
      • +
        +

        toTime

        +
        public Optional<Instant> toTime()
        +
        The ending time point of the query, if specified
        +
        +
        Returns:
        +
        The specified toTime of the query.
        +
        +
        +
      • +
      • +
        +

        resultOrder

        +
        public ResultOrder resultOrder()
        +
        The order of the returned records by timestamp.
        +
        +
        Returns:
        +
        the order of returned records based on timestamp (can be unordered, or in ascending, or in descending order of timestamps).
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
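A sketch of composing a versioned key query over a time window using exactly the builder calls documented above; the key and the one-hour window are illustrative.

```java
import java.time.Duration;
import java.time.Instant;
import org.apache.kafka.streams.query.MultiVersionedKeyQuery;

public final class VersionHistoryExample {
    // All versions of the given key written in the last hour, oldest first.
    public static MultiVersionedKeyQuery<String, Long> lastHour(final String key, final Instant now) {
        return MultiVersionedKeyQuery.<String, Long>withKey(key)
            .fromTime(now.minus(Duration.ofHours(1)))
            .toTime(now)
            .withAscendingTimestamps();
    }
}
```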
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/Position.html b/static/41/javadoc/org/apache/kafka/streams/query/Position.html new file mode 100644 index 000000000..b2b6a3aa8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/Position.html @@ -0,0 +1,280 @@ + + + + +Position (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Position

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.query.Position
    +
    +
    +
    +
    @Evolving +public class Position +extends Object
    +
    A representation of a position vector with respect to a set of topic partitions. For example, in + Interactive Query (KafkaStreams.query(StateQueryRequest), a + query result may contain information from multiple store partitions, each of which contains + information from multiple input topics' partitions. This class can be used to summarize all of + that positional information. +

    + This class is threadsafe, although it is mutable. Readers are recommended to use copy() to avoid seeing mutations to the Position after they get the reference. For + examples, when a store executes a StateStore.query(Query, PositionBound, QueryConfig) request and returns its current position via QueryResult.setPosition(Position), it should pass a copy of its position instead of the mutable + reference.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        emptyPosition

        +
        public static Position emptyPosition()
        +
        Create a new, empty Position.
        +
        +
      • +
      • +
        +

        fromMap

        +
        public static Position fromMap(Map<String,? extends Map<Integer,Long>> map)
        +
        Create a new Position and populate it with a mapping of topic -> partition -> offset. +

        + Note, the resulting Position does not share any structure with the provided map, so + subsequent changes to the map or Position will not affect the other.

        +
        +
      • +
      • +
        +

        withComponent

        +
        public Position withComponent(String topic, + int partition, + long offset)
        +
        Augment an existing Position by setting a new offset for a topic and partition. +

        + Note: enforces monotonicity on offsets. I.e., if there is already a component for the same + topic and partition with a larger offset, the update will succeed but not overwrite the + offset. +

        + Returns a self-reference for chained calls. Note: this method mutates the Position.

        +
        +
      • +
      • +
        +

        copy

        +
        public Position copy()
        +
        Create a deep copy of the Position.
        +
        +
      • +
      • +
        +

        merge

        +
        public Position merge(Position other)
        +
        Merges the provided Position into the current instance. +

        + If both Positions contain the same topic -> partition -> offset mapping, the resulting + Position will contain a mapping with the larger of the two offsets.

        +
        +
      • +
      • +
        +

        getTopics

        +
        public Set<String> getTopics()
        +
        Return the topics that are represented in this Position.
        +
        +
      • +
      • +
        +

        getPartitionPositions

        +
        public Map<Integer,Long> getPartitionPositions(String topic)
        +
        Return the partition -> offset mapping for a specific topic.
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        isEmpty

        +
        public boolean isEmpty()
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
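A small sketch exercising the Position API documented above; the topic name and offsets are made up. It shows the monotonic merge behavior and the use of copy() to detach the result from further mutation.

```java
import org.apache.kafka.streams.query.Position;

public final class PositionExample {
    public static void main(final String[] args) {
        final Position a = Position.emptyPosition()
            .withComponent("input-topic", 0, 10L)
            .withComponent("input-topic", 1, 42L);

        final Position b = Position.emptyPosition()
            .withComponent("input-topic", 0, 7L);   // lower offset: merge keeps 10

        // merge() keeps the larger offset per topic-partition; copy() returns a deep copy
        // so later mutations of 'a' are not visible through 'merged'.
        final Position merged = a.merge(b).copy();

        // Expected mapping for "input-topic": partition 0 -> 10, partition 1 -> 42.
        System.out.println(merged.getPartitionPositions("input-topic"));
    }
}
```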
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/PositionBound.html b/static/41/javadoc/org/apache/kafka/streams/query/PositionBound.html new file mode 100644 index 000000000..b1bdf5ada --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/PositionBound.html @@ -0,0 +1,213 @@ + + + + +PositionBound (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class PositionBound

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.query.PositionBound
    +
    +
    +
    +
    @Evolving +public class PositionBound +extends Object
    +
    A class bounding the processing state Position during queries. This can be used to + specify that a query should fail if the locally available partition isn't caught up to the + specified bound. "Unbounded" places no restrictions on the current location of the partition.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        unbounded

        +
        public static PositionBound unbounded()
        +
        Creates a new PositionBound representing "no bound"
        +
        +
      • +
      • +
        +

        at

        +
        public static PositionBound at(Position position)
        +
        Creates a new PositionBound representing a specific position.
        +
        +
      • +
      • +
        +

        isUnbounded

        +
        public boolean isUnbounded()
        +
        Returns true iff this object specifies that there is no position bound.
        +
        +
      • +
      • +
        +

        position

        +
        public Position position()
        +
        Returns the specific position of this bound.
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
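A short sketch contrasting the two ways of constructing a bound; the Position built here is illustrative.

```java
import org.apache.kafka.streams.query.Position;
import org.apache.kafka.streams.query.PositionBound;

public final class PositionBoundExample {
    public static void main(final String[] args) {
        // No restriction on how far the local partition has caught up.
        final PositionBound anywhere = PositionBound.unbounded();

        // Require the store to have processed input-topic partition 0 up to offset 100.
        final PositionBound caughtUp =
            PositionBound.at(Position.emptyPosition().withComponent("input-topic", 0, 100L));

        System.out.println(anywhere.isUnbounded()); // true
        System.out.println(caughtUp.position());    // the specific position of the bound
    }
}
```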
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/Query.html b/static/41/javadoc/org/apache/kafka/streams/query/Query.html new file mode 100644 index 000000000..a4f58a70b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/Query.html @@ -0,0 +1,96 @@ + + + + +Query (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Query<R>

    +
    +
    +
    +
    Type Parameters:
    +
    R - The type of the result returned by this query.
    +
    +
    +
    All Known Implementing Classes:
    +
    KeyQuery, MultiVersionedKeyQuery, RangeQuery, TimestampedKeyQuery, TimestampedRangeQuery, VersionedKeyQuery, WindowKeyQuery, WindowRangeQuery
    +
    +
    +
    public interface Query<R>
    +
    Marker interface that all interactive queries must implement (see KafkaStreams.query(StateQueryRequest)). +

    + You can find all available queries by searching for classes implementing this interface. +

+ Kafka Streams will pass unknown query types straight through into the bytes stores, so callers + can add custom queries by implementing this interface and providing custom stores that handle + them (via StoreSuppliers). +

    + See KIP-796 (https://cwiki.apache.org/confluence/x/34xnCw) for more details.

    +
    + +
    +
    +
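Because Query is only a marker interface, a custom query can be as small as the sketch below; the class name and field are hypothetical. A store that recognizes the type can handle it inside StateStore.query(...), while stores that do not will answer with FailureReason.UNKNOWN_QUERY_TYPE.

```java
import org.apache.kafka.streams.query.Query;

// A hypothetical custom query counting records whose keys start with a prefix.
// The query's result type R is Long (the count).
public final class CountByPrefixQuery implements Query<Long> {

    private final String prefix;

    public CountByPrefixQuery(final String prefix) {
        this.prefix = prefix;
    }

    public String prefix() {
        return prefix;
    }
}
```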
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/QueryConfig.html b/static/41/javadoc/org/apache/kafka/streams/query/QueryConfig.html new file mode 100644 index 000000000..b5478f2ae --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/QueryConfig.html @@ -0,0 +1,160 @@ + + + + +QueryConfig (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class QueryConfig

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.query.QueryConfig
    +
    +
    +
    +
    @Evolving +public class QueryConfig +extends Object
    +
    Runtime configuration parameters
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        QueryConfig

        +
        public QueryConfig(boolean collectExecutionInfo)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        isCollectExecutionInfo

        +
        public boolean isCollectExecutionInfo()
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/QueryResult.html b/static/41/javadoc/org/apache/kafka/streams/query/QueryResult.html new file mode 100644 index 000000000..cfea8f872 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/QueryResult.html @@ -0,0 +1,316 @@ + + + + +QueryResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface QueryResult<R>

    +
    +
    +
    +
    Type Parameters:
    +
    R - The result type of the query.
    +
    +
    +
    public interface QueryResult<R>
    +
    Container for a single partition's result when executing a StateQueryRequest.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
      +
      Used by stores to add detailed execution information (if requested) during query execution.
      +
      +
      static <R> QueryResult<R>
      +
      forFailure(FailureReason failureReason, + String failureMessage)
      +
      +
      Static factory method to create a result object for a failed query.
      +
      +
      static <R> QueryResult<R>
      +
      forResult(R result)
      +
      +
      Static factory method to create a result object for a successful query.
      +
      +
      static <R> QueryResult<R>
      +
      forUnknownQueryType(Query<R> query, + StateStore store)
      +
      +
      Static factory method to create a failed query result object to indicate that the store does + not know how to handle the query.
      +
      + + +
      +
If detailed execution information was requested in StateQueryRequest.enableExecutionInfo(), + this method returns the execution details for this partition's result.
      +
      + + +
      +
      If this partition failed to execute the query, returns the failure message.
      +
      + + +
      +
      If this partition failed to execute the query, returns the reason.
      +
      + + +
      +
      This state partition's exact position in its history when this query was executed.
      +
      + + +
      +
      Returns the result of executing the query on one partition.
      +
      +
      boolean
      + +
      +
      True iff the query execution failed.
      +
      +
      boolean
      + +
      +
      True iff the query was successfully executed.
      +
      +
      static <R> QueryResult<R>
      +
      notUpToBound(Position currentPosition, + PositionBound positionBound, + Integer partition)
      +
      +
      Static factory method to create a failed query result object to indicate that the store has + not yet caught up to the requested position bound.
      +
      +
      void
      + +
      +
      Used by stores to report what exact position in the store's history it was at when it + executed the query.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        forResult

        +
        static <R> QueryResult<R> forResult(R result)
        +
        Static factory method to create a result object for a successful query. Used by StateStores + to respond to a StateStore.query(Query, PositionBound, QueryConfig).
        +
        +
      • +
      • +
        +

        forFailure

        +
        static <R> QueryResult<R> forFailure(FailureReason failureReason, + String failureMessage)
        +
        Static factory method to create a result object for a failed query. Used by StateStores to + respond to a StateStore.query(Query, PositionBound, QueryConfig).
        +
        +
      • +
      • +
        +

        forUnknownQueryType

        +
        static <R> QueryResult<R> forUnknownQueryType(Query<R> query, + StateStore store)
        +
        Static factory method to create a failed query result object to indicate that the store does + not know how to handle the query. +

        + Used by StateStores to respond to a StateStore.query(Query, PositionBound, QueryConfig).

        +
        +
      • +
      • +
        +

        notUpToBound

        +
        static <R> QueryResult<R> notUpToBound(Position currentPosition, + PositionBound positionBound, + Integer partition)
        +
        Static factory method to create a failed query result object to indicate that the store has + not yet caught up to the requested position bound. +

        + Used by StateStores to respond to a StateStore.query(Query, PositionBound, QueryConfig).

        +
        +
      • +
      • +
        +

        addExecutionInfo

        +
        void addExecutionInfo(String message)
        +
        Used by stores to add detailed execution information (if requested) during query execution.
        +
        +
      • +
      • +
        +

        setPosition

        +
        void setPosition(Position position)
        +
        Used by stores to report what exact position in the store's history it was at when it + executed the query.
        +
        +
      • +
      • +
        +

        isSuccess

        +
        boolean isSuccess()
        +
        True iff the query was successfully executed. The response is available in getResult().
        +
        +
      • +
      • +
        +

        isFailure

        +
        boolean isFailure()
        +
        True iff the query execution failed. More information about the failure is available in + getFailureReason() and getFailureMessage().
        +
        +
      • +
      • +
        +

        getExecutionInfo

        +
        List<String> getExecutionInfo()
        +
If detailed execution information was requested in StateQueryRequest.enableExecutionInfo(), + this method returns the execution details for this partition's result.
        +
        +
      • +
      • +
        +

        getPosition

        +
        Position getPosition()
        +
        This state partition's exact position in its history when this query was executed. Can be + used in conjunction with subsequent queries via StateQueryRequest.withPositionBound(PositionBound). +

        + Note: stores are encouraged, but not required to set this property.

        +
        +
      • +
      • +
        +

        getFailureReason

        +
        FailureReason getFailureReason()
        +
        If this partition failed to execute the query, returns the reason.
        +
        +
        Throws:
        +
        IllegalArgumentException - if this is not a failed result.
        +
        +
        +
      • +
      • +
        +

        getFailureMessage

        +
        String getFailureMessage()
        +
        If this partition failed to execute the query, returns the failure message.
        +
        +
        Throws:
        +
        IllegalArgumentException - if this is not a failed result.
        +
        +
        +
      • +
      • +
        +

        getResult

        +
        R getResult()
        +
Returns the result of executing the query on one partition. The result type is determined by + the query. Note: queries may choose to return null for a successful query, so isSuccess() and isFailure() must be used to determine whether the query + was successful or failed on this partition.
        +
        +
        Throws:
        +
        IllegalArgumentException - if this is not a successful query.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
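A sketch of how a custom store might produce these results when answering a query; the handler method and the in-memory map are hypothetical, while the factory methods and accessors are the ones documented above.

```java
import java.util.Map;
import org.apache.kafka.streams.query.FailureReason;
import org.apache.kafka.streams.query.KeyQuery;
import org.apache.kafka.streams.query.Query;
import org.apache.kafka.streams.query.QueryResult;

public final class QueryResultExample {

    // Hypothetical handler for a KeyQuery<String, Long> backed by an in-memory map.
    @SuppressWarnings("unchecked")
    public static <R> QueryResult<R> handle(final Query<R> query, final Map<String, Long> data) {
        if (query instanceof KeyQuery) {
            final KeyQuery<String, Long> keyQuery = (KeyQuery<String, Long>) query;
            final Long value = data.get(keyQuery.getKey());
            final QueryResult<R> result = (QueryResult<R>) QueryResult.forResult(value);
            // A real store would only do this when QueryConfig.isCollectExecutionInfo() is true.
            result.addExecutionInfo("served from in-memory map");
            return result;
        }
        return QueryResult.forFailure(
            FailureReason.UNKNOWN_QUERY_TYPE,
            "This store only handles KeyQuery");
    }
}
```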
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/RangeQuery.html b/static/41/javadoc/org/apache/kafka/streams/query/RangeQuery.html new file mode 100644 index 000000000..688907a1f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/RangeQuery.html @@ -0,0 +1,301 @@ + + + + +RangeQuery (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class RangeQuery<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.query.RangeQuery<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - Type of keys
    +
    V - Type of values
    +
    +
    +
    All Implemented Interfaces:
    +
    Query<KeyValueIterator<K,V>>
    +
    +
    +
    @Evolving +public final class RangeQuery<K,V> +extends Object +implements Query<KeyValueIterator<K,V>>
    +
    Interactive query for issuing range queries and scans over KeyValue stores. +

    + A range query retrieves a set of records, specified using an upper and/or lower bound on the keys. +

    + A scan query retrieves all records contained in the store. +

    + Keys' order is based on the serialized byte[] of the keys, not the 'logical' key order.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        withRange

        +
        public static <K, +V> RangeQuery<K,V> withRange(K lower, + K upper)
        +
        Interactive range query using a lower and upper bound to filter the keys returned.
        +
        +
        Type Parameters:
        +
        K - The key type
        +
        V - The value type
        +
        Parameters:
        +
        lower - The key that specifies the lower bound of the range
        +
        upper - The key that specifies the upper bound of the range
        +
        +
        +
      • +
      • +
        +

        resultOrder

        +
        public ResultOrder resultOrder()
        +
Determines whether the serialized byte[] of the keys are returned in ascending, descending, or unordered order. + Order is based on the serialized byte[] of the keys, not the 'logical' key order.
        +
        +
        Returns:
        +
        return the order of returned records based on the serialized byte[] of the keys (can be unordered, or in ascending or in descending order).
        +
        +
        +
      • +
      • +
        +

        withDescendingKeys

        +
        public RangeQuery<K,V> withDescendingKeys()
        +
        Set the query to return the serialized byte[] of the keys in descending order. + Order is based on the serialized byte[] of the keys, not the 'logical' key order.
        +
        +
        Returns:
        +
        a new RangeQuery instance with descending flag set.
        +
        +
        +
      • +
      • +
        +

        withAscendingKeys

        +
        public RangeQuery<K,V> withAscendingKeys()
        +
        Set the query to return the serialized byte[] of the keys in ascending order. + Order is based on the serialized byte[] of the keys, not the 'logical' key order.
        +
        +
        Returns:
        +
        a new RangeQuery instance with ascending flag set.
        +
        +
        +
      • +
      • +
        +

        withUpperBound

        +
        public static <K, +V> RangeQuery<K,V> withUpperBound(K upper)
        +
Interactive range query using an upper bound to filter the keys returned. + If both the lower and upper bounds are null, the RangeQuery returns a full range scan.
        +
        +
        Type Parameters:
        +
        K - The key type
        +
        V - The value type
        +
        Parameters:
        +
        upper - The key that specifies the upper bound of the range
        +
        +
        +
      • +
      • +
        +

        withLowerBound

        +
        public static <K, +V> RangeQuery<K,V> withLowerBound(K lower)
        +
        Interactive range query using a lower bound to filter the keys returned.
        +
        +
        Type Parameters:
        +
        K - The key type
        +
        V - The value type
        +
        Parameters:
        +
        lower - The key that specifies the lower bound of the range
        +
        +
        +
      • +
      • +
        +

        withNoBounds

        +
        public static <K, +V> RangeQuery<K,V> withNoBounds()
        +
        Interactive scan query that returns all records in the store.
        +
        +
        Type Parameters:
        +
        K - The key type
        +
        V - The value type
        +
        +
        +
      • +
      • +
        +

        getLowerBound

        +
        public Optional<K> getLowerBound()
        +
        The lower bound of the query, if specified.
        +
        +
      • +
      • +
        +

        getUpperBound

        +
        public Optional<K> getUpperBound()
        +
        The upper bound of the query, if specified
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
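A short sketch of the range-query variants documented above; the key/value types and bounds are illustrative.

```java
import org.apache.kafka.streams.query.RangeQuery;

public final class RangeQueryExamples {

    // Keys between the lower bound "a" and the upper bound "m",
    // ordered by the serialized byte[] of the keys.
    public static RangeQuery<String, Long> between() {
        return RangeQuery.withRange("a", "m");
    }

    // Full scan of the store, descending by serialized key bytes.
    public static RangeQuery<String, Long> scanDescending() {
        return RangeQuery.<String, Long>withNoBounds().withDescendingKeys();
    }
}
```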
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/ResultOrder.html b/static/41/javadoc/org/apache/kafka/streams/query/ResultOrder.html new file mode 100644 index 000000000..f38738f35 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/ResultOrder.html @@ -0,0 +1,225 @@ + + + + +ResultOrder (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Enum Class ResultOrder

    +
    +
    java.lang.Object +
    java.lang.Enum<ResultOrder> +
    org.apache.kafka.streams.query.ResultOrder
    +
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Serializable, Comparable<ResultOrder>, Constable
    +
    +
    +
    public enum ResultOrder +extends Enum<ResultOrder>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Enum Constant Details

      +
        +
      • +
        +

        ANY

        +
        public static final ResultOrder ANY
        +
        +
      • +
      • +
        +

        ASCENDING

        +
        public static final ResultOrder ASCENDING
        +
        +
      • +
      • +
        +

        DESCENDING

        +
        public static final ResultOrder DESCENDING
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        values

        +
        public static ResultOrder[] values()
        +
        Returns an array containing the constants of this enum class, in +the order they are declared.
        +
        +
        Returns:
        +
        an array containing the constants of this enum class, in the order they are declared
        +
        +
        +
      • +
      • +
        +

        valueOf

        +
        public static ResultOrder valueOf(String name)
        +
        Returns the enum constant of this class with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this class. (Extraneous whitespace characters are +not permitted.)
        +
        +
        Parameters:
        +
        name - the name of the enum constant to be returned.
        +
        Returns:
        +
        the enum constant with the specified name
        +
        Throws:
        +
        IllegalArgumentException - if this enum class has no constant with the specified name
        +
        NullPointerException - if the argument is null
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/StateQueryRequest.InStore.html b/static/41/javadoc/org/apache/kafka/streams/query/StateQueryRequest.InStore.html new file mode 100644 index 000000000..635024bc8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/StateQueryRequest.InStore.html @@ -0,0 +1,139 @@ + + + + +StateQueryRequest.InStore (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StateQueryRequest.InStore

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.query.StateQueryRequest.InStore
    +
    +
    +
    +
    Enclosing class:
    +
    StateQueryRequest<R>
    +
    +
    +
    public static class StateQueryRequest.InStore +extends Object
    +
A progressive builder interface for creating StateQueryRequests.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        withQuery

        +
        public <R> StateQueryRequest<R> withQuery(Query<R> query)
        +
        Specifies the query to run on the specified store.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/StateQueryRequest.html b/static/41/javadoc/org/apache/kafka/streams/query/StateQueryRequest.html new file mode 100644 index 000000000..9b8fdae18 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/StateQueryRequest.html @@ -0,0 +1,317 @@ + + + + +StateQueryRequest (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StateQueryRequest<R>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.query.StateQueryRequest<R>
    +
    +
    +
    +
    Type Parameters:
    +
    R - The type of the query result.
    +
    +
    +
    @Evolving +public class StateQueryRequest<R> +extends Object
    +
    The request object for Interactive Queries. This is an immutable builder class for passing all + required and optional arguments for querying a state store in Kafka Streams. +

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        inStore

        +
        public static StateQueryRequest.InStore inStore(String name)
        +
        Specifies the name of the store to query.
        +
        +
      • +
      • +
        +

        withPositionBound

        +
        public StateQueryRequest<R> withPositionBound(PositionBound positionBound)
        +
        Bounds the position of the state store against its input topics.
        +
        +
      • +
      • +
        +

        withAllPartitions

        +
        public StateQueryRequest<R> withAllPartitions()
        +
        Specifies that the query will run against all locally available partitions.
        +
        +
      • +
      • +
        +

        withPartitions

        +
        public StateQueryRequest<R> withPartitions(Set<Integer> partitions)
        +
        Specifies a set of partitions to run against. If some partitions are not locally available, + the response will contain a FailureReason.NOT_PRESENT for those partitions. If some + partitions in this set are not valid partitions for the store, the response will contain a + FailureReason.DOES_NOT_EXIST for those partitions.
        +
        +
      • +
      • +
        +

        enableExecutionInfo

        +
        public StateQueryRequest<R> enableExecutionInfo()
        +
Requests that stores and the Streams runtime record any useful details about how the query + was executed.
        +
        +
      • +
      • +
        +

        requireActive

        +
        public StateQueryRequest<R> requireActive()
        +
        Specifies that this query should only run on partitions for which this instance is the leader + (aka "active"). Partitions for which this instance is not the active replica will return + FailureReason.NOT_ACTIVE.
        +
        +
      • +
      • +
        +

        getStoreName

        +
        public String getStoreName()
        +
        The name of the store this request is for.
        +
        +
      • +
      • +
        +

        getPositionBound

        +
        public PositionBound getPositionBound()
        +
        The bound that this request places on its query, in terms of the partitions' positions + against its inputs.
        +
        +
      • +
      • +
        +

        getQuery

        +
        public Query<R> getQuery()
        +
        The query this request is meant to run.
        +
        +
      • +
      • +
        +

        isAllPartitions

        +
        public boolean isAllPartitions()
        +
        Whether this request should fetch from all locally available partitions.
        +
        +
      • +
      • +
        +

        getPartitions

        +
        public Set<Integer> getPartitions()
        +
        If the request is for specific partitions, return the set of partitions to query.
        +
        +
        Throws:
        +
        IllegalStateException - if this is a request for all partitions
        +
        +
        +
      • +
      • +
        +

        executionInfoEnabled

        +
        public boolean executionInfoEnabled()
        +
        Whether the request includes detailed execution information.
        +
        +
      • +
      • +
        +

        isRequireActive

        +
        public boolean isRequireActive()
        +
        Whether this request requires the query to execute only on active partitions.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
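As a rough sketch of how the builder methods above chain together (the store name, key, partitions, and types are placeholders, and KeyQuery is just one possible query type):

```java
import java.util.Set;
import org.apache.kafka.streams.query.KeyQuery;
import org.apache.kafka.streams.query.StateQueryRequest;

public class StateQueryRequestExample {
    public static StateQueryRequest<Long> buildRequest() {
        return StateQueryRequest.inStore("counts-store")          // which store to query
            .withQuery(KeyQuery.<String, Long>withKey("alice"))   // the query to run
            .withPartitions(Set.of(0, 1))                         // restrict to these partitions
            .enableExecutionInfo();                               // collect execution details
        // withPositionBound(...), withAllPartitions(), and requireActive() can be chained
        // in the same way, depending on the desired consistency/availability trade-off.
    }
}
```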
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/StateQueryResult.html b/static/41/javadoc/org/apache/kafka/streams/query/StateQueryResult.html new file mode 100644 index 000000000..aa7ffbc57 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/StateQueryResult.html @@ -0,0 +1,254 @@ + + + + +StateQueryResult (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StateQueryResult<R>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.query.StateQueryResult<R>
    +
    +
    +
    +
    Type Parameters:
    +
    R - The type of the query result.
    +
    +
    +
    @Evolving +public class StateQueryResult<R> +extends Object
    +
    The response object for interactive queries. This wraps the individual partition results, as well + as metadata relating to the result as a whole. +

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        StateQueryResult

        +
        public StateQueryResult()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        setGlobalResult

        +
        public void setGlobalResult(QueryResult<R> r)
        +
        Set the result for a global store query. Used by Kafka Streams and available for tests.
        +
        +
      • +
      • +
        +

        addResult

        +
        public void addResult(int partition, + QueryResult<R> r)
        +
        Set the result for a partitioned store query. Used by Kafka Streams and available for tests.
        +
        +
      • +
      • +
        +

        getPartitionResults

        +
        public Map<Integer,QueryResult<R>> getPartitionResults()
        +
        The query's result for each partition that executed the query. Empty for global store + queries.
        +
        +
      • +
      • +
        +

        getOnlyPartitionResult

        +
        public QueryResult<R> getOnlyPartitionResult()
        +
        For queries that are expected to match records in only one partition, returns the result.
        +
        +
        Throws:
        +
        IllegalArgumentException - if the results are not for exactly one partition.
        +
        +
        +
      • +
      • +
        +

        getGlobalResult

        +
        public QueryResult<R> getGlobalResult()
        +
        The query's result for global store queries. Is null for non-global (partitioned) + store queries.
        +
        +
      • +
      • +
        +

        getPosition

        +
        public Position getPosition()
        +
        The position of the state store at the moment it executed the query. In conjunction with + StateQueryRequest.withPositionBound(org.apache.kafka.streams.query.PositionBound), this can be used to achieve a good balance + between consistency and availability in which repeated queries are guaranteed to advance in + time while allowing reads to be served from any replica that is caught up to that caller's + prior observations.
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
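For context, a minimal sketch of consuming a StateQueryResult for a single-key query; the store name, key, and value types are assumptions for the example:

```java
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.query.KeyQuery;
import org.apache.kafka.streams.query.QueryResult;
import org.apache.kafka.streams.query.StateQueryRequest;
import org.apache.kafka.streams.query.StateQueryResult;

public class StateQueryResultExample {
    public static Long lookup(final KafkaStreams streams, final String key) {
        final StateQueryResult<Long> result = streams.query(
            StateQueryRequest.inStore("counts-store")
                .withQuery(KeyQuery.<String, Long>withKey(key)));

        // A single key maps to one partition; getOnlyPartitionResult() throws
        // IllegalArgumentException if there is not exactly one partition result.
        final QueryResult<Long> partitionResult = result.getOnlyPartitionResult();
        return partitionResult.isSuccess() ? partitionResult.getResult() : null;
    }
}
```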
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/TimestampedKeyQuery.html b/static/41/javadoc/org/apache/kafka/streams/query/TimestampedKeyQuery.html new file mode 100644 index 000000000..a477fa684 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/TimestampedKeyQuery.html @@ -0,0 +1,198 @@ + + + + +TimestampedKeyQuery (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TimestampedKeyQuery<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.query.TimestampedKeyQuery<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - Type of keys
    +
    V - Type of values
    +
    +
    +
    All Implemented Interfaces:
    +
    Query<ValueAndTimestamp<V>>
    +
    +
    +
    @Evolving +public final class TimestampedKeyQuery<K,V> +extends Object +implements Query<ValueAndTimestamp<V>>
    +
    Interactive query for retrieving a single record based on its key from TimestampedKeyValueStore
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      + +
      +
      The flag whether to skip the cache or not during query evaluation.
      +
      + +
      key()
      +
      +
      Return the key that was specified for this query.
      +
      + + +
      +
      Specifies that the cache should be skipped during query evaluation.
      +
      +
      static <K, +V> TimestampedKeyQuery<K,V>
      +
      withKey(K key)
      +
      +
      Creates a query that will retrieve the record identified by key if it exists + (or null otherwise).
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        withKey

        +
        public static <K, +V> TimestampedKeyQuery<K,V> withKey(K key)
        +
        Creates a query that will retrieve the record identified by key if it exists + (or null otherwise).
        +
        +
        Type Parameters:
        +
        K - The type of the key
        +
        V - The type of the value that will be retrieved
        +
        Parameters:
        +
        key - The key to retrieve
        +
        +
        +
      • +
      • +
        +

        skipCache

        +
        public TimestampedKeyQuery<K,V> skipCache()
        +
        Specifies that the cache should be skipped during query evaluation. This means, that the query will always + get forwarded to the underlying store.
        +
        +
      • +
      • +
        +

        key

        +
        public K key()
        +
        Return the key that was specified for this query.
        +
        +
        Returns:
        +
        The key that was specified for this query.
        +
        +
        +
      • +
      • +
        +

        isSkipCache

        +
        public boolean isSkipCache()
        +
        The flag whether to skip the cache or not during query evaluation.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
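A minimal sketch of the typical call pattern, assuming a TimestampedKeyValueStore named "latest-values-store" with String keys and Long values:

```java
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.query.StateQueryRequest;
import org.apache.kafka.streams.query.StateQueryResult;
import org.apache.kafka.streams.query.TimestampedKeyQuery;
import org.apache.kafka.streams.state.ValueAndTimestamp;

public class TimestampedKeyQueryExample {
    public static ValueAndTimestamp<Long> latest(final KafkaStreams streams, final String key) {
        // skipCache() forwards the query directly to the underlying store.
        final TimestampedKeyQuery<String, Long> query =
            TimestampedKeyQuery.<String, Long>withKey(key).skipCache();

        final StateQueryResult<ValueAndTimestamp<Long>> result =
            streams.query(StateQueryRequest.inStore("latest-values-store").withQuery(query));

        // The value together with the timestamp of the record that produced it.
        return result.getOnlyPartitionResult().getResult();
    }
}
```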
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/TimestampedRangeQuery.html b/static/41/javadoc/org/apache/kafka/streams/query/TimestampedRangeQuery.html new file mode 100644 index 000000000..59cecc776 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/TimestampedRangeQuery.html @@ -0,0 +1,301 @@ + + + + +TimestampedRangeQuery (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class TimestampedRangeQuery<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.query.TimestampedRangeQuery<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - Type of keys
    +
    V - Type of values
    +
    +
    +
    All Implemented Interfaces:
    +
    Query<KeyValueIterator<K,ValueAndTimestamp<V>>>
    +
    +
    +
    @Evolving +public final class TimestampedRangeQuery<K,V> +extends Object +implements Query<KeyValueIterator<K,ValueAndTimestamp<V>>>
    +
    Interactive query for issuing range queries and scans over TimestampedKeyValueStore +

    + A range query retrieves a set of records, specified using an upper and/or lower bound on the keys. +

    + A scan query retrieves all records contained in the store. +

    + Keys' order is based on the serialized byte[] of the keys, not the 'logical' key order.

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        withRange

        +
        public static <K, +V> TimestampedRangeQuery<K,V> withRange(K lower, + K upper)
        +
        Interactive range query using a lower and upper bound to filter the keys returned.
        +
        +
        Type Parameters:
        +
        K - The key type
        +
        V - The value type
        +
        Parameters:
        +
        lower - The key that specifies the lower bound of the range
        +
        upper - The key that specifies the upper bound of the range
        +
        +
        +
      • +
      • +
        +

        withUpperBound

        +
        public static <K, +V> TimestampedRangeQuery<K,V> withUpperBound(K upper)
        +
Interactive range query using an upper bound to filter the keys returned. + If both bounds are null, the query returns a full range scan.
        +
        +
        Type Parameters:
        +
        K - The key type
        +
        V - The value type
        +
        Parameters:
        +
        upper - The key that specifies the upper bound of the range
        +
        +
        +
      • +
      • +
        +

        withLowerBound

        +
        public static <K, +V> TimestampedRangeQuery<K,V> withLowerBound(K lower)
        +
        Interactive range query using a lower bound to filter the keys returned.
        +
        +
        Type Parameters:
        +
        K - The key type
        +
        V - The value type
        +
        Parameters:
        +
        lower - The key that specifies the lower bound of the range
        +
        +
        +
      • +
      • +
        +

        resultOrder

        +
        public ResultOrder resultOrder()
        +
Determines whether the keys are returned in ascending, descending, or unspecified order. + Order is based on the serialized byte[] of the keys, not the 'logical' key order.
        +
        +
        Returns:
        +
the order of the returned records, based on the serialized byte[] of the keys (unordered, ascending, or descending).
        +
        +
        +
      • +
      • +
        +

        withDescendingKeys

        +
        public TimestampedRangeQuery<K,V> withDescendingKeys()
        +
        Set the query to return the serialized byte[] of the keys in descending order. + Order is based on the serialized byte[] of the keys, not the 'logical' key order.
        +
        +
        Returns:
        +
        a new RangeQuery instance with descending flag set.
        +
        +
        +
      • +
      • +
        +

        withAscendingKeys

        +
        public TimestampedRangeQuery<K,V> withAscendingKeys()
        +
        Set the query to return the serialized byte[] of the keys in ascending order. + Order is based on the serialized byte[] of the keys, not the 'logical' key order.
        +
        +
        Returns:
        +
        a new RangeQuery instance with ascending flag set.
        +
        +
        +
      • +
      • +
        +

        withNoBounds

        +
        public static <K, +V> TimestampedRangeQuery<K,V> withNoBounds()
        +
        Interactive scan query that returns all records in the store.
        +
        +
        Type Parameters:
        +
        K - The key type
        +
        V - The value type
        +
        +
        +
      • +
      • +
        +

        lowerBound

        +
        public Optional<K> lowerBound()
        +
        The lower bound of the query, if specified.
        +
        +
      • +
      • +
        +

        upperBound

        +
        public Optional<K> upperBound()
        +
The upper bound of the query, if specified.
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/VersionedKeyQuery.html b/static/41/javadoc/org/apache/kafka/streams/query/VersionedKeyQuery.html new file mode 100644 index 000000000..b945c50e5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/VersionedKeyQuery.html @@ -0,0 +1,214 @@ + + + + +VersionedKeyQuery (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class VersionedKeyQuery<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.query.VersionedKeyQuery<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - The type of the key.
    +
    V - The type of the value.
    +
    +
    +
    All Implemented Interfaces:
    +
    Query<VersionedRecord<V>>
    +
    +
    +
    @Evolving +public final class VersionedKeyQuery<K,V> +extends Object +implements Query<VersionedRecord<V>>
    +
    Interactive query for retrieving a single record from a versioned state store based on its key and timestamp.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        withKey

        +
        public static <K, +V> VersionedKeyQuery<K,V> withKey(K key)
        +
        Creates a query that will retrieve the record from a versioned state store identified by key if it exists + (or null otherwise). +

+ While the query by default returns the latest value of the specified key, setting + the asOfTimestamp (by calling the asOf(Instant) method) makes the query + return the value associated with the specified asOfTimestamp.

        +
        +
        Type Parameters:
        +
        K - The type of the key
        +
        V - The type of the value that will be retrieved
        +
        Parameters:
        +
        key - The key to retrieve
        +
        Throws:
        +
        NullPointerException - if key is null
        +
        +
        +
      • +
      • +
        +

        asOf

        +
        public VersionedKeyQuery<K,V> asOf(Instant asOfTimestamp)
        +
        Specifies the timestamp for the key query. The key query returns the record's version for the specified timestamp. + (To be more precise: The key query returns the record with the greatest timestamp <= asOfTimestamp)
        +
        +
        Parameters:
        +
        asOfTimestamp - The timestamp of the query.
        +
        Throws:
        +
        NullPointerException - if asOfTimestamp is null
        +
        +
        +
      • +
      • +
        +

        key

        +
        public K key()
        +
        The key that was specified for this query.
        +
        +
        Returns:
        +
        The specified key of the query.
        +
        +
        +
      • +
      • +
        +

        asOfTimestamp

        +
        public Optional<Instant> asOfTimestamp()
        +
        The timestamp of the query, if specified.
        +
        +
        Returns:
        +
        The specified asOfTimestamp of the query.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
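A minimal sketch of an as-of lookup against a versioned store; the store name and types are assumptions for the example:

```java
import java.time.Instant;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.query.StateQueryRequest;
import org.apache.kafka.streams.query.StateQueryResult;
import org.apache.kafka.streams.query.VersionedKeyQuery;
import org.apache.kafka.streams.state.VersionedRecord;

public class VersionedKeyQueryExample {
    public static VersionedRecord<Long> valueAsOf(final KafkaStreams streams,
                                                  final String key,
                                                  final Instant asOf) {
        // Without asOf(...), the query returns the latest version of the key.
        final VersionedKeyQuery<String, Long> query =
            VersionedKeyQuery.<String, Long>withKey(key).asOf(asOf);

        final StateQueryResult<VersionedRecord<Long>> result =
            streams.query(StateQueryRequest.inStore("versioned-store").withQuery(query));

        return result.getOnlyPartitionResult().getResult();
    }
}
```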
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/WindowKeyQuery.html b/static/41/javadoc/org/apache/kafka/streams/query/WindowKeyQuery.html new file mode 100644 index 000000000..9a400f28a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/WindowKeyQuery.html @@ -0,0 +1,183 @@ + + + + +WindowKeyQuery (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class WindowKeyQuery<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.query.WindowKeyQuery<K,V>
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Query<WindowStoreIterator<V>>
    +
    +
    +
    @Evolving +public class WindowKeyQuery<K,V> +extends Object +implements Query<WindowStoreIterator<V>>
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        withKeyAndWindowStartRange

        +
        public static <K, +V> WindowKeyQuery<K,V> withKeyAndWindowStartRange(K key, + Instant timeFrom, + Instant timeTo)
        +
        +
      • +
      • +
        +

        getKey

        +
        public K getKey()
        +
        +
      • +
      • +
        +

        getTimeFrom

        +
        public Optional<Instant> getTimeFrom()
        +
        +
      • +
      • +
        +

        getTimeTo

        +
        public Optional<Instant> getTimeTo()
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
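A minimal sketch of fetching one key's windows over the last hour; the store name, types, and time range are assumptions for the example:

```java
import java.time.Duration;
import java.time.Instant;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.query.StateQueryRequest;
import org.apache.kafka.streams.query.StateQueryResult;
import org.apache.kafka.streams.query.WindowKeyQuery;
import org.apache.kafka.streams.state.WindowStoreIterator;

public class WindowKeyQueryExample {
    public static void printLastHour(final KafkaStreams streams, final String key) {
        final Instant now = Instant.now();
        final WindowKeyQuery<String, Long> query =
            WindowKeyQuery.<String, Long>withKeyAndWindowStartRange(
                key, now.minus(Duration.ofHours(1)), now);

        final StateQueryResult<WindowStoreIterator<Long>> result =
            streams.query(StateQueryRequest.inStore("windowed-counts-store").withQuery(query));

        result.getPartitionResults().forEach((partition, r) -> {
            if (r.isSuccess()) {
                try (WindowStoreIterator<Long> iterator = r.getResult()) {
                    // kv.key is the window start timestamp in epoch milliseconds.
                    iterator.forEachRemaining(kv -> System.out.println(kv.key + " -> " + kv.value));
                }
            }
        });
    }
}
```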
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/WindowRangeQuery.html b/static/41/javadoc/org/apache/kafka/streams/query/WindowRangeQuery.html new file mode 100644 index 000000000..67f951723 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/WindowRangeQuery.html @@ -0,0 +1,191 @@ + + + + +WindowRangeQuery (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class WindowRangeQuery<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.query.WindowRangeQuery<K,V>
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Query<KeyValueIterator<Windowed<K>,V>>
    +
    +
    +
    public class WindowRangeQuery<K,V> +extends Object +implements Query<KeyValueIterator<Windowed<K>,V>>
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/package-summary.html b/static/41/javadoc/org/apache/kafka/streams/query/package-summary.html new file mode 100644 index 000000000..f36820b3c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/package-summary.html @@ -0,0 +1,189 @@ + + + + +org.apache.kafka.streams.query (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.streams.query

    +
    +
    +
    package org.apache.kafka.streams.query
    +
    +
    Provides a query API (aka Interactive Queries) over state stores, for extracting data from a stateful Kafka Streams application.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/query/package-tree.html b/static/41/javadoc/org/apache/kafka/streams/query/package-tree.html new file mode 100644 index 000000000..4a333d1a2 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/query/package-tree.html @@ -0,0 +1,106 @@ + + + + +org.apache.kafka.streams.query Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.streams.query

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    +
      +
    • org.apache.kafka.streams.query.Query<R>
    • +
    • org.apache.kafka.streams.query.QueryResult<R>
    • +
    +
    +
    +

    Enum Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers.html b/static/41/javadoc/org/apache/kafka/streams/state/BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers.html new file mode 100644 index 000000000..cfbeca692 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers.html @@ -0,0 +1,201 @@ + + + + +BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.state.BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Configurable, DslStoreSuppliers
    +
    +
    +
    Enclosing class:
    +
    BuiltInDslStoreSuppliers
    +
    +
    +
    public static class BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers +extends Object +implements DslStoreSuppliers
    +
    A DslStoreSuppliers that supplies all stores backed by an in-memory map
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers.html b/static/41/javadoc/org/apache/kafka/streams/state/BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers.html new file mode 100644 index 000000000..345bb9212 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers.html @@ -0,0 +1,201 @@ + + + + +BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.state.BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Configurable, DslStoreSuppliers
    +
    +
    +
    Enclosing class:
    +
    BuiltInDslStoreSuppliers
    +
    +
    +
    public static class BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers +extends Object +implements DslStoreSuppliers
    +
    A DslStoreSuppliers that supplies all stores backed by RocksDB
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/BuiltInDslStoreSuppliers.html b/static/41/javadoc/org/apache/kafka/streams/state/BuiltInDslStoreSuppliers.html new file mode 100644 index 000000000..2060274b0 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/BuiltInDslStoreSuppliers.html @@ -0,0 +1,189 @@ + + + + +BuiltInDslStoreSuppliers (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class BuiltInDslStoreSuppliers

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.state.BuiltInDslStoreSuppliers
    +
    +
    +
    +
    public class BuiltInDslStoreSuppliers +extends Object
    +
Collection of builtin DslStoreSuppliers for Kafka Streams. Today we + support RocksDB and in-memory stores out of the box.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Field Details

      + +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        BuiltInDslStoreSuppliers

        +
        public BuiltInDslStoreSuppliers()
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/DslKeyValueParams.html b/static/41/javadoc/org/apache/kafka/streams/state/DslKeyValueParams.html new file mode 100644 index 000000000..1c5b6744f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/DslKeyValueParams.html @@ -0,0 +1,215 @@ + + + + +DslKeyValueParams (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class DslKeyValueParams

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.state.DslKeyValueParams
    +
    +
    +
    +
    public class DslKeyValueParams +extends Object
    +
    DslKeyValueParams is a wrapper class for all parameters that function + as inputs to DslStoreSuppliers.keyValueStore(DslKeyValueParams).
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        DslKeyValueParams

        +
        public DslKeyValueParams(String name, + boolean isTimestamped)
        +
        +
        Parameters:
        +
        name - the name of the store (cannot be null)
        +
isTimestamped - whether the returned stores should be timestamped (see TimestampedKeyValueStore)
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        name

        +
        public String name()
        +
        +
      • +
      • +
        +

        isTimestamped

        +
        public boolean isTimestamped()
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/DslSessionParams.html b/static/41/javadoc/org/apache/kafka/streams/state/DslSessionParams.html new file mode 100644 index 000000000..61d8f498c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/DslSessionParams.html @@ -0,0 +1,229 @@ + + + + +DslSessionParams (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class DslSessionParams

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.state.DslSessionParams
    +
    +
    +
    +
    public class DslSessionParams +extends Object
    +
    DslSessionParams is a wrapper class for all parameters that function + as inputs to DslStoreSuppliers.sessionStore(DslSessionParams).
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        DslSessionParams

        +
        public DslSessionParams(String name, + Duration retentionPeriod, + EmitStrategy emitStrategy)
        +
        +
        Parameters:
        +
        name - name of the store (cannot be null)
        +
retentionPeriod - length of time to retain data in the store (cannot be negative) + (note that the retention period must be at least long enough to + contain the inactivity gap of the session and the entire grace period.)
        +
        emitStrategy - defines how to emit results
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        name

        +
        public String name()
        +
        +
      • +
      • +
        +

        retentionPeriod

        +
        public Duration retentionPeriod()
        +
        +
      • +
      • +
        +

        emitStrategy

        +
        public EmitStrategy emitStrategy()
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/DslStoreSuppliers.html b/static/41/javadoc/org/apache/kafka/streams/state/DslStoreSuppliers.html new file mode 100644 index 000000000..d2a7bb02b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/DslStoreSuppliers.html @@ -0,0 +1,192 @@ + + + + +DslStoreSuppliers (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface DslStoreSuppliers

    +
    +
    +
    +
    All Superinterfaces:
    +
    Configurable
    +
    +
    +
    All Known Implementing Classes:
    +
    BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers, BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers, Materialized.StoreType
    +
    +
    +
    public interface DslStoreSuppliers +extends Configurable
    +
    DslStoreSuppliers defines a grouping of factories to construct + stores for each of the types of state store implementations in Kafka + Streams. This allows configuration of a default store supplier beyond + the builtin defaults of RocksDB and In-Memory. + +

    There are various ways that this configuration can be supplied to + the application (in order of precedence): +

      +
1. Passed in directly to a DSL operator via either Materialized.as(DslStoreSuppliers), Materialized.withStoreType(DslStoreSuppliers), or StreamJoined.withDslStoreSuppliers(DslStoreSuppliers)
2. Passed in via a Topology configuration override (configured in a TopologyConfig and passed into the StreamsBuilder(TopologyConfig) constructor)
3. Configured as a global default in StreamsConfig using the StreamsConfig.DSL_STORE_SUPPLIERS_CLASS_CONFIG configuration.
    + +

    Kafka Streams is packaged with some pre-existing DslStoreSuppliers + that exist in BuiltInDslStoreSuppliers

    +
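For example, a minimal sketch of the third option above, the global default, assuming Kafka Streams 3.7 or newer and placeholder application id and bootstrap servers (per-operator overrides use Materialized.as(DslStoreSuppliers) as listed above):

```java
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.state.BuiltInDslStoreSuppliers;

public class DslStoreSuppliersConfigExample {
    public static Properties streamsConfig() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-app");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Back all DSL state stores with the built-in in-memory suppliers instead of RocksDB.
        props.put(StreamsConfig.DSL_STORE_SUPPLIERS_CLASS_CONFIG,
                  BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers.class);
        return props;
    }
}
```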
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/DslWindowParams.html b/static/41/javadoc/org/apache/kafka/streams/state/DslWindowParams.html new file mode 100644 index 000000000..1110dd058 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/DslWindowParams.html @@ -0,0 +1,279 @@ + + + + +DslWindowParams (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class DslWindowParams

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.state.DslWindowParams
    +
    +
    +
    +
    public class DslWindowParams +extends Object
    +
    DslWindowParams is a wrapper class for all parameters that function + as inputs to DslStoreSuppliers.windowStore(DslWindowParams).
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        DslWindowParams

        +
        public DslWindowParams(String name, + Duration retentionPeriod, + Duration windowSize, + boolean retainDuplicates, + EmitStrategy emitStrategy, + boolean isSlidingWindow, + boolean isTimestamped)
        +
        +
        Parameters:
        +
        name - name of the store (cannot be null)
        +
        retentionPeriod - length of time to retain data in the store (cannot be negative) + (note that the retention period must be at least long enough to contain the + windowed data's entire life cycle, from window-start through window-end, + and for the entire grace period)
        +
        windowSize - size of the windows (cannot be negative)
        +
        retainDuplicates - whether to retain duplicates. Turning this on will automatically disable + caching and means that null values will be ignored.
        +
        emitStrategy - defines how to emit results
        +
        isSlidingWindow - whether the requested store is a sliding window
        +
isTimestamped - whether the requested store should be timestamped (see TimestampedWindowStore)
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        name

        +
        public String name()
        +
        +
      • +
      • +
        +

        retentionPeriod

        +
        public Duration retentionPeriod()
        +
        +
      • +
      • +
        +

        windowSize

        +
        public Duration windowSize()
        +
        +
      • +
      • +
        +

        retainDuplicates

        +
        public boolean retainDuplicates()
        +
        +
      • +
      • +
        +

        emitStrategy

        +
        public EmitStrategy emitStrategy()
        +
        +
      • +
      • +
        +

        isSlidingWindow

        +
        public boolean isSlidingWindow()
        +
        +
      • +
      • +
        +

        isTimestamped

        +
        public boolean isTimestamped()
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/HostInfo.html b/static/41/javadoc/org/apache/kafka/streams/state/HostInfo.html new file mode 100644 index 000000000..6c5e98fa3 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/HostInfo.html @@ -0,0 +1,247 @@ + + + + +HostInfo (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class HostInfo

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.state.HostInfo
    +
    +
    +
    +
    public class HostInfo +extends Object
    +
Represents a user-defined endpoint in a KafkaStreams application. Instances of this class can be obtained by calling KafkaStreams.metadataForAllStreamsClients() or KafkaStreams.streamsMetadataForStore(String). The HostInfo is constructed during partition assignment (see StreamsPartitionAssignor) and is extracted from the StreamsConfig.APPLICATION_SERVER_CONFIG config. If developers wish to expose an endpoint in their KafkaStreams applications, they should provide this config.
    +
    +
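A minimal sketch of how an endpoint is advertised and later discovered for key-based routing; the endpoint, store name, and serde are assumptions for the example:

```java
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyQueryMetadata;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.state.HostInfo;

public class HostInfoExample {
    public static Properties config() {
        final Properties props = new Properties();
        // Advertise this instance's RPC endpoint so other instances can route queries to it.
        props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "streams-host-1:8080");
        return props;
    }

    public static HostInfo hostFor(final KafkaStreams streams, final String key) {
        final KeyQueryMetadata metadata =
            streams.queryMetadataForKey("counts-store", key, Serdes.String().serializer());
        return metadata.activeHost(); // the instance hosting the active task for this key
    }
}
```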
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        HostInfo

        +
        public HostInfo(String host, + int port)
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        buildFromEndpoint

        +
        public static HostInfo buildFromEndpoint(String endPoint)
        +
        +
        Returns:
        +
        a new HostInfo or null if endPoint is null or has no characters
        +
        Throws:
        +
        ConfigException - if the host or port cannot be parsed from the given endpoint string
        +
        +
        +
      • +
      • +
        +

        unavailable

        +
        public static HostInfo unavailable()
        +
        +
        Returns:
        +
a sentinel for cases where the host metadata is currently unavailable, e.g. during rebalance operations.
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      • +
        +

        host

        +
        public String host()
        +
        +
      • +
      • +
        +

        port

        +
        public int port()
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/KeyValueBytesStoreSupplier.html b/static/41/javadoc/org/apache/kafka/streams/state/KeyValueBytesStoreSupplier.html new file mode 100644 index 000000000..979eb8ed8 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/KeyValueBytesStoreSupplier.html @@ -0,0 +1,109 @@ + + + + +KeyValueBytesStoreSupplier (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface KeyValueBytesStoreSupplier

    +
    +
    +
    +
    All Superinterfaces:
    +
    StoreSupplier<KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>>
    +
    +
    +
    All Known Subinterfaces:
    +
    VersionedBytesStoreSupplier
    +
    +
    +
    public interface KeyValueBytesStoreSupplier +extends StoreSupplier<KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>>
    +
    A store supplier that can be used to create one or more KeyValueStore<Bytes, byte[]> instances of type <Bytes, byte[]>. + + For any stores implementing the KeyValueStore<Bytes, byte[]> interface, null value bytes are considered as "not exist". This means: + +
      +
1. Null value bytes in put operations should be treated as delete.
2. If the key does not exist, get operations should return null value bytes.
    +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/KeyValueIterator.html b/static/41/javadoc/org/apache/kafka/streams/state/KeyValueIterator.html new file mode 100644 index 000000000..d23b3386d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/KeyValueIterator.html @@ -0,0 +1,168 @@ + + + + +KeyValueIterator (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface KeyValueIterator<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - Type of keys
    +
    V - Type of values
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Closeable, Iterator<KeyValue<K,V>>
    +
    +
    +
    All Known Subinterfaces:
    +
    WindowStoreIterator<V>
    +
    +
    +
    public interface KeyValueIterator<K,V> +extends Iterator<KeyValue<K,V>>, Closeable
    +
    Iterator interface of KeyValue. +

+ Users must call its close method explicitly when they are done with it, to release resources, + or use a try-with-resources statement (available since JDK 7) for this Closeable class. + Note that remove() is not supported.

    +
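A minimal sketch of the recommended try-with-resources pattern, assuming a ReadOnlyKeyValueStore<String, Long> obtained elsewhere:

```java
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public class KeyValueIteratorExample {
    public static void printAll(final ReadOnlyKeyValueStore<String, Long> store) {
        // Closing the iterator releases underlying resources (e.g. RocksDB iterators).
        try (KeyValueIterator<String, Long> iterator = store.all()) {
            while (iterator.hasNext()) {
                final String nextKey = iterator.peekNextKey(); // look ahead without advancing
                final KeyValue<String, Long> kv = iterator.next();
                System.out.println(nextKey + " -> " + kv.value);
            }
        }
    }
}
```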
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
       
      + + +
      +
      Peek at the next key without advancing the iterator
      +
      +
      +
      +
      +
      +

      Methods inherited from interface java.util.Iterator

      +forEachRemaining, hasNext, next, remove
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        close

        +
        void close()
        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        Specified by:
        +
        close in interface Closeable
        +
        +
        +
      • +
      • +
        +

        peekNextKey

        +
        K peekNextKey()
        +
        Peek at the next key without advancing the iterator
        +
        +
        Returns:
        +
        the key of the next value that would be returned from the next call to next
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/KeyValueStore.html b/static/41/javadoc/org/apache/kafka/streams/state/KeyValueStore.html new file mode 100644 index 000000000..78af6adef --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/KeyValueStore.html @@ -0,0 +1,225 @@ + + + + +KeyValueStore (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface KeyValueStore<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - The key type
    +
    V - The value type
    +
    +
    +
    All Superinterfaces:
    +
    ReadOnlyKeyValueStore<K,V>, StateStore
    +
    +
    +
    All Known Subinterfaces:
    +
    TimestampedKeyValueStore<K,V>, VersionedBytesStore
    +
    +
    +
    public interface KeyValueStore<K,V> +extends StateStore, ReadOnlyKeyValueStore<K,V>
    +
    A key-value store that supports put/get/delete and range queries.
    +
    +
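A minimal sketch of these operations from inside a Processor, assuming a store named "counts-store" has been attached to it in the topology:

```java
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;
import org.apache.kafka.streams.state.KeyValueStore;

public class CountingProcessor implements Processor<String, String, String, Long> {
    private KeyValueStore<String, Long> store;
    private ProcessorContext<String, Long> context;

    @Override
    public void init(final ProcessorContext<String, Long> context) {
        this.context = context;
        // The store must have been connected to this processor when building the topology.
        this.store = context.getStateStore("counts-store");
    }

    @Override
    public void process(final Record<String, String> record) {
        final Long current = store.get(record.key());      // null if the key is absent
        final long updated = (current == null ? 0L : current) + 1L;
        store.put(record.key(), updated);                  // upsert the new count
        context.forward(record.withValue(updated));
    }
}
```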
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        put

        +
        void put(K key, + V value)
        +
        Update the value associated with this key.
        +
        +
        Parameters:
        +
        key - The key to associate the value to
        +
value - The value to update; it can be null. + If the serialized value bytes are null, the operation is interpreted as a delete.
        +
        Throws:
        +
        NullPointerException - If null is used for key.
        +
        InvalidStateStoreException - if the store is not initialized
        +
        +
        +
      • +
      • +
        +

        putIfAbsent

        +
        V putIfAbsent(K key, + V value)
        +
        Update the value associated with this key, unless a value is already associated with the key.
        +
        +
        Parameters:
        +
        key - The key to associate the value to
        +
value - The value to update; it can be null. + If the serialized value bytes are null, the operation is interpreted as a delete.
        +
        Returns:
        +
        The old value or null if there is no such key.
        +
        Throws:
        +
        NullPointerException - If null is used for key.
        +
        InvalidStateStoreException - if the store is not initialized
        +
        +
        +
      • +
      • +
        +

        putAll

        +
        void putAll(List<KeyValue<K,V>> entries)
        +
        Update all the given key/value pairs.
        +
        +
        Parameters:
        +
entries - A list of entries to put into the store; + entries whose serialized value bytes are null are interpreted as deletes.
        +
        Throws:
        +
        NullPointerException - If null is used for key.
        +
        InvalidStateStoreException - if the store is not initialized
        +
        +
        +
      • +
      • +
        +

        delete

        +
        V delete(K key)
        +
        Delete the value from the store (if there is one).
        +
        +
        Parameters:
        +
        key - The key
        +
        Returns:
        +
        The old value or null if there is no such key.
        +
        Throws:
        +
        NullPointerException - If null is used for key.
        +
        InvalidStateStoreException - if the store is not initialized
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreType.html b/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreType.html new file mode 100644 index 000000000..3f0dea0c6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreType.html @@ -0,0 +1,176 @@ + + + + +QueryableStoreType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface QueryableStoreType<T>

    +
    +
    +
    +
    Type Parameters:
    +
    T - The store type
    +
    +
    +
    All Known Implementing Classes:
    +
    QueryableStoreTypes.KeyValueStoreType, QueryableStoreTypes.SessionStoreType, QueryableStoreTypes.WindowStoreType
    +
    +
    +
    public interface QueryableStoreType<T>
    +
    Used to enable querying of custom StateStore types via the KafkaStreams API.
    +
    +
    See Also:
    +
    + +
    +
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      +
      accepts(StateStore stateStore)
      +
      +
      Called when searching for StateStores to see if they + match the type expected by implementors of this interface.
      +
      + +
      create(org.apache.kafka.streams.state.internals.StateStoreProvider storeProvider, + String storeName)
      +
      +
      Create an instance of T (usually a facade) that developers can use + to query the underlying StateStores.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        accepts

        +
        boolean accepts(StateStore stateStore)
        +
        Called when searching for StateStores to see if they + match the type expected by implementors of this interface.
        +
        +
        Parameters:
        +
        stateStore - The stateStore
        +
        Returns:
        +
        true if it is a match
        +
        +
        +
      • +
      • +
        +

        create

        +
        T create(org.apache.kafka.streams.state.internals.StateStoreProvider storeProvider, + String storeName)
        +
        Create an instance of T (usually a facade) that developers can use + to query the underlying StateStores.
        +
        +
        Parameters:
        +
        storeProvider - provides access to all the underlying StateStore instances
        +
        storeName - The name of the Store
        +
        Returns:
        +
        a read-only interface over a StateStore + (cf. QueryableStoreTypes.KeyValueStoreType)
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
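A minimal sketch of the classic (pre-IQv2) query path that uses a built-in QueryableStoreType; the store name and key/value types are assumptions for the example:

```java
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public class QueryableStoreExample {
    public static Long readCount(final KafkaStreams streams, final String key) {
        // QueryableStoreTypes.keyValueStore() is the built-in QueryableStoreType that
        // produces a read-only facade over all local partitions of the store.
        final ReadOnlyKeyValueStore<String, Long> store = streams.store(
            StoreQueryParameters.fromNameAndType("counts-store", QueryableStoreTypes.keyValueStore()));
        return store.get(key);
    }
}
```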
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreTypes.KeyValueStoreType.html b/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreTypes.KeyValueStoreType.html new file mode 100644 index 000000000..894aa016a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreTypes.KeyValueStoreType.html @@ -0,0 +1,178 @@ + + + + +QueryableStoreTypes.KeyValueStoreType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class QueryableStoreTypes.KeyValueStoreType<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.state.QueryableStoreTypes.KeyValueStoreType<K,V>
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    QueryableStoreType<ReadOnlyKeyValueStore<K,V>>
    +
    +
    +
    Enclosing class:
    +
    QueryableStoreTypes
    +
    +
    +
    public static class QueryableStoreTypes.KeyValueStoreType<K,V> +extends Object
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        create

        +
        public ReadOnlyKeyValueStore<K,V> create(org.apache.kafka.streams.state.internals.StateStoreProvider storeProvider, + String storeName)
        +
        Description copied from interface: QueryableStoreType
        +
        Create an instance of T (usually a facade) that developers can use + to query the underlying StateStores.
        +
        +
        Parameters:
        +
        storeProvider - provides access to all the underlying StateStore instances
        +
        storeName - The name of the Store
        +
        Returns:
        +
        a read-only interface over a StateStore + (cf. QueryableStoreTypes.KeyValueStoreType)
        +
        +
        +
      • +
      • +
        +

        accepts

        +
        public boolean accepts(StateStore stateStore)
        +
        Description copied from interface: QueryableStoreType
        +
        Called when searching for StateStores to see if they + match the type expected by implementors of this interface.
        +
        +
        Specified by:
        +
        accepts in interface QueryableStoreType<T>
        +
        Parameters:
        +
        stateStore - The stateStore
        +
        Returns:
        +
        true if it is a match
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreTypes.SessionStoreType.html b/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreTypes.SessionStoreType.html new file mode 100644 index 000000000..856b7f170 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreTypes.SessionStoreType.html @@ -0,0 +1,178 @@ + + + + +QueryableStoreTypes.SessionStoreType (kafka 4.1.0 API) + + + + + + + + + + + + + + +
Class QueryableStoreTypes.SessionStoreType<K,V>

java.lang.Object
    org.apache.kafka.streams.state.QueryableStoreTypes.SessionStoreType<K,V>

All Implemented Interfaces:
    QueryableStoreType<ReadOnlySessionStore<K,V>>

Enclosing class:
    QueryableStoreTypes

public static class QueryableStoreTypes.SessionStoreType<K,V> extends Object

Method Details

create

public ReadOnlySessionStore<K,V> create(org.apache.kafka.streams.state.internals.StateStoreProvider storeProvider, String storeName)

Description copied from interface: QueryableStoreType
Create an instance of T (usually a facade) that developers can use to query the underlying StateStores.

Parameters:
    storeProvider - provides access to all the underlying StateStore instances
    storeName - The name of the Store
Returns:
    a read-only interface over a StateStore (cf. QueryableStoreTypes.KeyValueStoreType)

accepts

public boolean accepts(StateStore stateStore)

Description copied from interface: QueryableStoreType
Called when searching for StateStores to see if they match the type expected by implementors of this interface.

Specified by:
    accepts in interface QueryableStoreType<T>
Parameters:
    stateStore - The stateStore
Returns:
    true if it is a match
diff --git a/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreTypes.WindowStoreType.html b/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreTypes.WindowStoreType.html
new file mode 100644
index 000000000..6b284b42b
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreTypes.WindowStoreType.html
@@ -0,0 +1,178 @@
QueryableStoreTypes.WindowStoreType (kafka 4.1.0 API)
Class QueryableStoreTypes.WindowStoreType<K,V>

java.lang.Object
    org.apache.kafka.streams.state.QueryableStoreTypes.WindowStoreType<K,V>

All Implemented Interfaces:
    QueryableStoreType<ReadOnlyWindowStore<K,V>>

Enclosing class:
    QueryableStoreTypes

public static class QueryableStoreTypes.WindowStoreType<K,V> extends Object

Method Details

create

public ReadOnlyWindowStore<K,V> create(org.apache.kafka.streams.state.internals.StateStoreProvider storeProvider, String storeName)

Description copied from interface: QueryableStoreType
Create an instance of T (usually a facade) that developers can use to query the underlying StateStores.

Parameters:
    storeProvider - provides access to all the underlying StateStore instances
    storeName - The name of the Store
Returns:
    a read-only interface over a StateStore (cf. QueryableStoreTypes.KeyValueStoreType)

accepts

public boolean accepts(StateStore stateStore)

Description copied from interface: QueryableStoreType
Called when searching for StateStores to see if they match the type expected by implementors of this interface.

Specified by:
    accepts in interface QueryableStoreType<T>
Parameters:
    stateStore - The stateStore
Returns:
    true if it is a match
diff --git a/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreTypes.html b/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreTypes.html
new file mode 100644
index 000000000..e2e6ce65f
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/streams/state/QueryableStoreTypes.html
@@ -0,0 +1,283 @@
QueryableStoreTypes (kafka 4.1.0 API)
Class QueryableStoreTypes

java.lang.Object
    org.apache.kafka.streams.state.QueryableStoreTypes

public final class QueryableStoreTypes extends Object

Provides access to the QueryableStoreTypes provided with KafkaStreams. These can be used with KafkaStreams.store(StoreQueryParameters) to access and query the StateStores that are part of a Topology.
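To make the relationship between these store types and KafkaStreams.store(StoreQueryParameters) concrete, here is a small sketch that obtains read-only handles for a window store and a session store. The store names, key type, and value type are hypothetical; QueryableStoreTypes.windowStore() and sessionStore() are the standard factory methods for these handle types.

```java
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlySessionStore;
import org.apache.kafka.streams.state.ReadOnlyWindowStore;

public class StoreHandles {

    // Obtain a read-only handle over a windowed aggregate store.
    static ReadOnlyWindowStore<String, Long> windowed(final KafkaStreams streams, final String name) {
        return streams.store(StoreQueryParameters.fromNameAndType(
            name, QueryableStoreTypes.<String, Long>windowStore()));
    }

    // Obtain a read-only handle over a session-windowed aggregate store.
    static ReadOnlySessionStore<String, Long> sessions(final KafkaStreams streams, final String name) {
        return streams.store(StoreQueryParameters.fromNameAndType(
            name, QueryableStoreTypes.<String, Long>sessionStore()));
    }
}
```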
diff --git a/static/41/javadoc/org/apache/kafka/streams/state/ReadOnlyKeyValueStore.html b/static/41/javadoc/org/apache/kafka/streams/state/ReadOnlyKeyValueStore.html
new file mode 100644
index 000000000..9488044f4
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/streams/state/ReadOnlyKeyValueStore.html
@@ -0,0 +1,310 @@
ReadOnlyKeyValueStore (kafka 4.1.0 API)
Interface ReadOnlyKeyValueStore<K,V>

Type Parameters:
    K - the key type
    V - the value type

All Known Subinterfaces:
    KeyValueStore<K,V>, TimestampedKeyValueStore<K,V>, VersionedBytesStore

public interface ReadOnlyKeyValueStore<K,V>

A key-value store that only supports read operations. Implementations should be thread-safe as concurrent reads and writes are expected.

Please note that this contract defines the thread-safe read functionality only; it does not guarantee anything about whether the actual instance is writable by another thread, or whether it uses some locking mechanism under the hood. For this reason, making dependencies between the read and write operations on different StateStore instances can cause concurrency problems like deadlock.

Method Summary

KeyValueIterator<K,V>  all()
    Return an iterator over all keys in this store.
long  approximateNumEntries()
    Return an approximate count of key-value mappings in this store.
V  get(K key)
    Get the value corresponding to this key.
default <PS extends Serializer<P>, P> KeyValueIterator<K,V>  prefixScan(P prefix, PS prefixKeySerializer)
    Return an iterator over all keys with the specified prefix.
KeyValueIterator<K,V>  range(K from, K to)
    Get an iterator over a given range of keys.
default KeyValueIterator<K,V>  reverseAll()
    Return a reverse iterator over all keys in this store.
default KeyValueIterator<K,V>  reverseRange(K from, K to)
    Get a reverse iterator over a given range of keys.

Method Details

get

V get(K key)

Get the value corresponding to this key.

Parameters:
    key - The key to fetch
Returns:
    The value or null if no value is found.
Throws:
    NullPointerException - If null is used for key.
    InvalidStateStoreException - if the store is not initialized

range

KeyValueIterator<K,V> range(K from, K to)

Get an iterator over a given range of keys. This iterator must be closed after use. The returned iterator must be safe from ConcurrentModificationExceptions and must not return null values. Order is based on the serialized byte[] of the keys, not the 'logical' key order.

Parameters:
    from - The first key that could be in the range, where iteration starts from. A null value indicates that the range starts with the first element in the store.
    to - The last key that could be in the range, where iteration ends. A null value indicates that the range ends with the last element in the store.
Returns:
    The iterator for this range, from the key with the smallest bytes to the key with the largest bytes.
Throws:
    InvalidStateStoreException - if the store is not initialized

reverseRange

default KeyValueIterator<K,V> reverseRange(K from, K to)

Get a reverse iterator over a given range of keys. This iterator must be closed after use. The returned iterator must be safe from ConcurrentModificationExceptions and must not return null values. Order is based on the serialized byte[] of the keys, not the 'logical' key order.

Parameters:
    from - The first key that could be in the range, where iteration ends. A null value indicates that the range starts with the first element in the store.
    to - The last key that could be in the range, where iteration starts from. A null value indicates that the range ends with the last element in the store.
Returns:
    The iterator for this range, from the key with the largest bytes to the key with the smallest bytes.
Throws:
    InvalidStateStoreException - if the store is not initialized

all

KeyValueIterator<K,V> all()

Return an iterator over all keys in this store. This iterator must be closed after use. The returned iterator must be safe from ConcurrentModificationExceptions and must not return null values. Order is not guaranteed as bytes lexicographical ordering might not represent key order.

Returns:
    An iterator of all key/value pairs in the store, from smallest to largest bytes.
Throws:
    InvalidStateStoreException - if the store is not initialized

reverseAll

default KeyValueIterator<K,V> reverseAll()

Return a reverse iterator over all keys in this store. This iterator must be closed after use. The returned iterator must be safe from ConcurrentModificationExceptions and must not return null values. Order is not guaranteed as bytes lexicographical ordering might not represent key order.

Returns:
    A reverse iterator of all key/value pairs in the store, from largest to smallest key bytes.
Throws:
    InvalidStateStoreException - if the store is not initialized

prefixScan

default <PS extends Serializer<P>, P> KeyValueIterator<K,V> prefixScan(P prefix, PS prefixKeySerializer)

Return an iterator over all keys with the specified prefix. Since the type of the prefix can be different from that of the key, a serializer to convert the prefix into the format in which the keys are stored in the stores needs to be passed to this method. The returned iterator must be safe from ConcurrentModificationExceptions and must not return null values. Since prefixScan() relies on byte lexicographical ordering and not on the ordering of the key type, results for some types might be unexpected. For example, if the key type is Integer, and the store contains keys [1, 2, 11, 13], then running store.prefixScan(1, new IntegerSerializer()) will return [1] and not [1,11,13]. In contrast, if the key type is String the keys will be sorted [1, 11, 13, 2] in the store and store.prefixScan(1, new StringSerializer()) will return [1,11,13]. In both cases prefixScan() starts the scan at 1 and stops at 2.

Type Parameters:
    PS - Prefix Serializer type
    P - Prefix Type
Parameters:
    prefix - The prefix.
    prefixKeySerializer - Serializer for the Prefix key type
Returns:
    The iterator for keys having the specified prefix.
Throws:
    InvalidStateStoreException - if the store is not initialized

approximateNumEntries

long approximateNumEntries()

Return an approximate count of key-value mappings in this store.

The count is not guaranteed to be exact in order to accommodate stores where an exact count is expensive to calculate.

Returns:
    an approximate count of key-value mappings in the store.
Throws:
    InvalidStateStoreException - if the store is not initialized
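The sketch below shows how these read methods are typically combined, assuming a ReadOnlyKeyValueStore<String, Long> handle has already been obtained through interactive queries; the key bounds and the "user-" prefix are hypothetical. Note that range() and prefixScan() order by serialized key bytes, and every iterator must be closed, which try-with-resources handles here.

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public class KeyValueQueries {

    // 'store' is assumed to be a ReadOnlyKeyValueStore<String, Long>
    // obtained via KafkaStreams#store(...).
    static void dump(final ReadOnlyKeyValueStore<String, Long> store) {
        // Range scan: ordered by serialized key bytes, both bounds inclusive.
        try (KeyValueIterator<String, Long> it = store.range("a", "f")) {
            while (it.hasNext()) {
                final KeyValue<String, Long> next = it.next();
                System.out.println(next.key + " -> " + next.value);
            }
        }
        // Prefix scan: the prefix is serialized with the given serializer.
        try (KeyValueIterator<String, Long> it =
                 store.prefixScan("user-", Serdes.String().serializer())) {
            while (it.hasNext()) {
                System.out.println(it.next());
            }
        }
        // Approximate, not exact, by contract.
        System.out.println("approx. entries: " + store.approximateNumEntries());
    }
}
```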
diff --git a/static/41/javadoc/org/apache/kafka/streams/state/ReadOnlySessionStore.html b/static/41/javadoc/org/apache/kafka/streams/state/ReadOnlySessionStore.html
new file mode 100644
index 000000000..37ca536b3
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/streams/state/ReadOnlySessionStore.html
@@ -0,0 +1,666 @@
ReadOnlySessionStore (kafka 4.1.0 API)
Interface ReadOnlySessionStore<K,AGG>

Type Parameters:
    K - the key type
    AGG - the aggregated value type

All Known Subinterfaces:
    SessionStore<K,AGG>

public interface ReadOnlySessionStore<K,AGG>

A session store that only supports read operations. Implementations should be thread-safe as concurrent reads and writes are expected.

Method Summary

KeyValueIterator<Windowed<K>,AGG>  backwardFetch(K key)
    Retrieve all aggregated sessions for the provided key.
KeyValueIterator<Windowed<K>,AGG>  backwardFetch(K keyFrom, K keyTo)
    Retrieve all aggregated sessions for the given range of keys.
KeyValueIterator<Windowed<K>,AGG>  backwardFindSessions(K key, long earliestSessionEndTime, long latestSessionStartTime)
    Fetch any sessions with the matching key whose end is ≥ earliestSessionEndTime and whose start is ≤ latestSessionStartTime, iterating from latest to earliest.
KeyValueIterator<Windowed<K>,AGG>  backwardFindSessions(K key, Instant earliestSessionEndTime, Instant latestSessionStartTime)
    As above, with the bounds given as Instants.
KeyValueIterator<Windowed<K>,AGG>  backwardFindSessions(K keyFrom, K keyTo, long earliestSessionEndTime, long latestSessionStartTime)
    Fetch any sessions in the given range of keys whose end is ≥ earliestSessionEndTime and whose start is ≤ latestSessionStartTime, iterating from latest to earliest.
KeyValueIterator<Windowed<K>,AGG>  backwardFindSessions(K keyFrom, K keyTo, Instant earliestSessionEndTime, Instant latestSessionStartTime)
    As above, with the bounds given as Instants.
KeyValueIterator<Windowed<K>,AGG>  fetch(K key)
    Retrieve all aggregated sessions for the provided key.
KeyValueIterator<Windowed<K>,AGG>  fetch(K keyFrom, K keyTo)
    Retrieve all aggregated sessions for the given range of keys.
default AGG  fetchSession(K key, long sessionStartTime, long sessionEndTime)
    Get the value of key from a single session.
default AGG  fetchSession(K key, Instant sessionStartTime, Instant sessionEndTime)
    Get the value of key from a single session.
KeyValueIterator<Windowed<K>,AGG>  findSessions(K key, long earliestSessionEndTime, long latestSessionStartTime)
    Fetch any sessions with the matching key whose end is ≥ earliestSessionEndTime and whose start is ≤ latestSessionStartTime, iterating from earliest to latest.
KeyValueIterator<Windowed<K>,AGG>  findSessions(K key, Instant earliestSessionEndTime, Instant latestSessionStartTime)
    As above, with the bounds given as Instants.
KeyValueIterator<Windowed<K>,AGG>  findSessions(K keyFrom, K keyTo, long earliestSessionEndTime, long latestSessionStartTime)
    Fetch any sessions in the given range of keys whose end is ≥ earliestSessionEndTime and whose start is ≤ latestSessionStartTime, iterating from earliest to latest.
KeyValueIterator<Windowed<K>,AGG>  findSessions(K keyFrom, K keyTo, Instant earliestSessionEndTime, Instant latestSessionStartTime)
    As above, with the bounds given as Instants.

Method Details

findSessions

default KeyValueIterator<Windowed<K>,AGG> findSessions(K key, long earliestSessionEndTime, long latestSessionStartTime)

Fetch any sessions with the matching key whose end is ≥ earliestSessionEndTime and whose start is ≤ latestSessionStartTime, iterating from earliest to latest. I.e., earliestSessionEndTime is the lower bound of the search interval and latestSessionStartTime is the upper bound of the search interval, and the method returns all sessions that overlap with the search interval. Thus, if a session ends before earliestSessionEndTime, or starts after latestSessionStartTime, it won't be contained in the result:

    earliestSessionEndTime: ESET
    latestSessionStartTime: LSST

                          [ESET............LSST]
    [not-included] [included]   [included]   [included] [not-included]

This iterator must be closed after use.

Parameters:
    key - the key to return sessions for
    earliestSessionEndTime - the end timestamp of the earliest session to search for, where iteration starts.
    latestSessionStartTime - the end timestamp of the latest session to search for, where iteration ends.
Returns:
    iterator of sessions with the matching key and aggregated values, from earliest to latest session time.
Throws:
    NullPointerException - If null is used for key.

findSessions

default KeyValueIterator<Windowed<K>,AGG> findSessions(K key, Instant earliestSessionEndTime, Instant latestSessionStartTime)

Same semantics as findSessions(K, long, long) above, with the search-interval bounds given as Instants. This iterator must be closed after use.

Parameters:
    key - the key to return sessions for
    earliestSessionEndTime - the end timestamp of the earliest session to search for, where iteration starts.
    latestSessionStartTime - the end timestamp of the latest session to search for, where iteration ends.
Returns:
    iterator of sessions with the matching key and aggregated values, from earliest to latest session time.
Throws:
    NullPointerException - If null is used for key.

backwardFindSessions

default KeyValueIterator<Windowed<K>,AGG> backwardFindSessions(K key, long earliestSessionEndTime, long latestSessionStartTime)

Fetch any sessions with the matching key whose end is ≥ earliestSessionEndTime and whose start is ≤ latestSessionStartTime, iterating from latest to earliest. The search interval is interpreted exactly as for findSessions(K, long, long); only the iteration order is reversed. This iterator must be closed after use.

Parameters:
    key - the key to return sessions for
    earliestSessionEndTime - the end timestamp of the earliest session to search for, where iteration ends.
    latestSessionStartTime - the end timestamp of the latest session to search for, where iteration starts.
Returns:
    backward iterator of sessions with the matching key and aggregated values, from latest to earliest session time.
Throws:
    NullPointerException - If null is used for key.

backwardFindSessions

default KeyValueIterator<Windowed<K>,AGG> backwardFindSessions(K key, Instant earliestSessionEndTime, Instant latestSessionStartTime)

Same semantics as backwardFindSessions(K, long, long) above, with the bounds given as Instants. This iterator must be closed after use.

Parameters:
    key - the key to return sessions for
    earliestSessionEndTime - the end timestamp of the earliest session to search for, where iteration ends.
    latestSessionStartTime - the end timestamp of the latest session to search for, where iteration starts.
Returns:
    backward iterator of sessions with the matching key and aggregated values, from latest to earliest session time.
Throws:
    NullPointerException - If null is used for key.

findSessions

default KeyValueIterator<Windowed<K>,AGG> findSessions(K keyFrom, K keyTo, long earliestSessionEndTime, long latestSessionStartTime)

Fetch any sessions in the given range of keys whose end is ≥ earliestSessionEndTime and whose start is ≤ latestSessionStartTime, iterating from earliest to latest. The search interval is interpreted exactly as for findSessions(K, long, long). This iterator must be closed after use.

Parameters:
    keyFrom - The first key that could be in the range. A null value indicates a starting position from the first element in the store.
    keyTo - The last key that could be in the range. A null value indicates that the range ends with the last element in the store.
    earliestSessionEndTime - the end timestamp of the earliest session to search for, where iteration starts.
    latestSessionStartTime - the end timestamp of the latest session to search for, where iteration ends.
Returns:
    iterator of sessions with the matching keys and aggregated values, from earliest to latest session time.

findSessions

default KeyValueIterator<Windowed<K>,AGG> findSessions(K keyFrom, K keyTo, Instant earliestSessionEndTime, Instant latestSessionStartTime)

Same semantics as findSessions(K, K, long, long) above, with the bounds given as Instants. This iterator must be closed after use.

Parameters:
    keyFrom - The first key that could be in the range. A null value indicates a starting position from the first element in the store.
    keyTo - The last key that could be in the range. A null value indicates that the range ends with the last element in the store.
    earliestSessionEndTime - the end timestamp of the earliest session to search for, where iteration starts.
    latestSessionStartTime - the end timestamp of the latest session to search for, where iteration ends.
Returns:
    iterator of sessions with the matching keys and aggregated values, from earliest to latest session time.

backwardFindSessions

default KeyValueIterator<Windowed<K>,AGG> backwardFindSessions(K keyFrom, K keyTo, long earliestSessionEndTime, long latestSessionStartTime)

Fetch any sessions in the given range of keys whose end is ≥ earliestSessionEndTime and whose start is ≤ latestSessionStartTime, iterating from latest to earliest. The search interval is interpreted exactly as for findSessions(K, long, long). This iterator must be closed after use.

Parameters:
    keyFrom - The first key that could be in the range. A null value indicates a starting position from the first element in the store.
    keyTo - The last key that could be in the range. A null value indicates that the range ends with the last element in the store.
    earliestSessionEndTime - the end timestamp of the earliest session to search for, where iteration ends.
    latestSessionStartTime - the end timestamp of the latest session to search for, where iteration starts.
Returns:
    backward iterator of sessions with the matching keys and aggregated values, from latest to earliest session time.

backwardFindSessions

default KeyValueIterator<Windowed<K>,AGG> backwardFindSessions(K keyFrom, K keyTo, Instant earliestSessionEndTime, Instant latestSessionStartTime)

Same semantics as backwardFindSessions(K, K, long, long) above, with the bounds given as Instants. This iterator must be closed after use.

Parameters:
    keyFrom - The first key that could be in the range. A null value indicates a starting position from the first element in the store.
    keyTo - The last key that could be in the range. A null value indicates that the range ends with the last element in the store.
    earliestSessionEndTime - the end timestamp of the earliest session to search for, where iteration ends.
    latestSessionStartTime - the end timestamp of the latest session to search for, where iteration starts.
Returns:
    backward iterator of sessions with the matching keys and aggregated values, from latest to earliest session time.

fetchSession

default AGG fetchSession(K key, long sessionStartTime, long sessionEndTime)

Get the value of key from a single session.

Parameters:
    key - the key to fetch
    sessionStartTime - start timestamp of the session
    sessionEndTime - end timestamp of the session
Returns:
    The value, or null if no session with the exact start and end timestamp exists for the given key
Throws:
    NullPointerException - If null is used for any key.

fetchSession

default AGG fetchSession(K key, Instant sessionStartTime, Instant sessionEndTime)

Get the value of key from a single session, with the session boundaries given as Instants.

Parameters:
    key - the key to fetch
    sessionStartTime - start timestamp of the session
    sessionEndTime - end timestamp of the session
Returns:
    The value, or null if no session with the exact start and end timestamp exists for the given key
Throws:
    NullPointerException - If null is used for any key.

fetch

KeyValueIterator<Windowed<K>,AGG> fetch(K key)

Retrieve all aggregated sessions for the provided key. This iterator must be closed after use.

For each key, the iterator guarantees ordering of sessions, starting from the oldest/earliest available session to the newest/latest session.

Parameters:
    key - record key to find aggregated session values for
Returns:
    KeyValueIterator containing all sessions for the provided key, from oldest to newest session.
Throws:
    NullPointerException - If null is used for key.

backwardFetch

default KeyValueIterator<Windowed<K>,AGG> backwardFetch(K key)

Retrieve all aggregated sessions for the provided key. This iterator must be closed after use.

For each key, the iterator guarantees ordering of sessions, starting from the newest/latest available session to the oldest/earliest session.

Parameters:
    key - record key to find aggregated session values for
Returns:
    backward KeyValueIterator containing all sessions for the provided key, from newest to oldest session.
Throws:
    NullPointerException - If null is used for key.

fetch

KeyValueIterator<Windowed<K>,AGG> fetch(K keyFrom, K keyTo)

Retrieve all aggregated sessions for the given range of keys. This iterator must be closed after use.

For each key, the iterator guarantees ordering of sessions, starting from the oldest/earliest available session to the newest/latest session.

Parameters:
    keyFrom - first key in the range to find aggregated session values for. A null value indicates a starting position from the first element in the store.
    keyTo - last key in the range to find aggregated session values for. A null value indicates that the range ends with the last element in the store.
Returns:
    KeyValueIterator containing all sessions for the provided key range, from oldest to newest session.

backwardFetch

default KeyValueIterator<Windowed<K>,AGG> backwardFetch(K keyFrom, K keyTo)

Retrieve all aggregated sessions for the given range of keys. This iterator must be closed after use.

For each key, the iterator guarantees ordering of sessions, starting from the newest/latest available session to the oldest/earliest session.

Parameters:
    keyFrom - first key in the range to find aggregated session values for. A null value indicates a starting position from the first element in the store.
    keyTo - last key in the range to find aggregated session values for. A null value indicates that the range ends with the last element in the store.
Returns:
    backward KeyValueIterator containing all sessions for the provided key range, from newest to oldest session.
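The following sketch shows a typical findSessions() query, assuming a ReadOnlySessionStore<String, Long> handle obtained via interactive queries; the key, the time bounds, and the value type are hypothetical. The search interval overlaps sessions whose end is at or after windowStart and whose start is at or before now, and the iterator is closed by try-with-resources.

```java
import java.time.Instant;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.ReadOnlySessionStore;

public class SessionQueries {

    // 'sessions' is assumed to be a ReadOnlySessionStore<String, Long>.
    static void printRecentSessions(final ReadOnlySessionStore<String, Long> sessions,
                                    final String key, final Instant windowStart) {
        try (KeyValueIterator<Windowed<String>, Long> it =
                 sessions.findSessions(key, windowStart, Instant.now())) {
            while (it.hasNext()) {
                final KeyValue<Windowed<String>, Long> session = it.next();
                // The Windowed key carries the session's start and end timestamps.
                System.out.printf("session [%s, %s] -> %d%n",
                    session.key.window().startTime(),
                    session.key.window().endTime(),
                    session.value);
            }
        }
    }
}
```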
diff --git a/static/41/javadoc/org/apache/kafka/streams/state/ReadOnlyWindowStore.html b/static/41/javadoc/org/apache/kafka/streams/state/ReadOnlyWindowStore.html
new file mode 100644
index 000000000..8ccdbd46e
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/streams/state/ReadOnlyWindowStore.html
@@ -0,0 +1,426 @@
ReadOnlyWindowStore (kafka 4.1.0 API)
Interface ReadOnlyWindowStore<K,V>

Type Parameters:
    K - Type of keys
    V - Type of values

All Known Subinterfaces:
    TimestampedWindowStore<K,V>, WindowStore<K,V>

public interface ReadOnlyWindowStore<K,V>

A window store that only supports read operations. Implementations should be thread-safe as concurrent reads and writes are expected.

Note: The current implementation of either forward or backward fetches on range-key-range-time does not obey the ordering when there are multiple local stores hosted on that instance. For example, if there are two stores from two tasks hosting keys {1,3} and {2,4}, then a range query of key [1,4] would return in the order [1,3,2,4] and not [1,2,3,4], since it simply loops over the stores.

Method Summary

KeyValueIterator<Windowed<K>,V>  all()
    Gets all the key-value pairs in the existing windows.
default KeyValueIterator<Windowed<K>,V>  backwardAll()
    Gets all the key-value pairs in the existing windows in backward order with respect to time (from end to beginning of time).
default WindowStoreIterator<V>  backwardFetch(K key, Instant timeFrom, Instant timeTo)
    Get all the key-value pairs with the given key and the time range from all the existing windows in backward order with respect to time (from end to beginning of time).
default KeyValueIterator<Windowed<K>,V>  backwardFetch(K keyFrom, K keyTo, Instant timeFrom, Instant timeTo)
    Get all the key-value pairs in the given key range and time range from all the existing windows in backward order with respect to time (from end to beginning of time).
default KeyValueIterator<Windowed<K>,V>  backwardFetchAll(Instant timeFrom, Instant timeTo)
    Gets all the key-value pairs that belong to the windows within the given time range in backward order with respect to time (from end to beginning of time).
V  fetch(K key, long time)
    Get the value of key from a window.
WindowStoreIterator<V>  fetch(K key, Instant timeFrom, Instant timeTo)
    Get all the key-value pairs with the given key and the time range from all the existing windows.
KeyValueIterator<Windowed<K>,V>  fetch(K keyFrom, K keyTo, Instant timeFrom, Instant timeTo)
    Get all the key-value pairs in the given key range and time range from all the existing windows.
KeyValueIterator<Windowed<K>,V>  fetchAll(Instant timeFrom, Instant timeTo)
    Gets all the key-value pairs that belong to the windows within the given time range.

Method Details

fetch

V fetch(K key, long time)

Get the value of key from a window.

Parameters:
    key - the key to fetch
    time - start timestamp (inclusive) of the window
Returns:
    The value or null if no value is found in the window
Throws:
    InvalidStateStoreException - if the store is not initialized
    NullPointerException - if null is used for any key.

fetch

WindowStoreIterator<V> fetch(K key, Instant timeFrom, Instant timeTo) throws IllegalArgumentException

Get all the key-value pairs with the given key and the time range from all the existing windows.

This iterator must be closed after use.

The time range is inclusive and applies to the starting timestamp of the window. For example, if we have the following windows:

    +-------------------------------+
    |  key  | start time | end time |
    +-------+------------+----------+
    |   A   |     10     |    20    |
    +-------+------------+----------+
    |   A   |     15     |    25    |
    +-------+------------+----------+
    |   A   |     20     |    30    |
    +-------+------------+----------+
    |   A   |     25     |    35    |
    +-------------------------------+

and we call store.fetch("A", Instant.ofEpochMilli(10), Instant.ofEpochMilli(20)), then the results will contain the first three windows from the table above, i.e., all those where 10 <= start time <= 20.

For each key, the iterator guarantees ordering of windows, starting from the oldest/earliest available window to the newest/latest window.

Parameters:
    key - the key to fetch
    timeFrom - time range start (inclusive), where iteration starts.
    timeTo - time range end (inclusive), where iteration ends.
Returns:
    an iterator over key-value pairs <timestamp, value>, from beginning to end of time.
Throws:
    InvalidStateStoreException - if the store is not initialized
    NullPointerException - if null is used for key.
    IllegalArgumentException - if duration is negative or can't be represented as long milliseconds

backwardFetch

default WindowStoreIterator<V> backwardFetch(K key, Instant timeFrom, Instant timeTo) throws IllegalArgumentException

Get all the key-value pairs with the given key and the time range from all the existing windows in backward order with respect to time (from end to beginning of time).

This iterator must be closed after use.

The time range is inclusive and applies to the starting timestamp of the window, exactly as for fetch(K, Instant, Instant). For the windows in the table above, store.backwardFetch("A", Instant.ofEpochMilli(10), Instant.ofEpochMilli(20)) returns the first three windows in backward order, i.e., all those where 10 <= start time <= 20.

For each key, the iterator guarantees ordering of windows, starting from the newest/latest available window to the oldest/earliest window.

Parameters:
    key - the key to fetch
    timeFrom - time range start (inclusive), where iteration ends.
    timeTo - time range end (inclusive), where iteration starts.
Returns:
    an iterator over key-value pairs <timestamp, value>, from end to beginning of time.
Throws:
    InvalidStateStoreException - if the store is not initialized
    NullPointerException - if null is used for key.
    IllegalArgumentException - if duration is negative or can't be represented as long milliseconds

fetch

KeyValueIterator<Windowed<K>,V> fetch(K keyFrom, K keyTo, Instant timeFrom, Instant timeTo) throws IllegalArgumentException

Get all the key-value pairs in the given key range and time range from all the existing windows.

This iterator must be closed after use.

Parameters:
    keyFrom - the first key in the range. A null value indicates a starting position from the first element in the store.
    keyTo - the last key in the range. A null value indicates that the range ends with the last element in the store.
    timeFrom - time range start (inclusive), where iteration starts.
    timeTo - time range end (inclusive), where iteration ends.
Returns:
    an iterator over windowed key-value pairs <Windowed<K>, value>, from beginning to end of time.
Throws:
    InvalidStateStoreException - if the store is not initialized
    IllegalArgumentException - if duration is negative or can't be represented as long milliseconds

backwardFetch

default KeyValueIterator<Windowed<K>,V> backwardFetch(K keyFrom, K keyTo, Instant timeFrom, Instant timeTo) throws IllegalArgumentException

Get all the key-value pairs in the given key range and time range from all the existing windows in backward order with respect to time (from end to beginning of time).

This iterator must be closed after use.

Parameters:
    keyFrom - the first key in the range. A null value indicates a starting position from the first element in the store.
    keyTo - the last key in the range. A null value indicates that the range ends with the last element in the store.
    timeFrom - time range start (inclusive), where iteration ends.
    timeTo - time range end (inclusive), where iteration starts.
Returns:
    an iterator over windowed key-value pairs <Windowed<K>, value>, from end to beginning of time.
Throws:
    InvalidStateStoreException - if the store is not initialized
    IllegalArgumentException - if duration is negative or can't be represented as long milliseconds

all

KeyValueIterator<Windowed<K>,V> all()

Gets all the key-value pairs in the existing windows.

Returns:
    an iterator over windowed key-value pairs <Windowed<K>, value>, from beginning to end of time.
Throws:
    InvalidStateStoreException - if the store is not initialized

backwardAll

default KeyValueIterator<Windowed<K>,V> backwardAll()

Gets all the key-value pairs in the existing windows in backward order with respect to time (from end to beginning of time).

Returns:
    a backward iterator over windowed key-value pairs <Windowed<K>, value>, from end to beginning of time.
Throws:
    InvalidStateStoreException - if the store is not initialized

fetchAll

KeyValueIterator<Windowed<K>,V> fetchAll(Instant timeFrom, Instant timeTo) throws IllegalArgumentException

Gets all the key-value pairs that belong to the windows within the given time range.

Parameters:
    timeFrom - the beginning of the time slot from which to search (inclusive), where iteration starts.
    timeTo - the end of the time slot from which to search (inclusive), where iteration ends.
Returns:
    an iterator over windowed key-value pairs <Windowed<K>, value>, from beginning to end of time.
Throws:
    InvalidStateStoreException - if the store is not initialized
    NullPointerException - if null is used for any key
    IllegalArgumentException - if duration is negative or can't be represented as long milliseconds

backwardFetchAll

default KeyValueIterator<Windowed<K>,V> backwardFetchAll(Instant timeFrom, Instant timeTo) throws IllegalArgumentException

Gets all the key-value pairs that belong to the windows within the given time range in backward order with respect to time (from end to beginning of time).

Parameters:
    timeFrom - the beginning of the time slot from which to search (inclusive), where iteration ends.
    timeTo - the end of the time slot from which to search (inclusive), where iteration starts.
Returns:
    a backward iterator over windowed key-value pairs <Windowed<K>, value>, from end to beginning of time.
Throws:
    InvalidStateStoreException - if the store is not initialized
    NullPointerException - if null is used for any key
    IllegalArgumentException - if duration is negative or can't be represented as long milliseconds
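As a usage sketch of the inclusive time-range semantics described above, the snippet below fetches the windows for one key whose start timestamps fall in the last hour. A ReadOnlyWindowStore<String, Long> handle and the one-hour range are assumptions for illustration; the iterator yields <window-start-timestamp, value> pairs and must be closed.

```java
import java.time.Duration;
import java.time.Instant;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.state.ReadOnlyWindowStore;
import org.apache.kafka.streams.state.WindowStoreIterator;

public class WindowQueries {

    // 'windows' is assumed to be a ReadOnlyWindowStore<String, Long>.
    // fetch() is inclusive on both bounds and applies to the window start time.
    static void printLastHour(final ReadOnlyWindowStore<String, Long> windows, final String key) {
        final Instant now = Instant.now();
        try (WindowStoreIterator<Long> it =
                 windows.fetch(key, now.minus(Duration.ofHours(1)), now)) {
            while (it.hasNext()) {
                final KeyValue<Long, Long> entry = it.next(); // <window-start-timestamp, value>
                System.out.println(Instant.ofEpochMilli(entry.key) + " -> " + entry.value);
            }
        }
    }
}
```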
diff --git a/static/41/javadoc/org/apache/kafka/streams/state/RocksDBConfigSetter.html b/static/41/javadoc/org/apache/kafka/streams/state/RocksDBConfigSetter.html
new file mode 100644
index 000000000..e9d97435d
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/streams/state/RocksDBConfigSetter.html
@@ -0,0 +1,198 @@
RocksDBConfigSetter (kafka 4.1.0 API)
Interface RocksDBConfigSetter

public interface RocksDBConfigSetter

An interface that allows developers to customize the RocksDB settings for a given Store. Please read the RocksDB Tuning Guide.

Note: if you choose to modify the org.rocksdb.BlockBasedTableConfig you should retrieve a reference to the existing one (rather than create a new BlockBasedTableConfig object) so as to not lose the other default settings. This can be done as BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) options.tableFormatConfig();

Field Summary

static final org.slf4j.Logger  LOG

Method Summary

void  close(String storeName, org.rocksdb.Options options)
    Close any user-constructed objects that inherit from org.rocksdb.RocksObject.
void  setConfig(String storeName, org.rocksdb.Options options, Map<String,Object> configs)
    Set the RocksDB options for the provided storeName.

Field Details

LOG

static final org.slf4j.Logger LOG

Method Details

setConfig

void setConfig(String storeName, org.rocksdb.Options options, Map<String,Object> configs)

Set the RocksDB options for the provided storeName.

Parameters:
    storeName - the name of the store being configured
    options - the RocksDB options
    configs - the configuration supplied to StreamsConfig

close

void close(String storeName, org.rocksdb.Options options)

Close any user-constructed objects that inherit from org.rocksdb.RocksObject.

Any object created with new in setConfig() and that inherits from org.rocksdb.RocksObject should have org.rocksdb.RocksObject#close() called on it here to avoid leaking off-heap memory. Objects to be closed can be saved by the user or retrieved back from options using its getter methods.

Example objects needing to be closed include org.rocksdb.Filter and org.rocksdb.Cache.

Parameters:
    storeName - the name of the store being configured
    options - the RocksDB options
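The sketch below shows one way this interface is commonly implemented: it reuses the existing BlockBasedTableConfig as the note above recommends, swaps in a bounded block cache, and closes that cache in close() to avoid leaking off-heap memory. The 16 MB cache size is an arbitrary placeholder; such a class is registered via the rocksdb.config.setter Streams config (StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG).

```java
import java.util.Map;
import org.apache.kafka.streams.state.RocksDBConfigSetter;
import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.Cache;
import org.rocksdb.LRUCache;
import org.rocksdb.Options;

public class CustomRocksDBConfig implements RocksDBConfigSetter {

    // Hypothetical cache size; tune for your workload.
    private final Cache cache = new LRUCache(16 * 1024 * 1024L);

    @Override
    public void setConfig(final String storeName, final Options options,
                          final Map<String, Object> configs) {
        // Reuse the existing table config so the other default settings are preserved.
        final BlockBasedTableConfig tableConfig =
            (BlockBasedTableConfig) options.tableFormatConfig();
        tableConfig.setBlockCache(cache);
        options.setTableFormatConfig(tableConfig);
    }

    @Override
    public void close(final String storeName, final Options options) {
        // Close user-constructed RocksObjects to avoid leaking off-heap memory.
        cache.close();
    }
}
```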
diff --git a/static/41/javadoc/org/apache/kafka/streams/state/SessionBytesStoreSupplier.html b/static/41/javadoc/org/apache/kafka/streams/state/SessionBytesStoreSupplier.html
new file mode 100644
index 000000000..34fb5f613
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/streams/state/SessionBytesStoreSupplier.html
@@ -0,0 +1,164 @@
SessionBytesStoreSupplier (kafka 4.1.0 API)
Interface SessionBytesStoreSupplier

All Superinterfaces:
    StoreSupplier<SessionStore<org.apache.kafka.common.utils.Bytes,byte[]>>

public interface SessionBytesStoreSupplier extends StoreSupplier<SessionStore<org.apache.kafka.common.utils.Bytes,byte[]>>

A store supplier that can be used to create one or more SessionStore<Byte, byte[]> instances.

For any stores implementing the SessionStore<Byte, byte[]> interface, null value bytes are considered as "not exist". This means:

1. null value bytes in put operations should be treated as delete.
2. null value bytes should never be returned in range query results.

Method Summary

long  retentionPeriod()
    The time period for which the SessionStore will retain historic data.
long  segmentIntervalMs()
    The size of a segment, in milliseconds.

Methods inherited from interface org.apache.kafka.streams.state.StoreSupplier
    get, metricsScope, name

Method Details

segmentIntervalMs

long segmentIntervalMs()

The size of a segment, in milliseconds. Used when caching is enabled to segment the cache and reduce the amount of data that needs to be scanned when performing range queries.

Returns:
    segmentInterval in milliseconds

retentionPeriod

long retentionPeriod()

The time period for which the SessionStore will retain historic data.

Returns:
    retentionPeriod
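As a hedged sketch of where this supplier typically comes from, the snippet below obtains one from the Stores factory and wraps it in a Materialized for a session-windowed aggregation. The store name "session-agg-store", the one-hour retention, and the String/Long serdes are placeholders for illustration.

```java
import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.state.SessionBytesStoreSupplier;
import org.apache.kafka.streams.state.SessionStore;
import org.apache.kafka.streams.state.Stores;

public class SessionStoreMaterialization {

    // Build a Materialized backed by an explicit SessionBytesStoreSupplier.
    static Materialized<String, Long, SessionStore<org.apache.kafka.common.utils.Bytes, byte[]>> materialized() {
        final SessionBytesStoreSupplier supplier =
            Stores.persistentSessionStore("session-agg-store", Duration.ofHours(1));
        return Materialized.<String, Long>as(supplier)
            .withKeySerde(Serdes.String())
            .withValueSerde(Serdes.Long());
    }
}
```

Such a Materialized could then be passed to a session-windowed count() or aggregate() so the resulting store can be queried with QueryableStoreTypes.sessionStore().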
diff --git a/static/41/javadoc/org/apache/kafka/streams/state/SessionStore.html b/static/41/javadoc/org/apache/kafka/streams/state/SessionStore.html
new file mode 100644
index 000000000..93a31ec41
--- /dev/null
+++ b/static/41/javadoc/org/apache/kafka/streams/state/SessionStore.html
@@ -0,0 +1,424 @@
SessionStore (kafka 4.1.0 API)
    + +
    +
    + +
    + +

    Interface SessionStore<K,AGG>

    +
    +
    +
    +
    Type Parameters:
    +
    K - type of the record keys
    +
    AGG - type of the aggregated values
    +
    +
    +
    All Superinterfaces:
    +
    ReadOnlySessionStore<K,AGG>, StateStore
    +
    +
    +
    public interface SessionStore<K,AGG> +extends StateStore, ReadOnlySessionStore<K,AGG>
    +
    Interface for storing the aggregated values of sessions. +

    + The key is internally represented as Windowed<K> that comprises the plain + key and the Window that represents window start- and end-timestamp. +

    + If two sessions are merged, a new session with new start- and end-timestamp must be inserted into + the store while the two old sessions must be deleted.
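As a minimal sketch of reading from such a store, the helper below sums the aggregates of all sessions for a given key that overlap a search interval; the store is assumed to have been obtained elsewhere (for example via the processor context), and the key/value types are illustrative.

import java.time.Instant;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.SessionStore;

public final class SessionStoreRead {

    // Sums the aggregates of all sessions for `key` that overlap the search interval,
    // i.e., sessions ending at or after `earliestSessionEnd` and starting at or before
    // `latestSessionStart`. The iterator is closed via try-with-resources.
    static long totalForKey(final SessionStore<String, Long> store,
                            final String key,
                            final Instant earliestSessionEnd,
                            final Instant latestSessionStart) {
        long total = 0L;
        try (KeyValueIterator<Windowed<String>, Long> sessions =
                 store.findSessions(key, earliestSessionEnd, latestSessionStart)) {
            while (sessions.hasNext()) {
                final KeyValue<Windowed<String>, Long> session = sessions.next();
                total += session.value;
            }
        }
        return total;
    }
}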

    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        findSessions

        +
        default KeyValueIterator<Windowed<K>,AGG> findSessions(long earliestSessionEndTime, + long latestSessionEndTime)
        +
Return all the session window entries whose end time falls within the specified range (both ends are inclusive). This function is typically used to retrieve closed and immutable windows.
        +
        +
        Parameters:
        +
        earliestSessionEndTime - earliest session end time to search from, inclusive
        +
        latestSessionEndTime - latest session end time to search to, inclusive
        +
        +
        +
      • +
      • +
        +

        findSessions

        +
        default KeyValueIterator<Windowed<K>,AGG> findSessions(K key, + Instant earliestSessionEndTime, + Instant latestSessionStartTime)
        +
        Description copied from interface: ReadOnlySessionStore
        +
Fetch any sessions with the matching key whose session end is ≥ earliestSessionEndTime and whose session start is ≤ latestSessionStartTime, iterating from earliest to latest. I.e., earliestSessionEndTime is the lower bound of the search interval and latestSessionStartTime is the upper bound of the search interval, and the method returns all sessions that overlap with the search interval. Thus, if a session ends before earliestSessionEndTime or starts after latestSessionStartTime, it won't be contained in the result:
        
        + earliestSessionEndTime: ESET
        + latestSessionStartTime: LSST
        +
        +                       [ESET............LSST]
        + [not-included] [included]   [included]   [included] [not-included]
        + 
        +

        + This iterator must be closed after use.

        +
        +
        Specified by:
        +
        findSessions in interface ReadOnlySessionStore<K,AGG>
        +
        Parameters:
        +
        key - the key to return sessions for
        +
        earliestSessionEndTime - the end timestamp of the earliest session to search for, where + iteration starts.
        +
latestSessionStartTime - the start timestamp of the latest session to search for, where iteration ends.
        +
        Returns:
        +
        iterator of sessions with the matching key and aggregated values, from earliest to + latest session time.
        +
        +
        +
      • +
      • +
        +

        backwardFindSessions

        +
        default KeyValueIterator<Windowed<K>,AGG> backwardFindSessions(K key, + Instant earliestSessionEndTime, + Instant latestSessionStartTime)
        +
        Description copied from interface: ReadOnlySessionStore
        +
Fetch any sessions with the matching key whose session end is ≥ earliestSessionEndTime and whose session start is ≤ latestSessionStartTime, iterating from latest to earliest. I.e., earliestSessionEndTime is the lower bound of the search interval and latestSessionStartTime is the upper bound of the search interval, and the method returns all sessions that overlap with the search interval. Thus, if a session ends before earliestSessionEndTime or starts after latestSessionStartTime, it won't be contained in the result:
        
        + earliestSessionEndTime: ESET
        + latestSessionStartTime: LSST
        +
        +                       [ESET............LSST]
        + [not-included] [included]   [included]   [included] [not-included]
        + 
        +

        + This iterator must be closed after use.

        +
        +
        Specified by:
        +
        backwardFindSessions in interface ReadOnlySessionStore<K,AGG>
        +
        Parameters:
        +
        key - the key to return sessions for
        +
        earliestSessionEndTime - the end timestamp of the earliest session to search for, where + iteration ends.
        +
latestSessionStartTime - the start timestamp of the latest session to search for, where iteration starts.
        +
        Returns:
        +
        backward iterator of sessions with the matching key and aggregated values, from + latest to earliest session time.
        +
        +
        +
      • +
      • +
        +

        findSessions

        +
        default KeyValueIterator<Windowed<K>,AGG> findSessions(K keyFrom, + K keyTo, + Instant earliestSessionEndTime, + Instant latestSessionStartTime)
        +
        Description copied from interface: ReadOnlySessionStore
        +
Fetch any sessions in the given range of keys whose session end is ≥ earliestSessionEndTime and whose session start is ≤ latestSessionStartTime, iterating from earliest to latest. I.e., earliestSessionEndTime is the lower bound of the search interval and latestSessionStartTime is the upper bound of the search interval, and the method returns all sessions that overlap with the search interval. Thus, if a session ends before earliestSessionEndTime or starts after latestSessionStartTime, it won't be contained in the result:
        
        + earliestSessionEndTime: ESET
        + latestSessionStartTime: LSST
        +
        +                       [ESET............LSST]
        + [not-included] [included]   [included]   [included] [not-included]
        + 
        +

        + This iterator must be closed after use.

        +
        +
        Specified by:
        +
        findSessions in interface ReadOnlySessionStore<K,AGG>
        +
        Parameters:
        +
keyFrom - The first key that could be in the range. A null value indicates that the range starts with the first element in the store.
+
keyTo - The last key that could be in the range. A null value indicates that the range ends with the last element in the store.
        +
        earliestSessionEndTime - the end timestamp of the earliest session to search for, where + iteration starts.
        +
latestSessionStartTime - the start timestamp of the latest session to search for, where iteration ends.
        +
        Returns:
        +
        iterator of sessions with the matching keys and aggregated values, from earliest to + latest session time.
        +
        +
        +
      • +
      • +
        +

        backwardFindSessions

        +
        default KeyValueIterator<Windowed<K>,AGG> backwardFindSessions(K keyFrom, + K keyTo, + Instant earliestSessionEndTime, + Instant latestSessionStartTime)
        +
        Description copied from interface: ReadOnlySessionStore
        +
Fetch any sessions in the given range of keys whose session end is ≥ earliestSessionEndTime and whose session start is ≤ latestSessionStartTime, iterating from latest to earliest. I.e., earliestSessionEndTime is the lower bound of the search interval and latestSessionStartTime is the upper bound of the search interval, and the method returns all sessions that overlap with the search interval. Thus, if a session ends before earliestSessionEndTime or starts after latestSessionStartTime, it won't be contained in the result:
        
        + earliestSessionEndTime: ESET
        + latestSessionStartTime: LSST
        +
        +                       [ESET............LSST]
        + [not-included] [included]   [included]   [included] [not-included]
        + 
        +

        + This iterator must be closed after use.

        +
        +
        Specified by:
        +
        backwardFindSessions in interface ReadOnlySessionStore<K,AGG>
        +
        Parameters:
        +
keyFrom - The first key that could be in the range. A null value indicates that the range starts with the first element in the store.
+
keyTo - The last key that could be in the range. A null value indicates that the range ends with the last element in the store.
        +
        earliestSessionEndTime - the end timestamp of the earliest session to search for, where + iteration ends.
        +
latestSessionStartTime - the start timestamp of the latest session to search for, where iteration starts.
        +
        Returns:
        +
        backward iterator of sessions with the matching keys and aggregated values, from + latest to earliest session time.
        +
        +
        +
      • +
      • +
        +

        fetchSession

        +
        default AGG fetchSession(K key, + Instant sessionStartTime, + Instant sessionEndTime)
        +
        Description copied from interface: ReadOnlySessionStore
        +
        Get the value of key from a single session.
        +
        +
        Specified by:
        +
        fetchSession in interface ReadOnlySessionStore<K,AGG>
        +
        Parameters:
        +
        key - the key to fetch
        +
        sessionStartTime - start timestamp of the session
        +
        sessionEndTime - end timestamp of the session
        +
        Returns:
        +
        The value or null if no session with the exact start and end timestamp exists + for the given key
        +
        +
        +
      • +
      • +
        +

        remove

        +
        void remove(Windowed<K> sessionKey)
        +
Remove the session aggregate associated with the provided Windowed key from the store.
        +
        +
        Parameters:
        +
        sessionKey - key of the session to remove
        +
        Throws:
        +
        NullPointerException - If null is used for sessionKey.
        +
        +
        +
      • +
      • +
        +

        put

        +
        void put(Windowed<K> sessionKey, + AGG aggregate)
        +
        Write the aggregated value for the provided key to the store
        +
        +
        Parameters:
        +
        sessionKey - key of the session to write
        +
aggregate - the aggregated value for the session; it can be null. If the serialized bytes are also null, it is interpreted as a delete.
        +
        Throws:
        +
        NullPointerException - If null is used for sessionKey.
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/StateSerdes.html b/static/41/javadoc/org/apache/kafka/streams/state/StateSerdes.html new file mode 100644 index 000000000..8891eb4af --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/StateSerdes.html @@ -0,0 +1,443 @@ + + + + +StateSerdes (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StateSerdes<K,V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.state.StateSerdes<K,V>
    +
    +
    +
    +
    Type Parameters:
    +
    K - key type of serde
    +
    V - value type of serde
    +
    +
    +
    public final class StateSerdes<K,V> +extends Object
    +
    Factory for creating serializers / deserializers for state stores in Kafka Streams.
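A minimal sketch of using this factory with built-in types; the changelog topic name is an illustrative assumption.

import org.apache.kafka.streams.state.StateSerdes;

public final class StateSerdesExample {
    public static void main(final String[] args) {
        // Bind serdes for built-in Long keys and String values to a changelog topic.
        final StateSerdes<Long, String> serdes =
            StateSerdes.withBuiltinTypes("my-store-changelog", Long.class, String.class);

        // Round-trip a key and a value through the raw byte representation.
        final byte[] rawKey = serdes.rawKey(42L);
        final byte[] rawValue = serdes.rawValue("hello");

        final Long key = serdes.keyFrom(rawKey);         // 42
        final String value = serdes.valueFrom(rawValue); // "hello"

        System.out.println(key + " -> " + value + " (topic: " + serdes.topic() + ")");
    }
}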
    +
    +
    +
      + +
    • +
      +

      Field Summary

      +
      Fields
      +
      +
      Modifier and Type
      +
      Field
      +
      Description
      +
      static final int
      + +
       
      +
      static final int
      + +
       
      +
      +
      +
    • + +
    • +
      +

      Constructor Summary

      +
      Constructors
      +
      +
      Constructor
      +
      Description
      +
      StateSerdes(String topic, + Serde<K> keySerde, + Serde<V> valueSerde)
      +
      +
      Create a context for serialization using the specified serializers and deserializers which + must match the key and value types used as parameters for this object; the state changelog topic + is provided to bind this serde factory to, so that future calls for serialize / deserialize do not + need to provide the topic name any more.
      +
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + + +
      +
      Return the key deserializer.
      +
      + +
      keyFrom(byte[] rawKey)
      +
      +
      Deserialize the key from raw bytes.
      +
      + + +
      +
      Return the key serde.
      +
      + + +
      +
      Return the key serializer.
      +
      +
      byte[]
      +
      rawKey(K key)
      +
      +
      Serialize the given key.
      +
      +
      byte[]
      +
      rawValue(V value)
      +
      +
      Serialize the given value.
      +
      + + +
      +
      Return the topic.
      +
      + + +
      +
      Return the value deserializer.
      +
      + +
      valueFrom(byte[] rawValue)
      +
      +
      Deserialize the value from raw bytes.
      +
      + + +
      +
      Return the value serde.
      +
      + + +
      +
      Return the value serializer.
      +
      +
      static <K, +V> StateSerdes<K,V>
      +
      withBuiltinTypes(String topic, + Class<K> keyClass, + Class<V> valueClass)
      +
      +
      Create a new instance of StateSerdes for the given state name and key-/value-type classes.
      +
      +
      +
      +
      +
      +

      Methods inherited from class java.lang.Object

      +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        TIMESTAMP_SIZE

        +
        public static final int TIMESTAMP_SIZE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        BOOLEAN_SIZE

        +
        public static final int BOOLEAN_SIZE
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        StateSerdes

        +
        public StateSerdes(String topic, + Serde<K> keySerde, + Serde<V> valueSerde)
        +
        Create a context for serialization using the specified serializers and deserializers which + must match the key and value types used as parameters for this object; the state changelog topic + is provided to bind this serde factory to, so that future calls for serialize / deserialize do not + need to provide the topic name any more.
        +
        +
        Parameters:
        +
        topic - the topic name
        +
        keySerde - the serde for keys; cannot be null
        +
        valueSerde - the serde for values; cannot be null
        +
        Throws:
        +
        IllegalArgumentException - if key or value serde is null
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        withBuiltinTypes

        +
        public static <K, +V> StateSerdes<K,V> withBuiltinTypes(String topic, + Class<K> keyClass, + Class<V> valueClass)
        +
        Create a new instance of StateSerdes for the given state name and key-/value-type classes.
        +
        +
        Type Parameters:
        +
        K - the key type
        +
        V - the value type
        +
        Parameters:
        +
        topic - the topic name
        +
        keyClass - the class of the key type
        +
        valueClass - the class of the value type
        +
        Returns:
        +
        a new instance of StateSerdes
        +
        +
        +
      • +
      • +
        +

        keySerde

        +
        public Serde<K> keySerde()
        +
        Return the key serde.
        +
        +
        Returns:
        +
        the key serde
        +
        +
        +
      • +
      • +
        +

        valueSerde

        +
        public Serde<V> valueSerde()
        +
        Return the value serde.
        +
        +
        Returns:
        +
        the value serde
        +
        +
        +
      • +
      • +
        +

        keyDeserializer

        +
        public Deserializer<K> keyDeserializer()
        +
        Return the key deserializer.
        +
        +
        Returns:
        +
        the key deserializer
        +
        +
        +
      • +
      • +
        +

        keySerializer

        +
        public Serializer<K> keySerializer()
        +
        Return the key serializer.
        +
        +
        Returns:
        +
        the key serializer
        +
        +
        +
      • +
      • +
        +

        valueDeserializer

        +
        public Deserializer<V> valueDeserializer()
        +
        Return the value deserializer.
        +
        +
        Returns:
        +
        the value deserializer
        +
        +
        +
      • +
      • +
        +

        valueSerializer

        +
        public Serializer<V> valueSerializer()
        +
        Return the value serializer.
        +
        +
        Returns:
        +
        the value serializer
        +
        +
        +
      • +
      • +
        +

        topic

        +
        public String topic()
        +
        Return the topic.
        +
        +
        Returns:
        +
        the topic
        +
        +
        +
      • +
      • +
        +

        keyFrom

        +
        public K keyFrom(byte[] rawKey)
        +
        Deserialize the key from raw bytes.
        +
        +
        Parameters:
        +
        rawKey - the key as raw bytes
        +
        Returns:
        +
        the key as typed object
        +
        +
        +
      • +
      • +
        +

        valueFrom

        +
        public V valueFrom(byte[] rawValue)
        +
        Deserialize the value from raw bytes.
        +
        +
        Parameters:
        +
        rawValue - the value as raw bytes
        +
        Returns:
        +
        the value as typed object
        +
        +
        +
      • +
      • +
        +

        rawKey

        +
        public byte[] rawKey(K key)
        +
        Serialize the given key.
        +
        +
        Parameters:
        +
        key - the key to be serialized
        +
        Returns:
        +
        the serialized key
        +
        +
        +
      • +
      • +
        +

        rawValue

        +
        public byte[] rawValue(V value)
        +
        Serialize the given value.
        +
        +
        Parameters:
        +
        value - the value to be serialized
        +
        Returns:
        +
        the serialized value
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/StoreBuilder.html b/static/41/javadoc/org/apache/kafka/streams/state/StoreBuilder.html new file mode 100644 index 000000000..5b3abefb6 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/StoreBuilder.html @@ -0,0 +1,254 @@ + + + + +StoreBuilder (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface StoreBuilder<T extends StateStore>

    +
    +
    +
    +
    Type Parameters:
    +
    T - the type of store to build
    +
    +
    +
    public interface StoreBuilder<T extends StateStore>
    +
    Build a StateStore wrapped with optional caching and logging.
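As a minimal sketch of obtaining and customizing a builder via Stores and attaching it to a topology; the store name and changelog config are illustrative assumptions.

import java.util.Map;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

public final class StoreBuilderExample {
    public static void main(final String[] args) {
        // Persistent key-value store with caching enabled and a custom changelog retention.
        final StoreBuilder<KeyValueStore<String, Long>> builder =
            Stores.keyValueStoreBuilder(
                      Stores.persistentKeyValueStore("counts-store"),
                      Serdes.String(),
                      Serdes.Long())
                  .withCachingEnabled()
                  .withLoggingEnabled(Map.of("retention.ms", "172800000")); // 2 days

        // The builder is attached to a topology; processors can be connected to the store later.
        final Topology topology = new Topology();
        topology.addStateStore(builder);
    }
}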
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        withCachingEnabled

        +
        StoreBuilder<T> withCachingEnabled()
        +
        Enable caching on the store.
        +
        +
        Returns:
        +
        this
        +
        +
        +
      • +
      • +
        +

        withCachingDisabled

        +
        StoreBuilder<T> withCachingDisabled()
        +
        Disable caching on the store.
        +
        +
        Returns:
        +
        this
        +
        +
        +
      • +
      • +
        +

        withLoggingEnabled

        +
        StoreBuilder<T> withLoggingEnabled(Map<String,String> config)
        +
        Maintain a changelog for any changes made to the store. + Use the provided config to set the config of the changelog topic.
        +
        +
        Parameters:
        +
        config - config applied to the changelog topic
        +
        Returns:
        +
        this
        +
        +
        +
      • +
      • +
        +

        withLoggingDisabled

        +
        StoreBuilder<T> withLoggingDisabled()
        +
Disable the changelog for the store built by this StoreBuilder. This turns off fault tolerance for the store. By default, the changelog is enabled.
        +
        +
        Returns:
        +
        this
        +
        +
        +
      • +
      • +
        +

        build

        +
        T build()
        +
        Build the store as defined by the builder.
        +
        +
        Returns:
        +
        the built StateStore
        +
        +
        +
      • +
      • +
        +

        logConfig

        +
        Map<String,String> logConfig()
        +
        Returns a Map containing any log configs that will be used when creating the changelog for the StateStore. +

        + Note: any unrecognized configs will be ignored by the Kafka brokers.

        +
        +
        Returns:
        +
Map containing any log configs to be used when creating the changelog for the StateStore. If loggingEnabled() returns false, this method always returns an empty map.
        +
        +
        +
      • +
      • +
        +

        loggingEnabled

        +
        boolean loggingEnabled()
        +
        +
        Returns:
        +
        true if the StateStore should have logging enabled
        +
        +
        +
      • +
      • +
        +

        name

        +
        String name()
        +
        Return the name of this state store builder. + This must be a valid Kafka topic name; valid characters are ASCII alphanumerics, '.', '_' and '-'.
        +
        +
        Returns:
        +
        the name of this state store builder
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/StoreSupplier.html b/static/41/javadoc/org/apache/kafka/streams/state/StoreSupplier.html new file mode 100644 index 000000000..6268fd2c9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/StoreSupplier.html @@ -0,0 +1,173 @@ + + + + +StoreSupplier (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface StoreSupplier<T extends StateStore>

    +
    +
    +
    +
    Type Parameters:
    +
    T - State store type
    +
    +
    +
    All Known Subinterfaces:
    +
    KeyValueBytesStoreSupplier, SessionBytesStoreSupplier, VersionedBytesStoreSupplier, WindowBytesStoreSupplier
    +
    +
    +
    public interface StoreSupplier<T extends StateStore>
    +
    A state store supplier which can create one or more StateStore instances.
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      get()
      +
      +
      Return a new StateStore instance.
      +
      + + +
      +
      Return a String that is used as the scope for metrics recorded by Metered stores.
      +
      + + +
      +
      Return the name of this state store supplier.
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        name

        +
        String name()
        +
        Return the name of this state store supplier. + This must be a valid Kafka topic name; valid characters are ASCII alphanumerics, '.', '_' and '-'.
        +
        +
        Returns:
        +
        the name of this state store supplier
        +
        +
        +
      • +
      • +
        +

        get

        +
        T get()
        +
        Return a new StateStore instance.
        +
        +
        Returns:
        +
        a new StateStore instance of type T
        +
        +
        +
      • +
      • +
        +

        metricsScope

        +
        String metricsScope()
        +
        Return a String that is used as the scope for metrics recorded by Metered stores.
        +
        +
        Returns:
        +
        metricsScope
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/Stores.html b/static/41/javadoc/org/apache/kafka/streams/state/Stores.html new file mode 100644 index 000000000..9434bfcd5 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/Stores.html @@ -0,0 +1,740 @@ + + + + +Stores (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class Stores

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.state.Stores
    +
    +
    +
    +
    public final class Stores +extends Object
    +
    Factory for creating state stores in Kafka Streams. +

    + When using the high-level DSL, i.e., StreamsBuilder, users create + StoreSuppliers that can be further customized via + Materialized. + For example, a topic read as KTable can be materialized into an + in-memory store with custom key/value serdes and caching disabled: +

    
    + StreamsBuilder builder = new StreamsBuilder();
    + KeyValueBytesStoreSupplier storeSupplier = Stores.inMemoryKeyValueStore("queryable-store-name");
    + KTable<Long,String> table = builder.table(
    +   "topicName",
    +   Materialized.<Long,String>as(storeSupplier)
    +               .withKeySerde(Serdes.Long())
    +               .withValueSerde(Serdes.String())
    +               .withCachingDisabled());
    + 
    + When using the Processor API, i.e., Topology, users create + StoreBuilders that can be attached to Processors. + For example, you can create a windowed RocksDB store with custom + changelog topic configuration like: +
    
    + Topology topology = new Topology();
    + topology.addProcessor("processorName", ...);
    +
    + Map<String,String> topicConfig = new HashMap<>();
    + StoreBuilder<WindowStore<Integer, Long>> storeBuilder = Stores
    +   .windowStoreBuilder(
    +     Stores.persistentWindowStore("queryable-store-name", ...),
    +     Serdes.Integer(),
    +     Serdes.Long())
    +   .withLoggingEnabled(topicConfig);
    +
    + topology.addStateStore(storeBuilder, "processorName");
    + 
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/TimestampedBytesStore.html b/static/41/javadoc/org/apache/kafka/streams/state/TimestampedBytesStore.html new file mode 100644 index 000000000..24162d2d9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/TimestampedBytesStore.html @@ -0,0 +1,122 @@ + + + + +TimestampedBytesStore (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TimestampedBytesStore

    +
    +
    +
    +
    All Known Subinterfaces:
    +
    VersionedBytesStore
    +
    +
    +
    public interface TimestampedBytesStore
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      Static Methods
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      static byte[]
      +
      convertToTimestampedFormat(byte[] plainValue)
      +
       
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        convertToTimestampedFormat

        +
        static byte[] convertToTimestampedFormat(byte[] plainValue)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/TimestampedKeyValueStore.html b/static/41/javadoc/org/apache/kafka/streams/state/TimestampedKeyValueStore.html new file mode 100644 index 000000000..76b9367a1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/TimestampedKeyValueStore.html @@ -0,0 +1,109 @@ + + + + +TimestampedKeyValueStore (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TimestampedKeyValueStore<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - The key type
    +
    V - The value type
    +
    +
    +
    All Superinterfaces:
    +
    KeyValueStore<K,ValueAndTimestamp<V>>, ReadOnlyKeyValueStore<K,ValueAndTimestamp<V>>, StateStore
    +
    +
    +
    public interface TimestampedKeyValueStore<K,V> +extends KeyValueStore<K,ValueAndTimestamp<V>>
    +
    A key-(value/timestamp) store that supports put/get/delete and range queries.
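A brief sketch of reading from such a store and unwrapping the ValueAndTimestamp; the store is assumed to have been obtained from a processor context, and the key/value types are illustrative.

import org.apache.kafka.streams.state.TimestampedKeyValueStore;
import org.apache.kafka.streams.state.ValueAndTimestamp;

public final class TimestampedKvRead {

    // Returns the current value for `key`, or null if the key is absent,
    // printing the timestamp of the last update when present.
    static Long currentValue(final TimestampedKeyValueStore<String, Long> store, final String key) {
        final ValueAndTimestamp<Long> valueAndTimestamp = store.get(key);
        if (valueAndTimestamp == null) {
            return null;
        }
        System.out.println(key + " last updated at " + valueAndTimestamp.timestamp());
        return valueAndTimestamp.value();
    }
}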
    +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/TimestampedWindowStore.html b/static/41/javadoc/org/apache/kafka/streams/state/TimestampedWindowStore.html new file mode 100644 index 000000000..4535fbd2d --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/TimestampedWindowStore.html @@ -0,0 +1,116 @@ + + + + +TimestampedWindowStore (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface TimestampedWindowStore<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - Type of keys
    +
    V - Type of values
    +
    +
    +
    All Superinterfaces:
    +
    ReadOnlyWindowStore<K,ValueAndTimestamp<V>>, StateStore, WindowStore<K,ValueAndTimestamp<V>>
    +
    +
    +
    public interface TimestampedWindowStore<K,V> +extends WindowStore<K,ValueAndTimestamp<V>>
    +
    Interface for storing the aggregated values of fixed-size time windows. +

Note that the store's physical key type is Windowed<K>. In contrast to a WindowStore, which stores plain windowedKey-value pairs, a TimestampedWindowStore stores windowedKey-(value/timestamp) pairs.

    + While the window start- and end-timestamp are fixed per window, the value-side timestamp is used + to store the last update timestamp of the corresponding window.
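A short sketch of writing to such a store, wrapping the value together with its update timestamp; the store, key, and record values are illustrative assumptions.

import org.apache.kafka.streams.state.TimestampedWindowStore;
import org.apache.kafka.streams.state.ValueAndTimestamp;

public final class TimestampedWindowWrite {

    // Stores `count` for the window starting at `windowStart`, recording `updateTime`
    // as the last-update timestamp on the value side.
    static void update(final TimestampedWindowStore<String, Long> store,
                       final String key,
                       final long count,
                       final long windowStart,
                       final long updateTime) {
        store.put(key, ValueAndTimestamp.make(count, updateTime), windowStart);
    }
}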

    +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/ValueAndTimestamp.html b/static/41/javadoc/org/apache/kafka/streams/state/ValueAndTimestamp.html new file mode 100644 index 000000000..247c8a18f --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/ValueAndTimestamp.html @@ -0,0 +1,253 @@ + + + + +ValueAndTimestamp (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class ValueAndTimestamp<V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.state.ValueAndTimestamp<V>
    +
    +
    +
    +
    Type Parameters:
    +
    V -
    +
    +
    +
    public final class ValueAndTimestamp<V> +extends Object
    +
    Combines a value from a KeyValue with a timestamp.
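A short sketch of the null-handling semantics of the factory methods documented below; all values are illustrative.

import org.apache.kafka.streams.state.ValueAndTimestamp;

public final class ValueAndTimestampExample {
    public static void main(final String[] args) {
        // make() returns null if the value is null ...
        final ValueAndTimestamp<String> absent = ValueAndTimestamp.make(null, 100L);   // null
        final ValueAndTimestamp<String> present = ValueAndTimestamp.make("v1", 100L);  // non-null

        // ... whereas makeAllowNullable() always returns an instance.
        final ValueAndTimestamp<String> tombstone = ValueAndTimestamp.makeAllowNullable(null, 200L);

        // getValueOrNull() unwraps safely even when the wrapper itself is null.
        System.out.println(ValueAndTimestamp.getValueOrNull(absent));   // null
        System.out.println(ValueAndTimestamp.getValueOrNull(present));  // v1
        System.out.println(tombstone.timestamp());                      // 200
    }
}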
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        make

        +
        public static <V> ValueAndTimestamp<V> make(V value, + long timestamp)
        +
        Create a new ValueAndTimestamp instance if the provided value is not null.
        +
        +
        Type Parameters:
        +
        V - the type of the value
        +
        Parameters:
        +
        value - the value
        +
        timestamp - the timestamp
        +
        Returns:
        +
        a new ValueAndTimestamp instance if the provided value is not null; + otherwise null is returned
        +
        +
        +
      • +
      • +
        +

        makeAllowNullable

        +
        public static <V> ValueAndTimestamp<V> makeAllowNullable(V value, + long timestamp)
        +
        Create a new ValueAndTimestamp instance. The provided value may be null.
        +
        +
        Type Parameters:
        +
        V - the type of the value
        +
        Parameters:
        +
        value - the value
        +
        timestamp - the timestamp
        +
        Returns:
        +
        a new ValueAndTimestamp instance
        +
        +
        +
      • +
      • +
        +

        getValueOrNull

        +
        public static <V> V getValueOrNull(ValueAndTimestamp<V> valueAndTimestamp)
        +
        Return the wrapped value of the given valueAndTimestamp parameter + if the parameter is not null.
        +
        +
        Type Parameters:
        +
        V - the type of the value
        +
        Parameters:
        +
        valueAndTimestamp - a ValueAndTimestamp instance; can be null
        +
        Returns:
        +
        the wrapped value of valueAndTimestamp if not null; otherwise null
        +
        +
        +
      • +
      • +
        +

        value

        +
        public V value()
        +
        +
      • +
      • +
        +

        timestamp

        +
        public long timestamp()
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/VersionedBytesStore.html b/static/41/javadoc/org/apache/kafka/streams/state/VersionedBytesStore.html new file mode 100644 index 000000000..00c89435c --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/VersionedBytesStore.html @@ -0,0 +1,175 @@ + + + + +VersionedBytesStore (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface VersionedBytesStore

    +
    +
    +
    +
    All Superinterfaces:
    +
    KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>, ReadOnlyKeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>, StateStore, TimestampedBytesStore
    +
    +
    +
    public interface VersionedBytesStore +extends KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>, TimestampedBytesStore
    +
    A representation of a versioned key-value store as a KeyValueStore of type <Bytes, byte[]>. + See VersionedBytesStoreSupplier for more.
    +
    +
    + +
    +
    + +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/VersionedBytesStoreSupplier.html b/static/41/javadoc/org/apache/kafka/streams/state/VersionedBytesStoreSupplier.html new file mode 100644 index 000000000..5c1c3245b --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/VersionedBytesStoreSupplier.html @@ -0,0 +1,151 @@ + + + + +VersionedBytesStoreSupplier (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface VersionedBytesStoreSupplier

    +
    +
    +
    +
    All Superinterfaces:
    +
    KeyValueBytesStoreSupplier, StoreSupplier<KeyValueStore<org.apache.kafka.common.utils.Bytes,byte[]>>
    +
    +
    +
    public interface VersionedBytesStoreSupplier +extends KeyValueBytesStoreSupplier
    +
    A store supplier that can be used to create one or more versioned key-value stores, + specifically, VersionedBytesStore instances. +

Rather than representing the returned store as a VersionedKeyValueStore of type <Bytes, byte[]>, this supplier interface represents the returned store as a KeyValueStore of type <Bytes, byte[]> (via VersionedBytesStore) in order to be compatible with existing DSL methods for passing key-value stores such as StreamsBuilder.table(String, Materialized) and KTable.filter(Predicate, Materialized). A VersionedKeyValueStore<Bytes, byte[]> is represented as a KeyValueStore<Bytes, byte[]> by interpreting the value bytes as containing record timestamp information in addition to raw record values.

    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      long
      + +
      +
      Returns the history retention (in milliseconds) that stores created from this supplier will have.
      +
      +
      +
      +
      +
      +

      Methods inherited from interface org.apache.kafka.streams.state.StoreSupplier

      +get, metricsScope, name
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        historyRetentionMs

        +
        long historyRetentionMs()
        +
        Returns the history retention (in milliseconds) that stores created from this supplier will have. + This value is used to set compaction configs on store changelog topics (if relevant).
        +
        +
        Returns:
        +
        history retention, i.e., length of time that old record versions are available for + query from a versioned store
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/VersionedKeyValueStore.html b/static/41/javadoc/org/apache/kafka/streams/state/VersionedKeyValueStore.html new file mode 100644 index 000000000..e58aa060a --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/VersionedKeyValueStore.html @@ -0,0 +1,343 @@ + + + + +VersionedKeyValueStore (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface VersionedKeyValueStore<K,V>

    +
    +
    +
    +
    Type Parameters:
    +
    K - The key type
    +
    V - The value type
    +
    +
    +
    All Superinterfaces:
    +
    StateStore
    +
    +
    +
    public interface VersionedKeyValueStore<K,V> +extends StateStore
    +
    A key-value store that stores multiple record versions per key, and supports timestamp-based + retrieval operations to return the latest record (per key) as of a specified timestamp. + Only one record is stored per key and timestamp, i.e., a second call to + put(Object, Object, long) with the same key and timestamp will replace the first. +

    + Each store instance has an associated, fixed-duration "history retention" which specifies + how long old record versions should be kept for. In particular, a versioned store guarantees + to return accurate results for calls to get(Object, long) where the provided timestamp + bound is within history retention of the current observed stream time. (Queries with timestamp + bound older than the specified history retention are considered invalid.) +

    + The store's "history retention" also doubles as its "grace period," which determines how far + back in time writes to the store will be accepted. A versioned store will not accept writes + (inserts, updates, or deletions) if the timestamp associated with the write is older than the + current observed stream time by more than the grace period.
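A compact sketch of the timestamp-based semantics described above; the store is assumed to have been obtained from a processor context (for example, one built from a versioned store supplier), and the key, values, and timestamps are illustrative.

import org.apache.kafka.streams.state.VersionedKeyValueStore;
import org.apache.kafka.streams.state.VersionedRecord;

public final class VersionedStoreExample {

    static void demo(final VersionedKeyValueStore<String, String> store) {
        store.put("k", "v1", 10L);           // first version; returns -1 (validTo undefined)
        store.put("k", "v2", 20L);           // supersedes v1 from timestamp 20 onwards

        final VersionedRecord<String> latest = store.get("k");      // "v2", timestamp 20
        final VersionedRecord<String> asOf15 = store.get("k", 15L); // "v1", timestamp 10

        store.delete("k", 30L);              // tombstone at timestamp 30; returns the deleted record

        System.out.println(latest.value() + " / " + asOf15.value());
    }
}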

    +
    +
    +
      + +
    • +
      +

      Field Summary

      +
      Fields
      +
      +
      Modifier and Type
      +
      Field
      +
      Description
      +
      static final long
      + +
       
      +
      static final long
      + +
       
      +
      +
      +
    • + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      delete(K key, + long timestamp)
      +
      +
      Delete the value associated with this key from the store, at the specified timestamp + (if there is such a value), and return the deleted value.
      +
      + +
      get(K key)
      +
      +
      Get the current (i.e., latest by timestamp) record associated with this key.
      +
      + +
      get(K key, + long asOfTimestamp)
      +
      +
      Get the record associated with this key as of the specified timestamp (i.e., + the existing record with the largest timestamp not exceeding the provided + timestamp bound).
      +
      +
      long
      +
      put(K key, + V value, + long timestamp)
      +
      +
      Add a new record version associated with the specified key and timestamp.
      +
      +
      +
      +
      +
      +

      Methods inherited from interface org.apache.kafka.streams.processor.StateStore

      +close, flush, getPosition, init, isOpen, name, persistent, query
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Field Details

      +
        +
      • +
        +

        PUT_RETURN_CODE_VALID_TO_UNDEFINED

        +
        static final long PUT_RETURN_CODE_VALID_TO_UNDEFINED
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        PUT_RETURN_CODE_NOT_PUT

        +
        static final long PUT_RETURN_CODE_NOT_PUT
        +
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        put

        +
        long put(K key, + V value, + long timestamp)
        +
        Add a new record version associated with the specified key and timestamp. +

        + If the timestamp associated with the new record version is older than the store's + grace period (i.e., history retention) relative to the current observed stream time, + then the record will not be added.

        +
        +
        Parameters:
        +
        key - The key
        +
        value - The value, it can be null. null is interpreted as a delete.
        +
        timestamp - The timestamp for this record version
        +
        Returns:
        +
        The validTo timestamp of the newly put record. Two special values, -1 and + Long.MIN_VALUE carry specific meanings. -1 indicates that the + record that was put is the latest record version for its key, and therefore the + validTo timestamp is undefined. Long.MIN_VALUE indicates that the record + was not put, due to grace period having been exceeded.
        +
        Throws:
        +
        NullPointerException - If null is used for key.
        +
        InvalidStateStoreException - if the store is not initialized
        +
        +
        +
      • +
      • +
        +

        delete

        +
        VersionedRecord<V> delete(K key, + long timestamp)
        +
        Delete the value associated with this key from the store, at the specified timestamp + (if there is such a value), and return the deleted value. +

        + If the timestamp associated with this deletion is older than the store's grace period + (i.e., history retention) relative to the current observed stream time, then the deletion + will not be performed and null will be returned. +

As a consequence of the above, the way to delete a record version is not to first call #get(key) or #get(key, timestamp) and use the returned VersionedRecord.timestamp() in a call to this delete(key, timestamp) method, as the returned timestamp may be older than the store's grace period (i.e., history retention), in which case the deletion will not take place. Instead, you should pass a timestamp inferred from your business logic that specifies when the delete actually happens, for example the timestamp of the currently processed input record or the current stream time.

        + This operation is semantically equivalent to #get(key, timestamp) + followed by #put(key, null, timestamp), with + a caveat that if the deletion timestamp is older than the store's grace period + (i.e., history retention) then the return value is always null, regardless + of what #get(key, timestamp) would return.

        +
        +
        Parameters:
        +
        key - The key
        +
        timestamp - The timestamp for this delete
        +
        Returns:
        +
        The value and timestamp of the record associated with this key as of + the deletion timestamp (inclusive), or null if no such record exists + (including if the deletion timestamp is older than this store's history + retention time, i.e., the store no longer contains data for the provided + timestamp). Note that the record timestamp r.timestamp() of the + returned VersionedRecord may be smaller than the provided deletion + timestamp.
        +
        Throws:
        +
        NullPointerException - If null is used for key.
        +
        InvalidStateStoreException - if the store is not initialized
        +
        +
        +
      • +
      • +
        +

        get

        +
        VersionedRecord<V> get(K key)
        +
        Get the current (i.e., latest by timestamp) record associated with this key.
        +
        +
        Parameters:
        +
        key - The key to fetch
        +
        Returns:
        +
        The value and timestamp of the current record associated with this key, or + null if there is no current record for this key.
        +
        Throws:
        +
        NullPointerException - If null is used for key.
        +
        InvalidStateStoreException - if the store is not initialized
        +
        +
        +
      • +
      • +
        +

        get

        +
        VersionedRecord<V> get(K key, + long asOfTimestamp)
        +
        Get the record associated with this key as of the specified timestamp (i.e., + the existing record with the largest timestamp not exceeding the provided + timestamp bound).
        +
        +
        Parameters:
        +
        key - The key to fetch
        +
        asOfTimestamp - The timestamp bound. This bound is inclusive; if a record + (for the specified key) exists with this timestamp, then + this is the record that will be returned.
        +
        Returns:
        +
        The value and timestamp of the record associated with this key + as of the provided timestamp, or null if no such record exists + (including if the provided timestamp bound is older than this store's history + retention time, i.e., the store no longer contains data for the provided + timestamp). Note that the record timestamp r.timestamp() of the + returned VersionedRecord may be smaller than the provided timestamp + bound. Additionally, if the latest record version for the key is eligible + for the provided timestamp bound, then that record will be returned even if + the timestamp bound is older than the store's history retention.
        +
        Throws:
        +
        NullPointerException - If null is used for key.
        +
        InvalidStateStoreException - if the store is not initialized
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/VersionedRecord.html b/static/41/javadoc/org/apache/kafka/streams/state/VersionedRecord.html new file mode 100644 index 000000000..343b736d9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/VersionedRecord.html @@ -0,0 +1,252 @@ + + + + +VersionedRecord (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class VersionedRecord<V>

    +
    +
    java.lang.Object +
    org.apache.kafka.streams.state.VersionedRecord<V>
    +
    +
    +
    +
    Type Parameters:
    +
    V - The value type
    +
    +
    +
    public final class VersionedRecord<V> +extends Object
    +
    Combines a value (from a key-value record) with a timestamp, for use as the return type + from VersionedKeyValueStore.get(Object, long) and related methods.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        VersionedRecord

        +
        public VersionedRecord(V value, + long timestamp)
        +
        Create a new VersionedRecord instance. value cannot be null.
        +
        +
        Parameters:
        +
        value - The value
        +
timestamp - The timestamp of the record
        +
        +
        +
      • +
      • +
        +

        VersionedRecord

        +
        public VersionedRecord(V value, + long timestamp, + long validTo)
        +
        Create a new VersionedRecord instance. value cannot be null.
        +
        +
        Parameters:
        +
        value - The value
        +
        timestamp - The timestamp
        +
        validTo - The exclusive upper bound of the validity interval
        +
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        value

        +
        public V value()
        +
        +
      • +
      • +
        +

        timestamp

        +
        public long timestamp()
        +
        +
      • +
      • +
        +

        validTo

        +
        public Optional<Long> validTo()
        +
        +
      • +
      • +
        +

        toString

        +
        public String toString()
        +
        +
        Overrides:
        +
        toString in class Object
        +
        +
        +
      • +
      • +
        +

        equals

        +
        public boolean equals(Object o)
        +
        +
        Overrides:
        +
        equals in class Object
        +
        +
        +
      • +
      • +
        +

        hashCode

        +
        public int hashCode()
        +
        +
        Overrides:
        +
        hashCode in class Object
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/VersionedRecordIterator.html b/static/41/javadoc/org/apache/kafka/streams/state/VersionedRecordIterator.html new file mode 100644 index 000000000..95c5bd2c1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/VersionedRecordIterator.html @@ -0,0 +1,147 @@ + + + + +VersionedRecordIterator (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface VersionedRecordIterator<V>

    +
    +
    +
    +
    Type Parameters:
    +
    V - Type of values
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Closeable, Iterator<VersionedRecord<V>>
    +
    +
    +
    public interface VersionedRecordIterator<V> +extends Iterator<VersionedRecord<V>>, Closeable
    +
    Iterator interface of VersionedRecord. +

    + Users must call its close method explicitly upon completeness to release resources, + or use try-with-resources statement (available since JDK7) for this Closeable class. + Note that remove() is not supported.

    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      void
      + +
       
      +
      +
      +
      +
      +

      Methods inherited from interface java.util.Iterator

      +forEachRemaining, hasNext, next, remove
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      + +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/WindowBytesStoreSupplier.html b/static/41/javadoc/org/apache/kafka/streams/state/WindowBytesStoreSupplier.html new file mode 100644 index 000000000..6dfc593b9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/WindowBytesStoreSupplier.html @@ -0,0 +1,197 @@ + + + + +WindowBytesStoreSupplier (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface WindowBytesStoreSupplier

    +
    +
    +
    +
    All Superinterfaces:
    +
    StoreSupplier<WindowStore<org.apache.kafka.common.utils.Bytes,byte[]>>
    +
    +
    +
    public interface WindowBytesStoreSupplier +extends StoreSupplier<WindowStore<org.apache.kafka.common.utils.Bytes,byte[]>>
    +
A store supplier that can be used to create one or more WindowStore<Byte, byte[]> instances.

For any store implementing the WindowStore<Byte, byte[]> interface, null value bytes are treated as non-existent entries. This means:

1. Null value bytes in put operations should be treated as deletes.
2. Null value bytes should never be returned in range query results.
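A brief sketch of obtaining such a supplier from Stores and wrapping it in a store builder; the store name, retention, and window size are illustrative assumptions.

import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;
import org.apache.kafka.streams.state.WindowBytesStoreSupplier;
import org.apache.kafka.streams.state.WindowStore;

public final class WindowSupplierExample {
    public static void main(final String[] args) {
        // Persistent window store: 1 day retention, 5 minute windows, no duplicates retained.
        final WindowBytesStoreSupplier supplier =
            Stores.persistentWindowStore("counts-by-window",
                                         Duration.ofDays(1),
                                         Duration.ofMinutes(5),
                                         false);

        final StoreBuilder<WindowStore<String, Long>> builder =
            Stores.windowStoreBuilder(supplier, Serdes.String(), Serdes.Long());

        System.out.println(builder.name() + ": retention ms = " + supplier.retentionPeriod()
            + ", window size ms = " + supplier.windowSize());
    }
}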
    +
    +
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      boolean
      + +
      +
      Whether or not this store is retaining duplicate keys.
      +
      +
      long
      + +
      +
      The time period for which the WindowStore will retain historic data.
      +
      +
      long
      + +
      +
      The size of the segments (in milliseconds) the store has.
      +
      +
      long
      + +
      +
      The size of the windows (in milliseconds) any store created from this supplier is creating.
      +
      +
      +
      +
      +
      +

      Methods inherited from interface org.apache.kafka.streams.state.StoreSupplier

      +get, metricsScope, name
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        segmentIntervalMs

        +
        long segmentIntervalMs()
        +
        The size of the segments (in milliseconds) the store has. + If your store is segmented then this should be the size of segments in the underlying store. + It is also used to reduce the amount of data that is scanned when caching is enabled.
        +
        +
        Returns:
        +
        size of the segments (in milliseconds)
        +
        +
        +
      • +
      • +
        +

        windowSize

        +
        long windowSize()
        +
        The size of the windows (in milliseconds) any store created from this supplier is creating.
        +
        +
        Returns:
        +
        window size
        +
        +
        +
      • +
      • +
        +

        retainDuplicates

        +
        boolean retainDuplicates()
        +
        Whether or not this store is retaining duplicate keys. + Usually only true if the store is being used for joins. + Note this should return false if caching is enabled.
        +
        +
        Returns:
        +
        true if duplicates should be retained
        +
        +
        +
      • +
      • +
        +

        retentionPeriod

        +
        long retentionPeriod()
        +
        The time period for which the WindowStore will retain historic data.
        +
        +
        Returns:
        +
        retentionPeriod
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/WindowStore.html b/static/41/javadoc/org/apache/kafka/streams/state/WindowStore.html new file mode 100644 index 000000000..38da19c68 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/WindowStore.html @@ -0,0 +1,552 @@ + + + + +WindowStore (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface WindowStore<K,V>

Type Parameters:
K - Type of keys
V - Type of values

All Superinterfaces:
ReadOnlyWindowStore<K,V>, StateStore

All Known Subinterfaces:
TimestampedWindowStore<K,V>

public interface WindowStore<K,V>
extends StateStore, ReadOnlyWindowStore<K,V>
Interface for storing the aggregated values of fixed-size time windows.

Note that the stores' physical key type is Windowed<K>.
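As a hedged sketch of how this interface is typically reached (not stated on this page), a processor can look up a WindowStore that was attached to it in the topology; the store name "counts-window" is an assumption.

    import org.apache.kafka.streams.processor.api.Processor;
    import org.apache.kafka.streams.processor.api.ProcessorContext;
    import org.apache.kafka.streams.processor.api.Record;
    import org.apache.kafka.streams.state.WindowStore;

    // Minimal sketch: write each record into a window store, keyed by the record timestamp.
    public class WindowWriteProcessor implements Processor<String, Long, String, Long> {
        private WindowStore<String, Long> store;

        @Override
        public void init(final ProcessorContext<String, Long> context) {
            // "counts-window" is a hypothetical store name registered with this processor.
            this.store = context.getStateStore("counts-window");
        }

        @Override
        public void process(final Record<String, Long> record) {
            // Real code would align the timestamp to the window boundaries in use.
            store.put(record.key(), record.value(), record.timestamp());
        }
    }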


      Method Details

put

void put(K key, V value, long windowStartTimestamp)

Put a key-value pair into the window with the given window start timestamp.

If the serialized value bytes are null, the put is interpreted as a delete. Note that deletes are ignored in the case of an underlying store that retains duplicates.

Parameters:
key - The key to associate the value to
value - The value; can be null
windowStartTimestamp - The timestamp of the beginning of the window to put the key/value into
Throws:
InvalidStateStoreException - if the store is not initialized
NullPointerException - if the given key is null
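A minimal sketch of these semantics, assuming a WindowStore<String, Long> obtained elsewhere and 5-minute windows (both assumptions):

    import org.apache.kafka.streams.state.WindowStore;

    // Sketch only: `store` is a WindowStore<String, Long>; 5-minute windows assumed.
    static void putExample(final WindowStore<String, Long> store) {
        final long windowSizeMs = 5 * 60 * 1000L;
        final long now = System.currentTimeMillis();
        final long windowStart = now - (now % windowSizeMs);   // align to the window boundary

        store.put("user-42", 1L, windowStart);    // insert or overwrite the value for this window
        store.put("user-42", null, windowStart);  // null value acts as a delete
                                                  // (ignored if the store retains duplicates)
    }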
fetch

WindowStoreIterator<V> fetch(K key, long timeFrom, long timeTo)

Get all the key-value pairs with the given key and the time range from all the existing windows.

This iterator must be closed after use.

The time range is inclusive and applies to the starting timestamp of the window. For example, if we have the following windows:

    +-------+------------+----------+
    |  key  | start time | end time |
    +-------+------------+----------+
    |   A   |     10     |    20    |
    +-------+------------+----------+
    |   A   |     15     |    25    |
    +-------+------------+----------+
    |   A   |     20     |    30    |
    +-------+------------+----------+
    |   A   |     25     |    35    |
    +-------+------------+----------+

and we call store.fetch("A", 10, 20), then the results will contain the first three windows from the table above, i.e., all those where 10 <= start time <= 20.

For each key, the iterator guarantees ordering of windows, starting from the oldest/earliest available window to the newest/latest window.

Parameters:
key - the key to fetch
timeFrom - time range start (inclusive)
timeTo - time range end (inclusive)
Returns:
an iterator over key-value pairs <timestamp, value>
Throws:
InvalidStateStoreException - if the store is not initialized
NullPointerException - if the given key is null
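A short sketch of the range semantics above, assuming a WindowStore<String, Long>; the iterator is closed via try-with-resources:

    import org.apache.kafka.streams.KeyValue;
    import org.apache.kafka.streams.state.WindowStore;
    import org.apache.kafka.streams.state.WindowStoreIterator;

    // Sketch only: returns the windows with start times 10, 15, and 20 from the example table.
    static void fetchExample(final WindowStore<String, Long> store) {
        try (WindowStoreIterator<Long> iter = store.fetch("A", 10L, 20L)) {
            while (iter.hasNext()) {
                final KeyValue<Long, Long> entry = iter.next();   // key = window start timestamp
                System.out.println("window start " + entry.key + " -> " + entry.value);
            }
        }
    }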
fetch

default WindowStoreIterator<V> fetch(K key, Instant timeFrom, Instant timeTo) throws IllegalArgumentException

Description copied from interface: ReadOnlyWindowStore

Get all the key-value pairs with the given key and the time range from all the existing windows.

This iterator must be closed after use.

The time range is inclusive and applies to the starting timestamp of the window. For example, given the windows in the table above, calling store.fetch("A", Instant.ofEpochMilli(10), Instant.ofEpochMilli(20)) returns the first three windows, i.e., all those where 10 <= start time <= 20.

For each key, the iterator guarantees ordering of windows, starting from the oldest/earliest available window to the newest/latest window.

Specified by:
fetch in interface ReadOnlyWindowStore<K,V>
Parameters:
key - the key to fetch
timeFrom - time range start (inclusive), where iteration starts.
timeTo - time range end (inclusive), where iteration ends.
Returns:
an iterator over key-value pairs <timestamp, value>, from beginning to end of time.
Throws:
IllegalArgumentException - if duration is negative or can't be represented as long milliseconds
backwardFetch

default WindowStoreIterator<V> backwardFetch(K key, long timeFrom, long timeTo)

backwardFetch

default WindowStoreIterator<V> backwardFetch(K key, Instant timeFrom, Instant timeTo) throws IllegalArgumentException

Description copied from interface: ReadOnlyWindowStore

Get all the key-value pairs with the given key and the time range from all the existing windows in backward order with respect to time (from end to beginning of time).

This iterator must be closed after use.

The time range is inclusive and applies to the starting timestamp of the window. For example, given the windows in the table above, calling store.backwardFetch("A", Instant.ofEpochMilli(10), Instant.ofEpochMilli(20)) returns the first three windows in backward order, i.e., all those where 10 <= start time <= 20.

For each key, the iterator guarantees ordering of windows, starting from the newest/latest available window to the oldest/earliest window.

Specified by:
backwardFetch in interface ReadOnlyWindowStore<K,V>
Parameters:
key - the key to fetch
timeFrom - time range start (inclusive), where iteration ends.
timeTo - time range end (inclusive), where iteration starts.
Returns:
an iterator over key-value pairs <timestamp, value>, from end to beginning of time.
Throws:
IllegalArgumentException - if duration is negative or can't be represented as long milliseconds
fetch

KeyValueIterator<Windowed<K>,V> fetch(K keyFrom, K keyTo, long timeFrom, long timeTo)

Get all the key-value pairs in the given key range and time range from all the existing windows.

This iterator must be closed after use.

Parameters:
keyFrom - the first key in the range; a null value indicates a starting position from the first element in the store.
keyTo - the last key in the range; a null value indicates that the range ends with the last element in the store.
timeFrom - time range start (inclusive)
timeTo - time range end (inclusive)
Returns:
an iterator over windowed key-value pairs <Windowed<K>, value>
Throws:
InvalidStateStoreException - if the store is not initialized
fetch

default KeyValueIterator<Windowed<K>,V> fetch(K keyFrom, K keyTo, Instant timeFrom, Instant timeTo) throws IllegalArgumentException

Description copied from interface: ReadOnlyWindowStore

Get all the key-value pairs in the given key range and time range from all the existing windows.

This iterator must be closed after use.

Specified by:
fetch in interface ReadOnlyWindowStore<K,V>
Parameters:
keyFrom - the first key in the range; a null value indicates a starting position from the first element in the store.
keyTo - the last key in the range; a null value indicates that the range ends with the last element in the store.
timeFrom - time range start (inclusive), where iteration starts.
timeTo - time range end (inclusive), where iteration ends.
Returns:
an iterator over windowed key-value pairs <Windowed<K>, value>, from beginning to end of time.
Throws:
IllegalArgumentException - if duration is negative or can't be represented as long milliseconds
backwardFetch

default KeyValueIterator<Windowed<K>,V> backwardFetch(K keyFrom, K keyTo, long timeFrom, long timeTo)

backwardFetch

default KeyValueIterator<Windowed<K>,V> backwardFetch(K keyFrom, K keyTo, Instant timeFrom, Instant timeTo) throws IllegalArgumentException

Description copied from interface: ReadOnlyWindowStore

Get all the key-value pairs in the given key range and time range from all the existing windows in backward order with respect to time (from end to beginning of time).

This iterator must be closed after use.

Specified by:
backwardFetch in interface ReadOnlyWindowStore<K,V>
Parameters:
keyFrom - the first key in the range; a null value indicates a starting position from the first element in the store.
keyTo - the last key in the range; a null value indicates that the range ends with the last element in the store.
timeFrom - time range start (inclusive), where iteration ends.
timeTo - time range end (inclusive), where iteration starts.
Returns:
an iterator over windowed key-value pairs <Windowed<K>, value>, from end to beginning of time.
Throws:
IllegalArgumentException - if duration is negative or can't be represented as long milliseconds
fetchAll

KeyValueIterator<Windowed<K>,V> fetchAll(long timeFrom, long timeTo)

Gets all the key-value pairs that belong to the windows within the given time range.

Parameters:
timeFrom - the beginning of the time slot from which to search (inclusive)
timeTo - the end of the time slot from which to search (inclusive)
Returns:
an iterator over windowed key-value pairs <Windowed<K>, value>
Throws:
InvalidStateStoreException - if the store is not initialized
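As a sketch (same assumed WindowStore<String, Long>), fetchAll scans every key whose window start falls in the range; the windowed key exposes both the original key and the window bounds:

    import org.apache.kafka.streams.KeyValue;
    import org.apache.kafka.streams.kstream.Windowed;
    import org.apache.kafka.streams.state.KeyValueIterator;
    import org.apache.kafka.streams.state.WindowStore;

    // Sketch only: scan every key whose window start falls within [0, now].
    static void fetchAllExample(final WindowStore<String, Long> store) {
        try (KeyValueIterator<Windowed<String>, Long> iter =
                     store.fetchAll(0L, System.currentTimeMillis())) {
            while (iter.hasNext()) {
                final KeyValue<Windowed<String>, Long> entry = iter.next();
                System.out.println(entry.key.key()
                        + " @ " + entry.key.window().start()
                        + " -> " + entry.value);
            }
        }
    }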
fetchAll

default KeyValueIterator<Windowed<K>,V> fetchAll(Instant timeFrom, Instant timeTo) throws IllegalArgumentException

Description copied from interface: ReadOnlyWindowStore

Gets all the key-value pairs that belong to the windows within the given time range.

Specified by:
fetchAll in interface ReadOnlyWindowStore<K,V>
Parameters:
timeFrom - the beginning of the time slot from which to search (inclusive), where iteration starts.
timeTo - the end of the time slot from which to search (inclusive), where iteration ends.
Returns:
an iterator over windowed key-value pairs <Windowed<K>, value>, from beginning to end of time.
Throws:
IllegalArgumentException - if duration is negative or can't be represented as long milliseconds

backwardFetchAll

default KeyValueIterator<Windowed<K>,V> backwardFetchAll(long timeFrom, long timeTo)

backwardFetchAll

default KeyValueIterator<Windowed<K>,V> backwardFetchAll(Instant timeFrom, Instant timeTo) throws IllegalArgumentException

Description copied from interface: ReadOnlyWindowStore

Gets all the key-value pairs that belong to the windows within the given time range in backward order with respect to time (from end to beginning of time).

Specified by:
backwardFetchAll in interface ReadOnlyWindowStore<K,V>
Parameters:
timeFrom - the beginning of the time slot from which to search (inclusive), where iteration ends.
timeTo - the end of the time slot from which to search (inclusive), where iteration starts.
Returns:
a backward iterator over windowed key-value pairs <Windowed<K>, value>, from end to beginning of time.
Throws:
IllegalArgumentException - if duration is negative or can't be represented as long milliseconds
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/WindowStoreIterator.html b/static/41/javadoc/org/apache/kafka/streams/state/WindowStoreIterator.html new file mode 100644 index 000000000..a384353bb --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/WindowStoreIterator.html @@ -0,0 +1,152 @@ + + + + +WindowStoreIterator (kafka 4.1.0 API) + + + + + + + + + + + + + + +

    Interface WindowStoreIterator<V>

Type Parameters:
V - Type of values

All Superinterfaces:
AutoCloseable, Closeable, Iterator<KeyValue<Long,V>>, KeyValueIterator<Long,V>

public interface WindowStoreIterator<V>
extends KeyValueIterator<Long,V>, Closeable
Iterator interface of KeyValue with key typed Long, used for WindowStore.fetch(Object, long, long) and WindowStore.fetch(Object, Instant, Instant).

Users must call its close method explicitly upon completion to release resources, or use a try-with-resources statement (available since JDK 7) for this Closeable class.
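A minimal usage sketch, assuming a WindowStore<String, Long> obtained elsewhere; try-with-resources guarantees the iterator is closed:

    import java.time.Duration;
    import java.time.Instant;
    import org.apache.kafka.streams.state.WindowStore;
    import org.apache.kafka.streams.state.WindowStoreIterator;

    // Sketch only: iterate the last hour of windows for one key.
    static void iterateLastHour(final WindowStore<String, Long> store) {
        final Instant to = Instant.now();
        final Instant from = to.minus(Duration.ofHours(1));
        try (WindowStoreIterator<Long> iter = store.fetch("user-42", from, to)) {
            // Each KeyValue's key is the window start timestamp in milliseconds.
            iter.forEachRemaining(kv -> System.out.println(kv.key + " -> " + kv.value));
        }
    }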
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/package-summary.html b/static/41/javadoc/org/apache/kafka/streams/state/package-summary.html new file mode 100644 index 000000000..106448d79 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/package-summary.html @@ -0,0 +1,278 @@ + + + + +org.apache.kafka.streams.state (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.streams.state

    +
    +
    +
    package org.apache.kafka.streams.state
    +
    +
    Provides interfaces for managing the intermediate state of a stateful streams application.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/state/package-tree.html b/static/41/javadoc/org/apache/kafka/streams/state/package-tree.html new file mode 100644 index 000000000..c1015a7ae --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/state/package-tree.html @@ -0,0 +1,180 @@ + + + + +org.apache.kafka.streams.state Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.streams.state

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/test/TestRecord.html b/static/41/javadoc/org/apache/kafka/streams/test/TestRecord.html new file mode 100644 index 000000000..ea04e9d78 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/test/TestRecord.html @@ -0,0 +1,440 @@ + + + + +TestRecord (kafka 4.1.0 API) + + + + + + + + + + + + + + +

    Class TestRecord<K,V>

java.lang.Object
    org.apache.kafka.streams.test.TestRecord<K,V>

public class TestRecord<K,V>
extends Object
A key/value pair, including timestamp and record headers, to be sent to or received from TopologyTestDriver. If a record does not contain a timestamp, TestInputTopic will auto-advance its time when the record is piped.
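A hedged sketch of how TestRecord is typically used with TopologyTestDriver; the topology, properties, and topic names are assumptions, not part of this class:

    import java.time.Instant;
    import java.util.Properties;
    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.TestInputTopic;
    import org.apache.kafka.streams.TestOutputTopic;
    import org.apache.kafka.streams.Topology;
    import org.apache.kafka.streams.TopologyTestDriver;
    import org.apache.kafka.streams.test.TestRecord;

    // Sketch only: "input" and "output" are hypothetical topic names used by the topology under test.
    static void pipeOneRecord(final Topology topology, final Properties props) {
        try (TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
            final TestInputTopic<String, String> input = driver.createInputTopic(
                    "input", Serdes.String().serializer(), Serdes.String().serializer());
            final TestOutputTopic<String, String> output = driver.createOutputTopic(
                    "output", Serdes.String().deserializer(), Serdes.String().deserializer());

            // Explicit timestamp; without one, TestInputTopic auto-advances its time.
            input.pipeInput(new TestRecord<>("key-1", "value-1", Instant.parse("2024-01-01T00:00:00Z")));

            final TestRecord<String, String> result = output.readRecord();
            System.out.println(result.key() + " -> " + result.value() + " @ " + result.timestamp());
        }
    }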

      Constructor Details

TestRecord

public TestRecord(K key, V value, Headers headers, Instant recordTime)

Creates a record.

Parameters:
key - The key that will be included in the record
value - The value of the record
headers - The record headers that will be included in the record
recordTime - The timestamp of the record.

TestRecord

public TestRecord(K key, V value, Headers headers, Long timestampMs)

Creates a record.

Parameters:
key - The key that will be included in the record
value - The value of the record
headers - The record headers that will be included in the record
timestampMs - The timestamp of the record, in milliseconds since the beginning of the epoch.

TestRecord

public TestRecord(K key, V value, Instant recordTime)

Creates a record.

Parameters:
key - The key of the record
value - The value of the record
recordTime - The timestamp of the record as Instant.

TestRecord

public TestRecord(K key, V value, Headers headers)

Creates a record.

Parameters:
key - The key of the record
value - The value of the record
headers - The record headers that will be included in the record

TestRecord

public TestRecord(K key, V value)

Creates a record.

Parameters:
key - The key of the record
value - The value of the record

TestRecord

public TestRecord(V value)

Create a record with null key.

Parameters:
value - The value of the record
TestRecord

public TestRecord(ConsumerRecord<K,V> record)

Create a TestRecord from a ConsumerRecord.

Parameters:
record - The record contents

TestRecord

public TestRecord(ProducerRecord<K,V> record)

Create a TestRecord from a ProducerRecord.

Parameters:
record - The record contents

      Method Details

headers

public Headers headers()

Returns:
The headers.

key

public K key()

Returns:
The key (or null if no key is specified).

value

public V value()

Returns:
The value.

timestamp

public Long timestamp()

Returns:
The timestamp, which is in milliseconds since epoch.

getHeaders

public Headers getHeaders()

Returns:
The headers.

getKey

public K getKey()

Returns:
The key (or null if no key is specified).

getValue

public V getValue()

Returns:
The value.

getRecordTime

public Instant getRecordTime()

Returns:
The timestamp.

toString

public String toString()

Overrides:
toString in class Object

equals

public boolean equals(Object o)

Overrides:
equals in class Object

hashCode

public int hashCode()

Overrides:
hashCode in class Object
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/test/package-summary.html b/static/41/javadoc/org/apache/kafka/streams/test/package-summary.html new file mode 100644 index 000000000..8ee6e4e20 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/test/package-summary.html @@ -0,0 +1,120 @@ + + + + +org.apache.kafka.streams.test (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.streams.test

    +
    +
    +
    package org.apache.kafka.streams.test
    +
    +
    Provides classes for testing Kafka Streams applications with mocked inputs.
    +
    +
    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/streams/test/package-tree.html b/static/41/javadoc/org/apache/kafka/streams/test/package-tree.html new file mode 100644 index 000000000..fa3e7cd1e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/streams/test/package-tree.html @@ -0,0 +1,71 @@ + + + + +org.apache.kafka.streams.test Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.streams.test

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/tools/api/Decoder.html b/static/41/javadoc/org/apache/kafka/tools/api/Decoder.html new file mode 100644 index 000000000..f30c7adc1 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/tools/api/Decoder.html @@ -0,0 +1,134 @@ + + + + +Decoder (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface Decoder<T>

All Known Implementing Classes:
DefaultDecoder, IntegerDecoder, LongDecoder, StringDecoder

Functional Interface:
This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.

@FunctionalInterface
public interface Decoder<T>
    A decoder is a method of turning byte arrays into objects.
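Since it is a functional interface, a custom decoder can be supplied as a lambda; the uppercasing decoder below is a made-up example:

    import java.nio.charset.StandardCharsets;
    import org.apache.kafka.tools.api.Decoder;

    // Sketch only: a made-up decoder supplied as a lambda.
    static void lambdaDecoderExample() {
        final Decoder<String> upperCaseDecoder =
                bytes -> new String(bytes, StandardCharsets.UTF_8).toUpperCase();

        final String decoded = upperCaseDecoder.fromBytes("hello".getBytes(StandardCharsets.UTF_8));
        System.out.println(decoded);   // HELLO
    }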

      Method Summary

Modifier and Type    Method                    Description
T                    fromBytes(byte[] bytes)
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        fromBytes

        +
        T fromBytes(byte[] bytes)
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/tools/api/DefaultDecoder.html b/static/41/javadoc/org/apache/kafka/tools/api/DefaultDecoder.html new file mode 100644 index 000000000..05e4d83c9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/tools/api/DefaultDecoder.html @@ -0,0 +1,168 @@ + + + + +DefaultDecoder (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class DefaultDecoder

    +
    +
    java.lang.Object +
    org.apache.kafka.tools.api.DefaultDecoder
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Decoder<byte[]>
    +
    +
    +
    public class DefaultDecoder +extends Object +implements Decoder<byte[]>
    +
    The default implementation does nothing, just returns the same byte array it takes in.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        DefaultDecoder

        +
        public DefaultDecoder()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        fromBytes

        +
        public byte[] fromBytes(byte[] bytes)
        +
        +
        Specified by:
        +
        fromBytes in interface Decoder<byte[]>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/tools/api/IntegerDecoder.html b/static/41/javadoc/org/apache/kafka/tools/api/IntegerDecoder.html new file mode 100644 index 000000000..1be0db158 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/tools/api/IntegerDecoder.html @@ -0,0 +1,168 @@ + + + + +IntegerDecoder (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class IntegerDecoder

    +
    +
    java.lang.Object +
    org.apache.kafka.tools.api.IntegerDecoder
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Decoder<Integer>
    +
    +
    +
    public class IntegerDecoder +extends Object +implements Decoder<Integer>
    +
    The integer decoder translates bytes into integers.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        IntegerDecoder

        +
        public IntegerDecoder()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      + +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/tools/api/LongDecoder.html b/static/41/javadoc/org/apache/kafka/tools/api/LongDecoder.html new file mode 100644 index 000000000..8247d76de --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/tools/api/LongDecoder.html @@ -0,0 +1,168 @@ + + + + +LongDecoder (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class LongDecoder

    +
    +
    java.lang.Object +
    org.apache.kafka.tools.api.LongDecoder
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Decoder<Long>
    +
    +
    +
    public class LongDecoder +extends Object +implements Decoder<Long>
    +
    The long decoder translates bytes into longs.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        LongDecoder

        +
        public LongDecoder()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        fromBytes

        +
        public Long fromBytes(byte[] bytes)
        +
        +
        Specified by:
        +
        fromBytes in interface Decoder<Long>
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/tools/api/RecordReader.html b/static/41/javadoc/org/apache/kafka/tools/api/RecordReader.html new file mode 100644 index 000000000..c2a8ce6e9 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/tools/api/RecordReader.html @@ -0,0 +1,179 @@ + + + + +RecordReader (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Interface RecordReader

    +
    +
    +
    +
    All Superinterfaces:
    +
    AutoCloseable, Closeable, Configurable
    +
    +
    +
public interface RecordReader
extends Closeable, Configurable

Typical implementations of this interface convert data from an `InputStream` received via `readRecords` into an iterator of `ProducerRecord` instances. Note that implementations must have a public nullary constructor.

This is used by `org.apache.kafka.tools.ConsoleProducer`.
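A minimal sketch of an implementation, assuming a line-per-record input format and a "topic" config key (neither is part of the RecordReader contract):

    import java.io.BufferedReader;
    import java.io.InputStream;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;
    import java.util.Iterator;
    import java.util.Map;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.tools.api.RecordReader;

    public class LineRecordReader implements RecordReader {
        private String topic = "test";   // hypothetical default topic

        @Override
        public void configure(final Map<String, ?> configs) {
            final Object t = configs.get("topic");
            if (t != null) {
                topic = t.toString();
            }
        }

        @Override
        public Iterator<ProducerRecord<byte[], byte[]>> readRecords(final InputStream inputStream) {
            // One producer record per input line; the caller closes the stream.
            final BufferedReader reader =
                    new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
            return reader.lines()
                    .map(line -> new ProducerRecord<byte[], byte[]>(
                            topic, line.getBytes(StandardCharsets.UTF_8)))
                    .iterator();
        }
    }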
    +
      + +
    • +
      +

      Method Summary

      +
      +
      +
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      +
      default void
      + +
      +
      Closes this reader.
      +
      +
      default void
      +
      configure(Map<String,?> configs)
      +
      +
      Configure this class with the given key-value pairs
      +
      +
      Iterator<ProducerRecord<byte[],byte[]>>
      +
      readRecords(InputStream inputStream)
      +
      +
      read byte array from input stream and then generate an iterator of producer record
      +
      +
      +
      +
      +
      +
    • +
    +
    +
    +
      + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        configure

        +
        default void configure(Map<String,?> configs)
        +
        Description copied from interface: Configurable
        +
        Configure this class with the given key-value pairs
        +
        +
        Specified by:
        +
        configure in interface Configurable
        +
        +
        +
      • +
      • +
        +

        readRecords

        +
        Iterator<ProducerRecord<byte[],byte[]>> readRecords(InputStream inputStream)
        +
Reads byte arrays from the input stream and generates an iterator of producer records.

Parameters:
inputStream - InputStream of messages. The implementation does not need to close the input stream.
Returns:
an iterator of producer records. It should follow these rules: 1) the hasNext() method must be idempotent; 2) conversion errors should be thrown by the next() method.
        +
        +
        +
      • +
      • +
        +

        close

        +
        default void close()
        +
Closes this reader. This method is invoked when the iterator returned by readRecords either has no more records or throws an exception.
        +
        +
        Specified by:
        +
        close in interface AutoCloseable
        +
        Specified by:
        +
        close in interface Closeable
        +
        +
        +
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/tools/api/StringDecoder.html b/static/41/javadoc/org/apache/kafka/tools/api/StringDecoder.html new file mode 100644 index 000000000..dc6c3cc28 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/tools/api/StringDecoder.html @@ -0,0 +1,168 @@ + + + + +StringDecoder (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    + +
    + +

    Class StringDecoder

    +
    +
    java.lang.Object +
    org.apache.kafka.tools.api.StringDecoder
    +
    +
    +
    +
    All Implemented Interfaces:
    +
    Decoder<String>
    +
    +
    +
    public class StringDecoder +extends Object +implements Decoder<String>
    +
    The string decoder translates bytes into strings. It uses UTF8 by default.
    +
    +
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        StringDecoder

        +
        public StringDecoder()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      + +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/tools/api/package-summary.html b/static/41/javadoc/org/apache/kafka/tools/api/package-summary.html new file mode 100644 index 000000000..12dcad49e --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/tools/api/package-summary.html @@ -0,0 +1,114 @@ + + + + +org.apache.kafka.tools.api (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Package org.apache.kafka.tools.api

    +
    +
    +
package org.apache.kafka.tools.api

Provides interfaces for writing plugins for Kafka tools.

Class            Description
Decoder<T>       A decoder is a method of turning byte arrays into objects.
DefaultDecoder   The default implementation does nothing, just returns the same byte array it takes in.
IntegerDecoder   The integer decoder translates bytes into integers.
LongDecoder      The long decoder translates bytes into longs.
RecordReader     Typical implementations of this interface convert data from an `InputStream` received via `readRecords` into an iterator of `ProducerRecord` instances.
StringDecoder    The string decoder translates bytes into strings.
    • +
    +
    +
    +
    +
    + + diff --git a/static/41/javadoc/org/apache/kafka/tools/api/package-tree.html b/static/41/javadoc/org/apache/kafka/tools/api/package-tree.html new file mode 100644 index 000000000..a060d2cb4 --- /dev/null +++ b/static/41/javadoc/org/apache/kafka/tools/api/package-tree.html @@ -0,0 +1,94 @@ + + + + +org.apache.kafka.tools.api Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For Package org.apache.kafka.tools.api

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/overview-summary.html b/static/41/javadoc/overview-summary.html new file mode 100644 index 000000000..1199e3eea --- /dev/null +++ b/static/41/javadoc/overview-summary.html @@ -0,0 +1,25 @@ + + + + +kafka 4.1.0 API + + + + + + + + + + +
    + +

    index.html

    +
    + + diff --git a/static/41/javadoc/overview-tree.html b/static/41/javadoc/overview-tree.html new file mode 100644 index 000000000..38e2f20c7 --- /dev/null +++ b/static/41/javadoc/overview-tree.html @@ -0,0 +1,1452 @@ + + + + +Class Hierarchy (kafka 4.1.0 API) + + + + + + + + + + + + + + +
    + +
    +
    +
    +

    Hierarchy For All Packages

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    +
    +

    Interface Hierarchy

    + +
    +
    +

    Annotation Interface Hierarchy

    + +
    +
    +

    Enum Class Hierarchy

    + +
    +
    +
    +
    + + diff --git a/static/41/javadoc/package-search-index.js b/static/41/javadoc/package-search-index.js new file mode 100644 index 000000000..aeef0c1ed --- /dev/null +++ b/static/41/javadoc/package-search-index.js @@ -0,0 +1 @@ +packageSearchIndex = [{"l":"All Packages","u":"allpackages-index.html"},{"l":"org.apache.kafka.clients.admin"},{"l":"org.apache.kafka.clients.consumer"},{"l":"org.apache.kafka.clients.producer"},{"l":"org.apache.kafka.common"},{"l":"org.apache.kafka.common.acl"},{"l":"org.apache.kafka.common.annotation"},{"l":"org.apache.kafka.common.config"},{"l":"org.apache.kafka.common.config.provider"},{"l":"org.apache.kafka.common.errors"},{"l":"org.apache.kafka.common.header"},{"l":"org.apache.kafka.common.metrics"},{"l":"org.apache.kafka.common.metrics.stats"},{"l":"org.apache.kafka.common.quota"},{"l":"org.apache.kafka.common.resource"},{"l":"org.apache.kafka.common.security.auth"},{"l":"org.apache.kafka.common.security.oauthbearer"},{"l":"org.apache.kafka.common.security.plain"},{"l":"org.apache.kafka.common.security.scram"},{"l":"org.apache.kafka.common.security.token.delegation"},{"l":"org.apache.kafka.common.serialization"},{"l":"org.apache.kafka.connect.components"},{"l":"org.apache.kafka.connect.connector"},{"l":"org.apache.kafka.connect.connector.policy"},{"l":"org.apache.kafka.connect.data"},{"l":"org.apache.kafka.connect.errors"},{"l":"org.apache.kafka.connect.header"},{"l":"org.apache.kafka.connect.health"},{"l":"org.apache.kafka.connect.mirror"},{"l":"org.apache.kafka.connect.rest"},{"l":"org.apache.kafka.connect.sink"},{"l":"org.apache.kafka.connect.source"},{"l":"org.apache.kafka.connect.storage"},{"l":"org.apache.kafka.connect.tools"},{"l":"org.apache.kafka.connect.transforms"},{"l":"org.apache.kafka.connect.transforms.predicates"},{"l":"org.apache.kafka.connect.util"},{"l":"org.apache.kafka.coordinator.group.api.assignor"},{"l":"org.apache.kafka.server.authorizer"},{"l":"org.apache.kafka.server.log.remote.storage"},{"l":"org.apache.kafka.server.policy"},{"l":"org.apache.kafka.server.quota"},{"l":"org.apache.kafka.server.telemetry"},{"l":"org.apache.kafka.streams"},{"l":"org.apache.kafka.streams.errors"},{"l":"org.apache.kafka.streams.kstream"},{"l":"org.apache.kafka.streams.processor"},{"l":"org.apache.kafka.streams.processor.api"},{"l":"org.apache.kafka.streams.processor.assignment"},{"l":"org.apache.kafka.streams.processor.assignment.assignors"},{"l":"org.apache.kafka.streams.query"},{"l":"org.apache.kafka.streams.state"},{"l":"org.apache.kafka.streams.test"},{"l":"org.apache.kafka.tools.api"}];updateSearchResults(); \ No newline at end of file diff --git a/static/41/javadoc/resources/glass.png b/static/41/javadoc/resources/glass.png new file mode 100644 index 000000000..a7f591f46 Binary files /dev/null and b/static/41/javadoc/resources/glass.png differ diff --git a/static/41/javadoc/resources/x.png b/static/41/javadoc/resources/x.png new file mode 100644 index 000000000..30548a756 Binary files /dev/null and b/static/41/javadoc/resources/x.png differ diff --git a/static/41/javadoc/script-dir/jquery-3.7.1.min.js b/static/41/javadoc/script-dir/jquery-3.7.1.min.js new file mode 100644 index 000000000..7f37b5d99 --- /dev/null +++ b/static/41/javadoc/script-dir/jquery-3.7.1.min.js @@ -0,0 +1,2 @@ +/*! 
jQuery v3.7.1 | (c) OpenJS Foundation and other contributors | jquery.org/license */ +!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(ie,e){"use strict";var oe=[],r=Object.getPrototypeOf,ae=oe.slice,g=oe.flat?function(e){return oe.flat.call(e)}:function(e){return oe.concat.apply([],e)},s=oe.push,se=oe.indexOf,n={},i=n.toString,ue=n.hasOwnProperty,o=ue.toString,a=o.call(Object),le={},v=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType&&"function"!=typeof e.item},y=function(e){return null!=e&&e===e.window},C=ie.document,u={type:!0,src:!0,nonce:!0,noModule:!0};function m(e,t,n){var r,i,o=(n=n||C).createElement("script");if(o.text=e,t)for(r in u)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function x(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[i.call(e)]||"object":typeof e}var t="3.7.1",l=/HTML$/i,ce=function(e,t){return new ce.fn.init(e,t)};function c(e){var t=!!e&&"length"in e&&e.length,n=x(e);return!v(e)&&!y(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+ge+")"+ge+"*"),x=new RegExp(ge+"|>"),j=new RegExp(g),A=new RegExp("^"+t+"$"),D={ID:new RegExp("^#("+t+")"),CLASS:new RegExp("^\\.("+t+")"),TAG:new RegExp("^("+t+"|[*])"),ATTR:new RegExp("^"+p),PSEUDO:new RegExp("^"+g),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+ge+"*(even|odd|(([+-]|)(\\d*)n|)"+ge+"*(?:([+-]|)"+ge+"*(\\d+)|))"+ge+"*\\)|)","i"),bool:new RegExp("^(?:"+f+")$","i"),needsContext:new RegExp("^"+ge+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+ge+"*((?:-\\d)?\\d*)"+ge+"*\\)|)(?=[^-]|$)","i")},N=/^(?:input|select|textarea|button)$/i,q=/^h\d$/i,L=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,H=/[+~]/,O=new RegExp("\\\\[\\da-fA-F]{1,6}"+ge+"?|\\\\([^\\r\\n\\f])","g"),P=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},M=function(){V()},R=J(function(e){return!0===e.disabled&&fe(e,"fieldset")},{dir:"parentNode",next:"legend"});try{k.apply(oe=ae.call(ye.childNodes),ye.childNodes),oe[ye.childNodes.length].nodeType}catch(e){k={apply:function(e,t){me.apply(e,ae.call(t))},call:function(e){me.apply(e,ae.call(arguments,1))}}}function I(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(V(e),e=e||T,C)){if(11!==p&&(u=L.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return k.call(n,a),n}else if(f&&(a=f.getElementById(i))&&I.contains(e,a)&&a.id===i)return k.call(n,a),n}else{if(u[2])return k.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&e.getElementsByClassName)return k.apply(n,e.getElementsByClassName(i)),n}if(!(h[t+" "]||d&&d.test(t))){if(c=t,f=e,1===p&&(x.test(t)||m.test(t))){(f=H.test(t)&&U(e.parentNode)||e)==e&&le.scope||((s=e.getAttribute("id"))?s=ce.escapeSelector(s):e.setAttribute("id",s=S)),o=(l=Y(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+Q(l[o]);c=l.join(",")}try{return k.apply(n,f.querySelectorAll(c)),n}catch(e){h(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return re(t.replace(ve,"$1"),e,n,r)}function W(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function F(e){return e[S]=!0,e}function $(e){var 
t=T.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function B(t){return function(e){return fe(e,"input")&&e.type===t}}function _(t){return function(e){return(fe(e,"input")||fe(e,"button"))&&e.type===t}}function z(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&R(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function X(a){return F(function(o){return o=+o,F(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function U(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}function V(e){var t,n=e?e.ownerDocument||e:ye;return n!=T&&9===n.nodeType&&n.documentElement&&(r=(T=n).documentElement,C=!ce.isXMLDoc(T),i=r.matches||r.webkitMatchesSelector||r.msMatchesSelector,r.msMatchesSelector&&ye!=T&&(t=T.defaultView)&&t.top!==t&&t.addEventListener("unload",M),le.getById=$(function(e){return r.appendChild(e).id=ce.expando,!T.getElementsByName||!T.getElementsByName(ce.expando).length}),le.disconnectedMatch=$(function(e){return i.call(e,"*")}),le.scope=$(function(){return T.querySelectorAll(":scope")}),le.cssHas=$(function(){try{return T.querySelector(":has(*,:jqfake)"),!1}catch(e){return!0}}),le.getById?(b.filter.ID=function(e){var t=e.replace(O,P);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&C){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(O,P);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&C){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):t.querySelectorAll(e)},b.find.CLASS=function(e,t){if("undefined"!=typeof t.getElementsByClassName&&C)return t.getElementsByClassName(e)},d=[],$(function(e){var t;r.appendChild(e).innerHTML="",e.querySelectorAll("[selected]").length||d.push("\\["+ge+"*(?:value|"+f+")"),e.querySelectorAll("[id~="+S+"-]").length||d.push("~="),e.querySelectorAll("a#"+S+"+*").length||d.push(".#.+[+~]"),e.querySelectorAll(":checked").length||d.push(":checked"),(t=T.createElement("input")).setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),r.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&d.push(":enabled",":disabled"),(t=T.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||d.push("\\["+ge+"*name"+ge+"*="+ge+"*(?:''|\"\")")}),le.cssHas||d.push(":has"),d=d.length&&new RegExp(d.join("|")),l=function(e,t){if(e===t)return a=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!le.sortDetached&&t.compareDocumentPosition(e)===n?e===T||e.ownerDocument==ye&&I.contains(ye,e)?-1:t===T||t.ownerDocument==ye&&I.contains(ye,t)?1:o?se.call(o,e)-se.call(o,t):0:4&n?-1:1)}),T}for(e in I.matches=function(e,t){return I(e,null,null,t)},I.matchesSelector=function(e,t){if(V(e),C&&!h[t+" "]&&(!d||!d.test(t)))try{var 
n=i.call(e,t);if(n||le.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){h(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(O,P),e[3]=(e[3]||e[4]||e[5]||"").replace(O,P),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||I.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&I.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return D.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&j.test(n)&&(t=Y(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(O,P).toLowerCase();return"*"===e?function(){return!0}:function(e){return fe(e,t)}},CLASS:function(e){var t=s[e+" "];return t||(t=new RegExp("(^|"+ge+")"+e+"("+ge+"|$)"))&&s(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=I.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function T(e,n,r){return v(n)?ce.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?ce.grep(e,function(e){return e===n!==r}):"string"!=typeof n?ce.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(ce.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||k,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:S.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof ce?t[0]:t,ce.merge(this,ce.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:C,!0)),w.test(r[1])&&ce.isPlainObject(t))for(r in t)v(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=C.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):v(e)?void 0!==n.ready?n.ready(e):e(ce):ce.makeArray(e,this)}).prototype=ce.fn,k=ce(C);var E=/^(?:parents|prev(?:Until|All))/,j={children:!0,contents:!0,next:!0,prev:!0};function A(e,t){while((e=e[t])&&1!==e.nodeType);return e}ce.fn.extend({has:function(e){var t=ce(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,Ce=/^$|^module$|\/(?:java|ecma)script/i;xe=C.createDocumentFragment().appendChild(C.createElement("div")),(be=C.createElement("input")).setAttribute("type","radio"),be.setAttribute("checked","checked"),be.setAttribute("name","t"),xe.appendChild(be),le.checkClone=xe.cloneNode(!0).cloneNode(!0).lastChild.checked,xe.innerHTML="",le.noCloneChecked=!!xe.cloneNode(!0).lastChild.defaultValue,xe.innerHTML="",le.option=!!xe.lastChild;var ke={thead:[1,"","
    "],col:[2,"","
    "],tr:[2,"","
    "],td:[3,"","
    "],_default:[0,"",""]};function Se(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&fe(e,t)?ce.merge([e],n):n}function Ee(e,t){for(var n=0,r=e.length;n",""]);var je=/<|&#?\w+;/;function Ae(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function Re(e,t){return fe(e,"table")&&fe(11!==t.nodeType?t:t.firstChild,"tr")&&ce(e).children("tbody")[0]||e}function Ie(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function We(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Fe(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(_.hasData(e)&&(s=_.get(e).events))for(i in _.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),C.head.appendChild(r[0])},abort:function(){i&&i()}}});var Jt,Kt=[],Zt=/(=)\?(?=&|$)|\?\?/;ce.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Kt.pop()||ce.expando+"_"+jt.guid++;return this[e]=!0,e}}),ce.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Zt.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Zt.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=v(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Zt,"$1"+r):!1!==e.jsonp&&(e.url+=(At.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||ce.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=ie[r],ie[r]=function(){o=arguments},n.always(function(){void 0===i?ce(ie).removeProp(r):ie[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Kt.push(r)),o&&v(i)&&i(o[0]),o=i=void 0}),"script"}),le.createHTMLDocument=((Jt=C.implementation.createHTMLDocument("").body).innerHTML="
    ",2===Jt.childNodes.length),ce.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(le.createHTMLDocument?((r=(t=C.implementation.createHTMLDocument("")).createElement("base")).href=C.location.href,t.head.appendChild(r)):t=C),o=!n&&[],(i=w.exec(e))?[t.createElement(i[1])]:(i=Ae([e],t,o),o&&o.length&&ce(o).remove(),ce.merge([],i.childNodes)));var r,i,o},ce.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(ce.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},ce.expr.pseudos.animated=function(t){return ce.grep(ce.timers,function(e){return t===e.elem}).length},ce.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=ce.css(e,"position"),c=ce(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=ce.css(e,"top"),u=ce.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),v(t)&&(t=t.call(e,n,ce.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},ce.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){ce.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===ce.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===ce.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=ce(e).offset()).top+=ce.css(e,"borderTopWidth",!0),i.left+=ce.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-ce.css(r,"marginTop",!0),left:t.left-i.left-ce.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===ce.css(e,"position"))e=e.offsetParent;return e||J})}}),ce.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;ce.fn[t]=function(e){return M(this,function(e,t,n){var r;if(y(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),ce.each(["top","left"],function(e,n){ce.cssHooks[n]=Ye(le.pixelPosition,function(e,t){if(t)return t=Ge(e,n),_e.test(t)?ce(e).position()[n]+"px":t})}),ce.each({Height:"height",Width:"width"},function(a,s){ce.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){ce.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return M(this,function(e,t,n){var r;return y(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?ce.css(e,t,i):ce.style(e,t,n,i)},s,n?e:void 0,n)}})}),ce.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){ce.fn[t]=function(e){return this.on(t,e)}}),ce.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 
1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return this.on("mouseenter",e).on("mouseleave",t||e)}}),ce.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){ce.fn[n]=function(e,t){return 0",options:{classes:{},disabled:!1,create:null},_createWidget:function(t,e){e=x(e||this.defaultElement||this)[0],this.element=x(e),this.uuid=i++,this.eventNamespace="."+this.widgetName+this.uuid,this.bindings=x(),this.hoverable=x(),this.focusable=x(),this.classesElementLookup={},e!==this&&(x.data(e,this.widgetFullName,this),this._on(!0,this.element,{remove:function(t){t.target===e&&this.destroy()}}),this.document=x(e.style?e.ownerDocument:e.document||e),this.window=x(this.document[0].defaultView||this.document[0].parentWindow)),this.options=x.widget.extend({},this.options,this._getCreateOptions(),t),this._create(),this.options.disabled&&this._setOptionDisabled(this.options.disabled),this._trigger("create",null,this._getCreateEventData()),this._init()},_getCreateOptions:function(){return{}},_getCreateEventData:x.noop,_create:x.noop,_init:x.noop,destroy:function(){var i=this;this._destroy(),x.each(this.classesElementLookup,function(t,e){i._removeClass(e,t)}),this.element.off(this.eventNamespace).removeData(this.widgetFullName),this.widget().off(this.eventNamespace).removeAttr("aria-disabled"),this.bindings.off(this.eventNamespace)},_destroy:x.noop,widget:function(){return this.element},option:function(t,e){var i,s,n,o=t;if(0===arguments.length)return x.widget.extend({},this.options);if("string"==typeof t)if(o={},t=(i=t.split(".")).shift(),i.length){for(s=o[t]=x.widget.extend({},this.options[t]),n=0;n
    "),i=e.children()[0];return x("body").append(e),t=i.offsetWidth,e.css("overflow","scroll"),t===(i=i.offsetWidth)&&(i=e[0].clientWidth),e.remove(),s=t-i},getScrollInfo:function(t){var e=t.isWindow||t.isDocument?"":t.element.css("overflow-x"),i=t.isWindow||t.isDocument?"":t.element.css("overflow-y"),e="scroll"===e||"auto"===e&&t.widthC(E(s),E(n))?o.important="horizontal":o.important="vertical",c.using.call(this,t,o)}),l.offset(x.extend(u,{using:t}))})},x.ui.position={fit:{left:function(t,e){var i=e.within,s=i.isWindow?i.scrollLeft:i.offset.left,n=i.width,o=t.left-e.collisionPosition.marginLeft,l=s-o,a=o+e.collisionWidth-n-s;e.collisionWidth>n?0n?0",delay:300,options:{icons:{submenu:"ui-icon-caret-1-e"},items:"> *",menus:"ul",position:{my:"left top",at:"right top"},role:"menu",blur:null,focus:null,select:null},_create:function(){this.activeMenu=this.element,this.mouseHandled=!1,this.lastMousePosition={x:null,y:null},this.element.uniqueId().attr({role:this.options.role,tabIndex:0}),this._addClass("ui-menu","ui-widget ui-widget-content"),this._on({"mousedown .ui-menu-item":function(t){t.preventDefault(),this._activateItem(t)},"click .ui-menu-item":function(t){var e=x(t.target),i=x(x.ui.safeActiveElement(this.document[0]));!this.mouseHandled&&e.not(".ui-state-disabled").length&&(this.select(t),t.isPropagationStopped()||(this.mouseHandled=!0),e.has(".ui-menu").length?this.expand(t):!this.element.is(":focus")&&i.closest(".ui-menu").length&&(this.element.trigger("focus",[!0]),this.active&&1===this.active.parents(".ui-menu").length&&clearTimeout(this.timer)))},"mouseenter .ui-menu-item":"_activateItem","mousemove .ui-menu-item":"_activateItem",mouseleave:"collapseAll","mouseleave .ui-menu":"collapseAll",focus:function(t,e){var i=this.active||this._menuItems().first();e||this.focus(t,i)},blur:function(t){this._delay(function(){x.contains(this.element[0],x.ui.safeActiveElement(this.document[0]))||this.collapseAll(t)})},keydown:"_keydown"}),this.refresh(),this._on(this.document,{click:function(t){this._closeOnDocumentClick(t)&&this.collapseAll(t,!0),this.mouseHandled=!1}})},_activateItem:function(t){var e,i;this.previousFilter||t.clientX===this.lastMousePosition.x&&t.clientY===this.lastMousePosition.y||(this.lastMousePosition={x:t.clientX,y:t.clientY},e=x(t.target).closest(".ui-menu-item"),i=x(t.currentTarget),e[0]===i[0]&&(i.is(".ui-state-active")||(this._removeClass(i.siblings().children(".ui-state-active"),null,"ui-state-active"),this.focus(t,i))))},_destroy:function(){var t=this.element.find(".ui-menu-item").removeAttr("role aria-disabled").children(".ui-menu-item-wrapper").removeUniqueId().removeAttr("tabIndex role aria-haspopup");this.element.removeAttr("aria-activedescendant").find(".ui-menu").addBack().removeAttr("role aria-labelledby aria-expanded aria-hidden aria-disabled tabIndex").removeUniqueId().show(),t.children().each(function(){var t=x(this);t.data("ui-menu-submenu-caret")&&t.remove()})},_keydown:function(t){var e,i,s,n=!0;switch(t.keyCode){case x.ui.keyCode.PAGE_UP:this.previousPage(t);break;case x.ui.keyCode.PAGE_DOWN:this.nextPage(t);break;case x.ui.keyCode.HOME:this._move("first","first",t);break;case x.ui.keyCode.END:this._move("last","last",t);break;case x.ui.keyCode.UP:this.previous(t);break;case x.ui.keyCode.DOWN:this.next(t);break;case x.ui.keyCode.LEFT:this.collapse(t);break;case x.ui.keyCode.RIGHT:this.active&&!this.active.is(".ui-state-disabled")&&this.expand(t);break;case x.ui.keyCode.ENTER:case x.ui.keyCode.SPACE:this._activate(t);break;case 
x.ui.keyCode.ESCAPE:this.collapse(t);break;default:e=this.previousFilter||"",s=n=!1,i=96<=t.keyCode&&t.keyCode<=105?(t.keyCode-96).toString():String.fromCharCode(t.keyCode),clearTimeout(this.filterTimer),i===e?s=!0:i=e+i,e=this._filterMenuItems(i),(e=s&&-1!==e.index(this.active.next())?this.active.nextAll(".ui-menu-item"):e).length||(i=String.fromCharCode(t.keyCode),e=this._filterMenuItems(i)),e.length?(this.focus(t,e),this.previousFilter=i,this.filterTimer=this._delay(function(){delete this.previousFilter},1e3)):delete this.previousFilter}n&&t.preventDefault()},_activate:function(t){this.active&&!this.active.is(".ui-state-disabled")&&(this.active.children("[aria-haspopup='true']").length?this.expand(t):this.select(t))},refresh:function(){var t,e,s=this,n=this.options.icons.submenu,i=this.element.find(this.options.menus);this._toggleClass("ui-menu-icons",null,!!this.element.find(".ui-icon").length),e=i.filter(":not(.ui-menu)").hide().attr({role:this.options.role,"aria-hidden":"true","aria-expanded":"false"}).each(function(){var t=x(this),e=t.prev(),i=x("").data("ui-menu-submenu-caret",!0);s._addClass(i,"ui-menu-icon","ui-icon "+n),e.attr("aria-haspopup","true").prepend(i),t.attr("aria-labelledby",e.attr("id"))}),this._addClass(e,"ui-menu","ui-widget ui-widget-content ui-front"),(t=i.add(this.element).find(this.options.items)).not(".ui-menu-item").each(function(){var t=x(this);s._isDivider(t)&&s._addClass(t,"ui-menu-divider","ui-widget-content")}),i=(e=t.not(".ui-menu-item, .ui-menu-divider")).children().not(".ui-menu").uniqueId().attr({tabIndex:-1,role:this._itemRole()}),this._addClass(e,"ui-menu-item")._addClass(i,"ui-menu-item-wrapper"),t.filter(".ui-state-disabled").attr("aria-disabled","true"),this.active&&!x.contains(this.element[0],this.active[0])&&this.blur()},_itemRole:function(){return{menu:"menuitem",listbox:"option"}[this.options.role]},_setOption:function(t,e){var i;"icons"===t&&(i=this.element.find(".ui-menu-icon"),this._removeClass(i,null,this.options.icons.submenu)._addClass(i,null,e.submenu)),this._super(t,e)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",String(t)),this._toggleClass(null,"ui-state-disabled",!!t)},focus:function(t,e){var i;this.blur(t,t&&"focus"===t.type),this._scrollIntoView(e),this.active=e.first(),i=this.active.children(".ui-menu-item-wrapper"),this._addClass(i,null,"ui-state-active"),this.options.role&&this.element.attr("aria-activedescendant",i.attr("id")),i=this.active.parent().closest(".ui-menu-item").children(".ui-menu-item-wrapper"),this._addClass(i,null,"ui-state-active"),t&&"keydown"===t.type?this._close():this.timer=this._delay(function(){this._close()},this.delay),(i=e.children(".ui-menu")).length&&t&&/^mouse/.test(t.type)&&this._startOpening(i),this.activeMenu=e.parent(),this._trigger("focus",t,{item:e})},_scrollIntoView:function(t){var e,i,s;this._hasScroll()&&(i=parseFloat(x.css(this.activeMenu[0],"borderTopWidth"))||0,s=parseFloat(x.css(this.activeMenu[0],"paddingTop"))||0,e=t.offset().top-this.activeMenu.offset().top-i-s,i=this.activeMenu.scrollTop(),s=this.activeMenu.height(),t=t.outerHeight(),e<0?this.activeMenu.scrollTop(i+e):s",options:{appendTo:null,autoFocus:!1,delay:300,minLength:1,position:{my:"left top",at:"left bottom",collision:"none"},source:null,change:null,close:null,focus:null,open:null,response:null,search:null,select:null},requestIndex:0,pending:0,liveRegionTimer:null,_create:function(){var 
i,s,n,t=this.element[0].nodeName.toLowerCase(),e="textarea"===t,t="input"===t;this.isMultiLine=e||!t&&this._isContentEditable(this.element),this.valueMethod=this.element[e||t?"val":"text"],this.isNewMenu=!0,this._addClass("ui-autocomplete-input"),this.element.attr("autocomplete","off"),this._on(this.element,{keydown:function(t){if(this.element.prop("readOnly"))s=n=i=!0;else{s=n=i=!1;var e=x.ui.keyCode;switch(t.keyCode){case e.PAGE_UP:i=!0,this._move("previousPage",t);break;case e.PAGE_DOWN:i=!0,this._move("nextPage",t);break;case e.UP:i=!0,this._keyEvent("previous",t);break;case e.DOWN:i=!0,this._keyEvent("next",t);break;case e.ENTER:this.menu.active&&(i=!0,t.preventDefault(),this.menu.select(t));break;case e.TAB:this.menu.active&&this.menu.select(t);break;case e.ESCAPE:this.menu.element.is(":visible")&&(this.isMultiLine||this._value(this.term),this.close(t),t.preventDefault());break;default:s=!0,this._searchTimeout(t)}}},keypress:function(t){if(i)return i=!1,void(this.isMultiLine&&!this.menu.element.is(":visible")||t.preventDefault());if(!s){var e=x.ui.keyCode;switch(t.keyCode){case e.PAGE_UP:this._move("previousPage",t);break;case e.PAGE_DOWN:this._move("nextPage",t);break;case e.UP:this._keyEvent("previous",t);break;case e.DOWN:this._keyEvent("next",t)}}},input:function(t){if(n)return n=!1,void t.preventDefault();this._searchTimeout(t)},focus:function(){this.selectedItem=null,this.previous=this._value()},blur:function(t){clearTimeout(this.searching),this.close(t),this._change(t)}}),this._initSource(),this.menu=x("
      ").appendTo(this._appendTo()).menu({role:null}).hide().attr({unselectable:"on"}).menu("instance"),this._addClass(this.menu.element,"ui-autocomplete","ui-front"),this._on(this.menu.element,{mousedown:function(t){t.preventDefault()},menufocus:function(t,e){var i,s;if(this.isNewMenu&&(this.isNewMenu=!1,t.originalEvent&&/^mouse/.test(t.originalEvent.type)))return this.menu.blur(),void this.document.one("mousemove",function(){x(t.target).trigger(t.originalEvent)});s=e.item.data("ui-autocomplete-item"),!1!==this._trigger("focus",t,{item:s})&&t.originalEvent&&/^key/.test(t.originalEvent.type)&&this._value(s.value),(i=e.item.attr("aria-label")||s.value)&&String.prototype.trim.call(i).length&&(clearTimeout(this.liveRegionTimer),this.liveRegionTimer=this._delay(function(){this.liveRegion.html(x("
      ").text(i))},100))},menuselect:function(t,e){var i=e.item.data("ui-autocomplete-item"),s=this.previous;this.element[0]!==x.ui.safeActiveElement(this.document[0])&&(this.element.trigger("focus"),this.previous=s,this._delay(function(){this.previous=s,this.selectedItem=i})),!1!==this._trigger("select",t,{item:i})&&this._value(i.value),this.term=this._value(),this.close(t),this.selectedItem=i}}),this.liveRegion=x("
      ",{role:"status","aria-live":"assertive","aria-relevant":"additions"}).appendTo(this.document[0].body),this._addClass(this.liveRegion,null,"ui-helper-hidden-accessible"),this._on(this.window,{beforeunload:function(){this.element.removeAttr("autocomplete")}})},_destroy:function(){clearTimeout(this.searching),this.element.removeAttr("autocomplete"),this.menu.element.remove(),this.liveRegion.remove()},_setOption:function(t,e){this._super(t,e),"source"===t&&this._initSource(),"appendTo"===t&&this.menu.element.appendTo(this._appendTo()),"disabled"===t&&e&&this.xhr&&this.xhr.abort()},_isEventTargetInWidget:function(t){var e=this.menu.element[0];return t.target===this.element[0]||t.target===e||x.contains(e,t.target)},_closeOnClickOutside:function(t){this._isEventTargetInWidget(t)||this.close()},_appendTo:function(){var t=this.options.appendTo;return t=!(t=!(t=t&&(t.jquery||t.nodeType?x(t):this.document.find(t).eq(0)))||!t[0]?this.element.closest(".ui-front, dialog"):t).length?this.document[0].body:t},_initSource:function(){var i,s,n=this;Array.isArray(this.options.source)?(i=this.options.source,this.source=function(t,e){e(x.ui.autocomplete.filter(i,t.term))}):"string"==typeof this.options.source?(s=this.options.source,this.source=function(t,e){n.xhr&&n.xhr.abort(),n.xhr=x.ajax({url:s,data:t,dataType:"json",success:function(t){e(t)},error:function(){e([])}})}):this.source=this.options.source},_searchTimeout:function(s){clearTimeout(this.searching),this.searching=this._delay(function(){var t=this.term===this._value(),e=this.menu.element.is(":visible"),i=s.altKey||s.ctrlKey||s.metaKey||s.shiftKey;t&&(e||i)||(this.selectedItem=null,this.search(null,s))},this.options.delay)},search:function(t,e){return t=null!=t?t:this._value(),this.term=this._value(),t.length").append(x("
      ").text(e.label)).appendTo(t)},_move:function(t,e){if(this.menu.element.is(":visible"))return this.menu.isFirstItem()&&/^previous/.test(t)||this.menu.isLastItem()&&/^next/.test(t)?(this.isMultiLine||this._value(this.term),void this.menu.blur()):void this.menu[t](e);this.search(null,e)},widget:function(){return this.menu.element},_value:function(){return this.valueMethod.apply(this.element,arguments)},_keyEvent:function(t,e){this.isMultiLine&&!this.menu.element.is(":visible")||(this._move(t,e),e.preventDefault())},_isContentEditable:function(t){if(!t.length)return!1;var e=t.prop("contentEditable");return"inherit"===e?this._isContentEditable(t.parent()):"true"===e}}),x.extend(x.ui.autocomplete,{escapeRegex:function(t){return t.replace(/[\-\[\]{}()*+?.,\\\^$|#\s]/g,"\\$&")},filter:function(t,e){var i=new RegExp(x.ui.autocomplete.escapeRegex(e),"i");return x.grep(t,function(t){return i.test(t.label||t.value||t)})}}),x.widget("ui.autocomplete",x.ui.autocomplete,{options:{messages:{noResults:"No search results.",results:function(t){return t+(1").text(e))},100))}});x.ui.autocomplete}); \ No newline at end of file diff --git a/static/41/javadoc/script.js b/static/41/javadoc/script.js new file mode 100644 index 000000000..73cd8faac --- /dev/null +++ b/static/41/javadoc/script.js @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +var moduleSearchIndex; +var packageSearchIndex; +var typeSearchIndex; +var memberSearchIndex; +var tagSearchIndex; +function loadScripts(doc, tag) { + createElem(doc, tag, 'search.js'); + + createElem(doc, tag, 'module-search-index.js'); + createElem(doc, tag, 'package-search-index.js'); + createElem(doc, tag, 'type-search-index.js'); + createElem(doc, tag, 'member-search-index.js'); + createElem(doc, tag, 'tag-search-index.js'); +} + +function createElem(doc, tag, path) { + var script = doc.createElement(tag); + var scriptElement = doc.getElementsByTagName(tag)[0]; + script.src = pathtoroot + path; + scriptElement.parentNode.insertBefore(script, scriptElement); +} + +function show(tableId, selected, columns) { + if (tableId !== selected) { + document.querySelectorAll('div.' + tableId + ':not(.' + selected + ')') + .forEach(function(elem) { + elem.style.display = 'none'; + }); + } + document.querySelectorAll('div.' 
+ selected) + .forEach(function(elem, index) { + elem.style.display = ''; + var isEvenRow = index % (columns * 2) < columns; + elem.classList.remove(isEvenRow ? oddRowColor : evenRowColor); + elem.classList.add(isEvenRow ? evenRowColor : oddRowColor); + }); + updateTabs(tableId, selected); +} + +function updateTabs(tableId, selected) { + document.getElementById(tableId + '.tabpanel') + .setAttribute('aria-labelledby', selected); + document.querySelectorAll('button[id^="' + tableId + '"]') + .forEach(function(tab, index) { + if (selected === tab.id || (tableId === selected && index === 0)) { + tab.className = activeTableTab; + tab.setAttribute('aria-selected', true); + tab.setAttribute('tabindex',0); + } else { + tab.className = tableTab; + tab.setAttribute('aria-selected', false); + tab.setAttribute('tabindex',-1); + } + }); +} + +function switchTab(e) { + var selected = document.querySelector('[aria-selected=true]'); + if (selected) { + if ((e.keyCode === 37 || e.keyCode === 38) && selected.previousSibling) { + // left or up arrow key pressed: move focus to previous tab + selected.previousSibling.click(); + selected.previousSibling.focus(); + e.preventDefault(); + } else if ((e.keyCode === 39 || e.keyCode === 40) && selected.nextSibling) { + // right or down arrow key pressed: move focus to next tab + selected.nextSibling.click(); + selected.nextSibling.focus(); + e.preventDefault(); + } + } +} + +var updateSearchResults = function() {}; + +function indexFilesLoaded() { + return moduleSearchIndex + && packageSearchIndex + && typeSearchIndex + && memberSearchIndex + && tagSearchIndex; +} + +// Workaround for scroll position not being included in browser history (8249133) +document.addEventListener("DOMContentLoaded", function(e) { + var contentDiv = document.querySelector("div.flex-content"); + window.addEventListener("popstate", function(e) { + if (e.state !== null) { + contentDiv.scrollTop = e.state; + } + }); + window.addEventListener("hashchange", function(e) { + history.replaceState(contentDiv.scrollTop, document.title); + }); + contentDiv.addEventListener("scroll", function(e) { + var timeoutID; + if (!timeoutID) { + timeoutID = setTimeout(function() { + history.replaceState(contentDiv.scrollTop, document.title); + timeoutID = null; + }, 100); + } + }); + if (!location.hash) { + history.replaceState(contentDiv.scrollTop, document.title); + } +}); diff --git a/static/41/javadoc/search.js b/static/41/javadoc/search.js new file mode 100644 index 000000000..db3b2f4a6 --- /dev/null +++ b/static/41/javadoc/search.js @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +var noResult = {l: "No results found"}; +var loading = {l: "Loading search index..."}; +var catModules = "Modules"; +var catPackages = "Packages"; +var catTypes = "Classes and Interfaces"; +var catMembers = "Members"; +var catSearchTags = "Search Tags"; +var highlight = "$&"; +var searchPattern = ""; +var fallbackPattern = ""; +var RANKING_THRESHOLD = 2; +var NO_MATCH = 0xffff; +var MIN_RESULTS = 3; +var MAX_RESULTS = 500; +var UNNAMED = ""; +function escapeHtml(str) { + return str.replace(//g, ">"); +} +function getHighlightedText(item, matcher, fallbackMatcher) { + var escapedItem = escapeHtml(item); + var highlighted = escapedItem.replace(matcher, highlight); + if (highlighted === escapedItem) { + highlighted = escapedItem.replace(fallbackMatcher, highlight) + } + return highlighted; +} +function getURLPrefix(ui) { + var urlPrefix=""; + var slash = "/"; + if (ui.item.category === catModules) { + return ui.item.l + slash; + } else if (ui.item.category === catPackages && ui.item.m) { + return ui.item.m + slash; + } else if (ui.item.category === catTypes || ui.item.category === catMembers) { + if (ui.item.m) { + urlPrefix = ui.item.m + slash; + } else { + $.each(packageSearchIndex, function(index, item) { + if (item.m && ui.item.p === item.l) { + urlPrefix = item.m + slash; + } + }); + } + } + return urlPrefix; +} +function createSearchPattern(term) { + var pattern = ""; + var isWordToken = false; + term.replace(/,\s*/g, ", ").trim().split(/\s+/).forEach(function(w, index) { + if (index > 0) { + // whitespace between identifiers is significant + pattern += (isWordToken && /^\w/.test(w)) ? "\\s+" : "\\s*"; + } + var tokens = w.split(/(?=[A-Z,.()<>[\/])/); + for (var i = 0; i < tokens.length; i++) { + var s = tokens[i]; + if (s === "") { + continue; + } + pattern += $.ui.autocomplete.escapeRegex(s); + isWordToken = /\w$/.test(s); + if (isWordToken) { + pattern += "([a-z0-9_$<>\\[\\]]*?)"; + } + } + }); + return pattern; +} +function createMatcher(pattern, flags) { + var isCamelCase = /[A-Z]/.test(pattern); + return new RegExp(pattern, flags + (isCamelCase ? "" : "i")); +} +var watermark = 'Search'; +$(function() { + var search = $("#search-input"); + var reset = $("#reset-button"); + search.val(''); + search.prop("disabled", false); + reset.prop("disabled", false); + search.val(watermark).addClass('watermark'); + search.blur(function() { + if ($(this).val().length === 0) { + $(this).val(watermark).addClass('watermark'); + } + }); + search.on('click keydown paste', function() { + if ($(this).val() === watermark) { + $(this).val('').removeClass('watermark'); + } + }); + reset.click(function() { + search.val('').focus(); + }); + search.focus()[0].setSelectionRange(0, 0); +}); +$.widget("custom.catcomplete", $.ui.autocomplete, { + _create: function() { + this._super(); + this.widget().menu("option", "items", "> :not(.ui-autocomplete-category)"); + }, + _renderMenu: function(ul, items) { + var rMenu = this; + var currentCategory = ""; + rMenu.menu.bindings = $(); + $.each(items, function(index, item) { + var li; + if (item.category && item.category !== currentCategory) { + ul.append("
    • " + item.category + "
    • "); + currentCategory = item.category; + } + li = rMenu._renderItemData(ul, item); + if (item.category) { + li.attr("aria-label", item.category + " : " + item.l); + li.attr("class", "result-item"); + } else { + li.attr("aria-label", item.l); + li.attr("class", "result-item"); + } + }); + }, + _renderItem: function(ul, item) { + var label = ""; + var matcher = createMatcher(escapeHtml(searchPattern), "g"); + var fallbackMatcher = new RegExp(fallbackPattern, "gi") + if (item.category === catModules) { + label = getHighlightedText(item.l, matcher, fallbackMatcher); + } else if (item.category === catPackages) { + label = getHighlightedText(item.l, matcher, fallbackMatcher); + } else if (item.category === catTypes) { + label = (item.p && item.p !== UNNAMED) + ? getHighlightedText(item.p + "." + item.l, matcher, fallbackMatcher) + : getHighlightedText(item.l, matcher, fallbackMatcher); + } else if (item.category === catMembers) { + label = (item.p && item.p !== UNNAMED) + ? getHighlightedText(item.p + "." + item.c + "." + item.l, matcher, fallbackMatcher) + : getHighlightedText(item.c + "." + item.l, matcher, fallbackMatcher); + } else if (item.category === catSearchTags) { + label = getHighlightedText(item.l, matcher, fallbackMatcher); + } else { + label = item.l; + } + var li = $("
    • ").appendTo(ul); + var div = $("
      ").appendTo(li); + if (item.category === catSearchTags && item.h) { + if (item.d) { + div.html(label + " (" + item.h + ")
      " + + item.d + "
      "); + } else { + div.html(label + " (" + item.h + ")"); + } + } else { + if (item.m) { + div.html(item.m + "/" + label); + } else { + div.html(label); + } + } + return li; + } +}); +function rankMatch(match, category) { + if (!match) { + return NO_MATCH; + } + var index = match.index; + var input = match.input; + var leftBoundaryMatch = 2; + var periferalMatch = 0; + // make sure match is anchored on a left word boundary + if (index === 0 || /\W/.test(input[index - 1]) || "_" === input[index]) { + leftBoundaryMatch = 0; + } else if ("_" === input[index - 1] || (input[index] === input[index].toUpperCase() && !/^[A-Z0-9_$]+$/.test(input))) { + leftBoundaryMatch = 1; + } + var matchEnd = index + match[0].length; + var leftParen = input.indexOf("("); + var endOfName = leftParen > -1 ? leftParen : input.length; + // exclude peripheral matches + if (category !== catModules && category !== catSearchTags) { + var delim = category === catPackages ? "/" : "."; + if (leftParen > -1 && leftParen < index) { + periferalMatch += 2; + } else if (input.lastIndexOf(delim, endOfName) >= matchEnd) { + periferalMatch += 2; + } + } + var delta = match[0].length === endOfName ? 0 : 1; // rank full match higher than partial match + for (var i = 1; i < match.length; i++) { + // lower ranking if parts of the name are missing + if (match[i]) + delta += match[i].length; + } + if (category === catTypes) { + // lower ranking if a type name contains unmatched camel-case parts + if (/[A-Z]/.test(input.substring(matchEnd))) + delta += 5; + if (/[A-Z]/.test(input.substring(0, index))) + delta += 5; + } + return leftBoundaryMatch + periferalMatch + (delta / 200); + +} +function doSearch(request, response) { + var result = []; + searchPattern = createSearchPattern(request.term); + fallbackPattern = createSearchPattern(request.term.toLowerCase()); + if (searchPattern === "") { + return this.close(); + } + var camelCaseMatcher = createMatcher(searchPattern, ""); + var fallbackMatcher = new RegExp(fallbackPattern, "i"); + + function searchIndexWithMatcher(indexArray, matcher, category, nameFunc) { + if (indexArray) { + var newResults = []; + $.each(indexArray, function (i, item) { + item.category = category; + var ranking = rankMatch(matcher.exec(nameFunc(item)), category); + if (ranking < RANKING_THRESHOLD) { + newResults.push({ranking: ranking, item: item}); + } + return newResults.length <= MAX_RESULTS; + }); + return newResults.sort(function(e1, e2) { + return e1.ranking - e2.ranking; + }).map(function(e) { + return e.item; + }); + } + return []; + } + function searchIndex(indexArray, category, nameFunc) { + var primaryResults = searchIndexWithMatcher(indexArray, camelCaseMatcher, category, nameFunc); + result = result.concat(primaryResults); + if (primaryResults.length <= MIN_RESULTS && !camelCaseMatcher.ignoreCase) { + var secondaryResults = searchIndexWithMatcher(indexArray, fallbackMatcher, category, nameFunc); + result = result.concat(secondaryResults.filter(function (item) { + return primaryResults.indexOf(item) === -1; + })); + } + } + + searchIndex(moduleSearchIndex, catModules, function(item) { return item.l; }); + searchIndex(packageSearchIndex, catPackages, function(item) { + return (item.m && request.term.indexOf("/") > -1) + ? (item.m + "/" + item.l) : item.l; + }); + searchIndex(typeSearchIndex, catTypes, function(item) { + return request.term.indexOf(".") > -1 ? item.p + "." + item.l : item.l; + }); + searchIndex(memberSearchIndex, catMembers, function(item) { + return request.term.indexOf(".") > -1 + ? 
item.p + "." + item.c + "." + item.l : item.l; + }); + searchIndex(tagSearchIndex, catSearchTags, function(item) { return item.l; }); + + if (!indexFilesLoaded()) { + updateSearchResults = function() { + doSearch(request, response); + } + result.unshift(loading); + } else { + updateSearchResults = function() {}; + } + response(result); +} +$(function() { + $("#search-input").catcomplete({ + minLength: 1, + delay: 300, + source: doSearch, + response: function(event, ui) { + if (!ui.content.length) { + ui.content.push(noResult); + } else { + $("#search-input").empty(); + } + }, + autoFocus: true, + focus: function(event, ui) { + return false; + }, + position: { + collision: "flip" + }, + select: function(event, ui) { + if (ui.item.category) { + var url = getURLPrefix(ui); + if (ui.item.category === catModules) { + url += "module-summary.html"; + } else if (ui.item.category === catPackages) { + if (ui.item.u) { + url = ui.item.u; + } else { + url += ui.item.l.replace(/\./g, '/') + "/package-summary.html"; + } + } else if (ui.item.category === catTypes) { + if (ui.item.u) { + url = ui.item.u; + } else if (ui.item.p === UNNAMED) { + url += ui.item.l + ".html"; + } else { + url += ui.item.p.replace(/\./g, '/') + "/" + ui.item.l + ".html"; + } + } else if (ui.item.category === catMembers) { + if (ui.item.p === UNNAMED) { + url += ui.item.c + ".html" + "#"; + } else { + url += ui.item.p.replace(/\./g, '/') + "/" + ui.item.c + ".html" + "#"; + } + if (ui.item.u) { + url += ui.item.u; + } else { + url += ui.item.l; + } + } else if (ui.item.category === catSearchTags) { + url += ui.item.u; + } + if (top !== window) { + parent.classFrame.location = pathtoroot + url; + } else { + window.location.href = pathtoroot + url; + } + $("#search-input").focus(); + } + } + }); +}); diff --git a/static/41/javadoc/serialized-form.html b/static/41/javadoc/serialized-form.html new file mode 100644 index 000000000..8f86fbfd8 --- /dev/null +++ b/static/41/javadoc/serialized-form.html @@ -0,0 +1,2163 @@ + + + + +Serialized Form (kafka 4.1.0 API) + + + + + + + + + + + + + + +
[static/41/javadoc/serialized-form.html: generated "Serialized Form" page of the kafka 4.1.0 API javadoc (2,163 lines of markup). Only the page heading "Serialized Form" is recoverable here; the remainder of the file body is omitted.]
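Note on the camel-case matching used by the search.js added above: createSearchPattern() expands each capitalized token of the query into a lazy run of lower-case characters, so an abbreviation such as "AbCo" can match an index entry like "AbstractConfig", and createMatcher() only adds the "i" flag when the query contains no capitals. The snippet below is an illustrative sketch of that idea only; camelCasePattern() and the sample class names are hypothetical and not part of the patch, and the real createSearchPattern() additionally handles '.', '/', '(' and whitespace-separated terms before rankMatch() orders the hits.

// Illustrative sketch only -- camelCasePattern() and the sample identifiers are
// hypothetical; the real logic lives in createSearchPattern()/createMatcher()
// in the search.js added above.
function camelCasePattern(term) {
    // Split before each capital letter ("AbCo" -> ["Ab", "Co"]) and allow a
    // lazy run of lower-case/digit characters after every token.
    return term
        .split(/(?=[A-Z])/)
        .map(function (token) { return token + "[a-z0-9_$]*?"; })
        .join("");
}

var matcher = new RegExp(camelCasePattern("AbCo")); // no "i" flag: query has capitals
console.log(matcher.test("AbstractConfig"));          // true  -- "Ab...Co..." is found
console.log(matcher.test("AbortTransactionOptions")); // false -- no "Co" follows "Ab"

As in the source above: when this camel-case pass yields too few hits, doSearch() falls back to a case-insensitive match on the lower-cased term (fallbackPattern/fallbackMatcher), and until the five *-search-index.js files have loaded it re-points updateSearchResults() at doSearch() and prepends the "Loading search index..." entry.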
      + + diff --git a/static/41/javadoc/stylesheet.css b/static/41/javadoc/stylesheet.css new file mode 100644 index 000000000..4a576bd24 --- /dev/null +++ b/static/41/javadoc/stylesheet.css @@ -0,0 +1,869 @@ +/* + * Javadoc style sheet + */ + +@import url('resources/fonts/dejavu.css'); + +/* + * Styles for individual HTML elements. + * + * These are styles that are specific to individual HTML elements. Changing them affects the style of a particular + * HTML element throughout the page. + */ + +body { + background-color:#ffffff; + color:#353833; + font-family:'DejaVu Sans', Arial, Helvetica, sans-serif; + font-size:14px; + margin:0; + padding:0; + height:100%; + width:100%; +} +iframe { + margin:0; + padding:0; + height:100%; + width:100%; + overflow-y:scroll; + border:none; +} +a:link, a:visited { + text-decoration:none; + color:#4A6782; +} +a[href]:hover, a[href]:focus { + text-decoration:none; + color:#bb7a2a; +} +a[name] { + color:#353833; +} +pre { + font-family:'DejaVu Sans Mono', monospace; + font-size:14px; +} +h1 { + font-size:20px; +} +h2 { + font-size:18px; +} +h3 { + font-size:16px; +} +h4 { + font-size:15px; +} +h5 { + font-size:14px; +} +h6 { + font-size:13px; +} +ul { + list-style-type:disc; +} +code, tt { + font-family:'DejaVu Sans Mono', monospace; +} +:not(h1, h2, h3, h4, h5, h6) > code, +:not(h1, h2, h3, h4, h5, h6) > tt { + font-size:14px; + padding-top:4px; + margin-top:8px; + line-height:1.4em; +} +dt code { + font-family:'DejaVu Sans Mono', monospace; + font-size:14px; + padding-top:4px; +} +.summary-table dt code { + font-family:'DejaVu Sans Mono', monospace; + font-size:14px; + vertical-align:top; + padding-top:4px; +} +sup { + font-size:8px; +} +button { + font-family: 'DejaVu Sans', Arial, Helvetica, sans-serif; + font-size: 14px; +} +/* + * Styles for HTML generated by javadoc. + * + * These are style classes that are used by the standard doclet to generate HTML documentation. + */ + +/* + * Styles for document title and copyright. + */ +.clear { + clear:both; + height:0; + overflow:hidden; +} +.about-language { + float:right; + padding:0 21px 8px 8px; + font-size:11px; + margin-top:-9px; + height:2.9em; +} +.legal-copy { + margin-left:.5em; +} +.tab { + background-color:#0066FF; + color:#ffffff; + padding:8px; + width:5em; + font-weight:bold; +} +/* + * Styles for navigation bar. 
+ */ +@media screen { + .flex-box { + position:fixed; + display:flex; + flex-direction:column; + height: 100%; + width: 100%; + } + .flex-header { + flex: 0 0 auto; + } + .flex-content { + flex: 1 1 auto; + overflow-y: auto; + } +} +.top-nav { + background-color:#4D7A97; + color:#FFFFFF; + float:left; + padding:0; + width:100%; + clear:right; + min-height:2.8em; + padding-top:10px; + overflow:hidden; + font-size:12px; +} +.sub-nav { + background-color:#dee3e9; + float:left; + width:100%; + overflow:hidden; + font-size:12px; +} +.sub-nav div { + clear:left; + float:left; + padding:0 0 5px 6px; + text-transform:uppercase; +} +.sub-nav .nav-list { + padding-top:5px; +} +ul.nav-list { + display:block; + margin:0 25px 0 0; + padding:0; +} +ul.sub-nav-list { + float:left; + margin:0 25px 0 0; + padding:0; +} +ul.nav-list li { + list-style:none; + float:left; + padding: 5px 6px; + text-transform:uppercase; +} +.sub-nav .nav-list-search { + float:right; + margin:0 0 0 0; + padding:5px 6px; + clear:none; +} +.nav-list-search label { + position:relative; + right:-16px; +} +ul.sub-nav-list li { + list-style:none; + float:left; + padding-top:10px; +} +.top-nav a:link, .top-nav a:active, .top-nav a:visited { + color:#FFFFFF; + text-decoration:none; + text-transform:uppercase; +} +.top-nav a:hover { + text-decoration:none; + color:#bb7a2a; + text-transform:uppercase; +} +.nav-bar-cell1-rev { + background-color:#F8981D; + color:#253441; + margin: auto 5px; +} +.skip-nav { + position:absolute; + top:auto; + left:-9999px; + overflow:hidden; +} +/* + * Hide navigation links and search box in print layout + */ +@media print { + ul.nav-list, div.sub-nav { + display:none; + } +} +/* + * Styles for page header and footer. + */ +.title { + color:#2c4557; + margin:10px 0; +} +.sub-title { + margin:5px 0 0 0; +} +.header ul { + margin:0 0 15px 0; + padding:0; +} +.header ul li, .footer ul li { + list-style:none; + font-size:13px; +} +/* + * Styles for headings. + */ +body.class-declaration-page .summary h2, +body.class-declaration-page .details h2, +body.class-use-page h2, +body.module-declaration-page .block-list h2 { + font-style: italic; + padding:0; + margin:15px 0; +} +body.class-declaration-page .summary h3, +body.class-declaration-page .details h3, +body.class-declaration-page .summary .inherited-list h2 { + background-color:#dee3e9; + border:1px solid #d0d9e0; + margin:0 0 6px -8px; + padding:7px 5px; +} +/* + * Styles for page layout containers. + */ +main { + clear:both; + padding:10px 20px; + position:relative; +} +dl.notes > dt { + font-family: 'DejaVu Sans', Arial, Helvetica, sans-serif; + font-size:12px; + font-weight:bold; + margin:10px 0 0 0; + color:#4E4E4E; +} +dl.notes > dd { + margin:5px 10px 10px 0; + font-size:14px; + font-family:'DejaVu Serif', Georgia, "Times New Roman", Times, serif; +} +dl.name-value > dt { + margin-left:1px; + font-size:1.1em; + display:inline; + font-weight:bold; +} +dl.name-value > dd { + margin:0 0 0 1px; + font-size:1.1em; + display:inline; +} +/* + * Styles for lists. 
+ */ +li.circle { + list-style:circle; +} +ul.horizontal li { + display:inline; + font-size:0.9em; +} +div.inheritance { + margin:0; + padding:0; +} +div.inheritance div.inheritance { + margin-left:2em; +} +ul.block-list, +ul.details-list, +ul.member-list, +ul.summary-list { + margin:10px 0 10px 0; + padding:0; +} +ul.block-list > li, +ul.details-list > li, +ul.member-list > li, +ul.summary-list > li { + list-style:none; + margin-bottom:15px; + line-height:1.4; +} +.summary-table dl, .summary-table dl dt, .summary-table dl dd { + margin-top:0; + margin-bottom:1px; +} +ul.see-list, ul.see-list-long { + padding-left: 0; + list-style: none; +} +ul.see-list li { + display: inline; +} +ul.see-list li:not(:last-child):after, +ul.see-list-long li:not(:last-child):after { + content: ", "; + white-space: pre-wrap; +} +/* + * Styles for tables. + */ +.summary-table, .details-table { + width:100%; + border-spacing:0; + border-left:1px solid #EEE; + border-right:1px solid #EEE; + border-bottom:1px solid #EEE; + padding:0; +} +.caption { + position:relative; + text-align:left; + background-repeat:no-repeat; + color:#253441; + font-weight:bold; + clear:none; + overflow:hidden; + padding:0; + padding-top:10px; + padding-left:1px; + margin:0; + white-space:pre; +} +.caption a:link, .caption a:visited { + color:#1f389c; +} +.caption a:hover, +.caption a:active { + color:#FFFFFF; +} +.caption span { + white-space:nowrap; + padding-top:5px; + padding-left:12px; + padding-right:12px; + padding-bottom:7px; + display:inline-block; + float:left; + background-color:#F8981D; + border: none; + height:16px; +} +div.table-tabs { + padding:10px 0 0 1px; + margin:0; +} +div.table-tabs > button { + border: none; + cursor: pointer; + padding: 5px 12px 7px 12px; + font-weight: bold; + margin-right: 3px; +} +div.table-tabs > button.active-table-tab { + background: #F8981D; + color: #253441; +} +div.table-tabs > button.table-tab { + background: #4D7A97; + color: #FFFFFF; +} +.two-column-summary { + display: grid; + grid-template-columns: minmax(15%, max-content) minmax(15%, auto); +} +.three-column-summary { + display: grid; + grid-template-columns: minmax(10%, max-content) minmax(15%, max-content) minmax(15%, auto); +} +.four-column-summary { + display: grid; + grid-template-columns: minmax(10%, max-content) minmax(10%, max-content) minmax(10%, max-content) minmax(10%, auto); +} +@media screen and (max-width: 600px) { + .two-column-summary { + display: grid; + grid-template-columns: 1fr; + } +} +@media screen and (max-width: 800px) { + .three-column-summary { + display: grid; + grid-template-columns: minmax(10%, max-content) minmax(25%, auto); + } + .three-column-summary .col-last { + grid-column-end: span 2; + } +} +@media screen and (max-width: 1000px) { + .four-column-summary { + display: grid; + grid-template-columns: minmax(15%, max-content) minmax(15%, auto); + } +} +.summary-table > div, .details-table > div { + text-align:left; + padding: 8px 3px 3px 7px; +} +.col-first, .col-second, .col-last, .col-constructor-name, .col-summary-item-name { + vertical-align:top; + padding-right:0; + padding-top:8px; + padding-bottom:3px; +} +.table-header { + background:#dee3e9; + font-weight: bold; +} +.col-first, .col-first { + font-size:13px; +} +.col-second, .col-second, .col-last, .col-constructor-name, .col-summary-item-name, .col-last { + font-size:13px; +} +.col-first, .col-second, .col-constructor-name { + vertical-align:top; + overflow: auto; +} +.col-last { + white-space:normal; +} +.col-first a:link, .col-first 
a:visited, +.col-second a:link, .col-second a:visited, +.col-first a:link, .col-first a:visited, +.col-second a:link, .col-second a:visited, +.col-constructor-name a:link, .col-constructor-name a:visited, +.col-summary-item-name a:link, .col-summary-item-name a:visited, +.constant-values-container a:link, .constant-values-container a:visited, +.all-classes-container a:link, .all-classes-container a:visited, +.all-packages-container a:link, .all-packages-container a:visited { + font-weight:bold; +} +.table-sub-heading-color { + background-color:#EEEEFF; +} +.even-row-color, .even-row-color .table-header { + background-color:#FFFFFF; +} +.odd-row-color, .odd-row-color .table-header { + background-color:#EEEEEF; +} +/* + * Styles for contents. + */ +.deprecated-content { + margin:0; + padding:10px 0; +} +div.block { + font-size:14px; + font-family:'DejaVu Serif', Georgia, "Times New Roman", Times, serif; +} +.col-last div { + padding-top:0; +} +.col-last a { + padding-bottom:3px; +} +.module-signature, +.package-signature, +.type-signature, +.member-signature { + font-family:'DejaVu Sans Mono', monospace; + font-size:14px; + margin:14px 0; + white-space: pre-wrap; +} +.module-signature, +.package-signature, +.type-signature { + margin-top: 0; +} +.member-signature .type-parameters-long, +.member-signature .parameters, +.member-signature .exceptions { + display: inline-block; + vertical-align: top; + white-space: pre; +} +.member-signature .type-parameters { + white-space: normal; +} +/* + * Styles for formatting effect. + */ +.source-line-no { + color:green; + padding:0 30px 0 0; +} +h1.hidden { + visibility:hidden; + overflow:hidden; + font-size:10px; +} +.block { + display:block; + margin:0 10px 5px 0; + color:#474747; +} +.deprecated-label, .descfrm-type-label, .implementation-label, .member-name-label, .member-name-link, +.module-label-in-package, .module-label-in-type, .override-specify-label, .package-label-in-type, +.package-hierarchy-label, .type-name-label, .type-name-link, .search-tag-link, .preview-label { + font-weight:bold; +} +.deprecation-comment, .help-footnote, .preview-comment { + font-style:italic; +} +.deprecation-block { + font-size:14px; + font-family:'DejaVu Serif', Georgia, "Times New Roman", Times, serif; + border-style:solid; + border-width:thin; + border-radius:10px; + padding:10px; + margin-bottom:10px; + margin-right:10px; + display:inline-block; +} +.preview-block { + font-size:14px; + font-family:'DejaVu Serif', Georgia, "Times New Roman", Times, serif; + border-style:solid; + border-width:thin; + border-radius:10px; + padding:10px; + margin-bottom:10px; + margin-right:10px; + display:inline-block; +} +div.block div.deprecation-comment { + font-style:normal; +} +/* + * Styles specific to HTML5 elements. + */ +main, nav, header, footer, section { + display:block; +} +/* + * Styles for javadoc search. 
+ */ +.ui-autocomplete-category { + font-weight:bold; + font-size:15px; + padding:7px 0 7px 3px; + background-color:#4D7A97; + color:#FFFFFF; +} +.result-item { + font-size:13px; +} +.ui-autocomplete { + max-height:85%; + max-width:65%; + overflow-y:scroll; + overflow-x:scroll; + white-space:nowrap; + box-shadow: 0 3px 6px rgba(0,0,0,0.16), 0 3px 6px rgba(0,0,0,0.23); +} +ul.ui-autocomplete { + position:fixed; + z-index:999999; + background-color: #FFFFFF; +} +ul.ui-autocomplete li { + float:left; + clear:both; + width:100%; +} +.result-highlight { + font-weight:bold; +} +.ui-autocomplete .result-item { + font-size: inherit; +} +#search-input { + background-image:url('resources/glass.png'); + background-size:13px; + background-repeat:no-repeat; + background-position:2px 3px; + padding-left:20px; + position:relative; + right:-18px; + width:400px; +} +#reset-button { + background-color: rgb(255,255,255); + background-image:url('resources/x.png'); + background-position:center; + background-repeat:no-repeat; + background-size:12px; + border:0 none; + width:16px; + height:16px; + position:relative; + left:-4px; + top:-4px; + font-size:0px; +} +.watermark { + color:#545454; +} +.search-tag-desc-result { + font-style:italic; + font-size:11px; +} +.search-tag-holder-result { + font-style:italic; + font-size:12px; +} +.search-tag-result:target { + background-color:yellow; +} +.module-graph span { + display:none; + position:absolute; +} +.module-graph:hover span { + display:block; + margin: -100px 0 0 100px; + z-index: 1; +} +.inherited-list { + margin: 10px 0 10px 0; +} +section.class-description { + line-height: 1.4; +} +.summary section[class$="-summary"], .details section[class$="-details"], +.class-uses .detail, .serialized-class-details { + padding: 0px 20px 5px 10px; + border: 1px solid #ededed; + background-color: #f8f8f8; +} +.inherited-list, section[class$="-details"] .detail { + padding:0 0 5px 8px; + background-color:#ffffff; + border:none; +} +.vertical-separator { + padding: 0 5px; +} +ul.help-section-list { + margin: 0; +} +ul.help-subtoc > li { + display: inline-block; + padding-right: 5px; + font-size: smaller; +} +ul.help-subtoc > li::before { + content: "\2022" ; + padding-right:2px; +} +span.help-note { + font-style: italic; +} +/* + * Indicator icon for external links. + */ +main a[href*="://"]::after { + content:""; + display:inline-block; + background-image:url('data:image/svg+xml; utf8, \ + \ + \ + '); + background-size:100% 100%; + width:7px; + height:7px; + margin-left:2px; + margin-bottom:4px; +} +main a[href*="://"]:hover::after, +main a[href*="://"]:focus::after { + background-image:url('data:image/svg+xml; utf8, \ + \ + \ + '); +} + +/* + * Styles for user-provided tables. + * + * borderless: + * No borders, vertical margins, styled caption. + * This style is provided for use with existing doc comments. + * In general, borderless tables should not be used for layout purposes. + * + * plain: + * Plain borders around table and cells, vertical margins, styled caption. + * Best for small tables or for complex tables for tables with cells that span + * rows and columns, when the "striped" style does not work well. + * + * striped: + * Borders around the table and vertical borders between cells, striped rows, + * vertical margins, styled caption. + * Best for tables that have a header row, and a body containing a series of simple rows. 
+ */ + +table.borderless, +table.plain, +table.striped { + margin-top: 10px; + margin-bottom: 10px; +} +table.borderless > caption, +table.plain > caption, +table.striped > caption { + font-weight: bold; + font-size: smaller; +} +table.borderless th, table.borderless td, +table.plain th, table.plain td, +table.striped th, table.striped td { + padding: 2px 5px; +} +table.borderless, +table.borderless > thead > tr > th, table.borderless > tbody > tr > th, table.borderless > tr > th, +table.borderless > thead > tr > td, table.borderless > tbody > tr > td, table.borderless > tr > td { + border: none; +} +table.borderless > thead > tr, table.borderless > tbody > tr, table.borderless > tr { + background-color: transparent; +} +table.plain { + border-collapse: collapse; + border: 1px solid black; +} +table.plain > thead > tr, table.plain > tbody tr, table.plain > tr { + background-color: transparent; +} +table.plain > thead > tr > th, table.plain > tbody > tr > th, table.plain > tr > th, +table.plain > thead > tr > td, table.plain > tbody > tr > td, table.plain > tr > td { + border: 1px solid black; +} +table.striped { + border-collapse: collapse; + border: 1px solid black; +} +table.striped > thead { + background-color: #E3E3E3; +} +table.striped > thead > tr > th, table.striped > thead > tr > td { + border: 1px solid black; +} +table.striped > tbody > tr:nth-child(even) { + background-color: #EEE +} +table.striped > tbody > tr:nth-child(odd) { + background-color: #FFF +} +table.striped > tbody > tr > th, table.striped > tbody > tr > td { + border-left: 1px solid black; + border-right: 1px solid black; +} +table.striped > tbody > tr > th { + font-weight: normal; +} +/** + * Tweak font sizes and paddings for small screens. + */ +@media screen and (max-width: 1050px) { + #search-input { + width: 300px; + } +} +@media screen and (max-width: 800px) { + #search-input { + width: 200px; + } + .top-nav, + .bottom-nav { + font-size: 11px; + padding-top: 6px; + } + .sub-nav { + font-size: 11px; + } + .about-language { + padding-right: 16px; + } + ul.nav-list li, + .sub-nav .nav-list-search { + padding: 6px; + } + ul.sub-nav-list li { + padding-top: 5px; + } + main { + padding: 10px; + } + .summary section[class$="-summary"], .details section[class$="-details"], + .class-uses .detail, .serialized-class-details { + padding: 0 8px 5px 8px; + } + body { + -webkit-text-size-adjust: none; + } +} +@media screen and (max-width: 500px) { + #search-input { + width: 150px; + } + .top-nav, + .bottom-nav { + font-size: 10px; + } + .sub-nav { + font-size: 10px; + } + .about-language { + font-size: 10px; + padding-right: 12px; + } +} diff --git a/static/41/javadoc/tag-search-index.js b/static/41/javadoc/tag-search-index.js new file mode 100644 index 000000000..bf10aaf6d --- /dev/null +++ b/static/41/javadoc/tag-search-index.js @@ -0,0 +1 @@ +tagSearchIndex = [{"l":"Constant Field Values","h":"","u":"constant-values.html"},{"l":"Serialized Form","h":"","u":"serialized-form.html"}];updateSearchResults(); \ No newline at end of file diff --git a/static/41/javadoc/type-search-index.js b/static/41/javadoc/type-search-index.js new file mode 100644 index 000000000..0b14abe93 --- /dev/null +++ b/static/41/javadoc/type-search-index.js @@ -0,0 +1 @@ +typeSearchIndex = 
[{"p":"org.apache.kafka.clients.admin","l":"AbortTransactionOptions"},{"p":"org.apache.kafka.clients.admin","l":"AbortTransactionResult"},{"p":"org.apache.kafka.clients.admin","l":"AbortTransactionSpec"},{"p":"org.apache.kafka.common.config","l":"AbstractConfig"},{"p":"org.apache.kafka.clients.admin","l":"AbstractOptions"},{"p":"org.apache.kafka.connect.health","l":"AbstractState"},{"p":"org.apache.kafka.common.acl","l":"AccessControlEntry"},{"p":"org.apache.kafka.common.acl","l":"AccessControlEntryFilter"},{"p":"org.apache.kafka.clients.consumer","l":"AcknowledgementCommitCallback"},{"p":"org.apache.kafka.clients.consumer","l":"AcknowledgeType"},{"p":"org.apache.kafka.common.acl","l":"AclBinding"},{"p":"org.apache.kafka.server.authorizer","l":"AclDeleteResult.AclBindingDeleteResult"},{"p":"org.apache.kafka.common.acl","l":"AclBindingFilter"},{"p":"org.apache.kafka.server.authorizer","l":"AclCreateResult"},{"p":"org.apache.kafka.server.authorizer","l":"AclDeleteResult"},{"p":"org.apache.kafka.common.acl","l":"AclOperation"},{"p":"org.apache.kafka.common.acl","l":"AclPermissionType"},{"p":"org.apache.kafka.server.authorizer","l":"Action"},{"p":"org.apache.kafka.clients.admin","l":"AddRaftVoterOptions"},{"p":"org.apache.kafka.clients.admin","l":"AddRaftVoterResult"},{"p":"org.apache.kafka.clients.admin","l":"Admin"},{"p":"org.apache.kafka.clients.admin","l":"AdminClient"},{"p":"org.apache.kafka.clients.admin","l":"AdminClientConfig"},{"p":"org.apache.kafka.streams.kstream","l":"Aggregator"},{"l":"All Classes and Interfaces","u":"allclasses-index.html"},{"p":"org.apache.kafka.connect.errors","l":"AlreadyExistsException"},{"p":"org.apache.kafka.clients.admin","l":"AlterClientQuotasOptions"},{"p":"org.apache.kafka.clients.admin","l":"AlterClientQuotasResult"},{"p":"org.apache.kafka.clients.admin","l":"AlterConfigOp"},{"p":"org.apache.kafka.server.policy","l":"AlterConfigPolicy"},{"p":"org.apache.kafka.clients.admin","l":"AlterConfigsOptions"},{"p":"org.apache.kafka.clients.admin","l":"AlterConfigsResult"},{"p":"org.apache.kafka.clients.admin","l":"AlterConsumerGroupOffsetsOptions"},{"p":"org.apache.kafka.clients.admin","l":"AlterConsumerGroupOffsetsResult"},{"p":"org.apache.kafka.clients.admin","l":"AlterPartitionReassignmentsOptions"},{"p":"org.apache.kafka.clients.admin","l":"AlterPartitionReassignmentsResult"},{"p":"org.apache.kafka.clients.admin","l":"AlterReplicaLogDirsOptions"},{"p":"org.apache.kafka.clients.admin","l":"AlterReplicaLogDirsResult"},{"p":"org.apache.kafka.clients.admin","l":"AlterShareGroupOffsetsOptions"},{"p":"org.apache.kafka.clients.admin","l":"AlterShareGroupOffsetsResult"},{"p":"org.apache.kafka.clients.admin","l":"AlterStreamsGroupOffsetsOptions"},{"p":"org.apache.kafka.clients.admin","l":"AlterStreamsGroupOffsetsResult"},{"p":"org.apache.kafka.clients.admin","l":"AlterUserScramCredentialsOptions"},{"p":"org.apache.kafka.clients.admin","l":"AlterUserScramCredentialsResult"},{"p":"org.apache.kafka.common.errors","l":"ApiException"},{"p":"org.apache.kafka.common.errors","l":"ApplicationRecoverableException"},{"p":"org.apache.kafka.streams.processor.assignment","l":"ApplicationState"},{"p":"org.apache.kafka.streams.processor.assignment","l":"KafkaStreamsAssignment.AssignedTask"},{"p":"org.apache.kafka.clients.consumer","l":"ConsumerPartitionAssignor.Assignment"},{"p":"org.apache.kafka.streams.processor.assignment","l":"AssignmentConfigs"},{"p":"org.apache.kafka.streams.processor.assignment","l":"TaskAssignor.AssignmentError"},{"p":"org.apache.kafka.common.security.auth",
"l":"AuthenticateCallbackHandler"},{"p":"org.apache.kafka.common.security.auth","l":"AuthenticationContext"},{"p":"org.apache.kafka.common.errors","l":"AuthenticationException"},{"p":"org.apache.kafka.server.authorizer","l":"AuthorizableRequestContext"},{"p":"org.apache.kafka.common.errors","l":"AuthorizationException"},{"p":"org.apache.kafka.server.authorizer","l":"AuthorizationResult"},{"p":"org.apache.kafka.server.authorizer","l":"Authorizer"},{"p":"org.apache.kafka.common.errors","l":"AuthorizerNotReadyException"},{"p":"org.apache.kafka.server.authorizer","l":"AuthorizerServerInfo"},{"p":"org.apache.kafka.streams","l":"AutoOffsetReset"},{"p":"org.apache.kafka.streams","l":"Topology.AutoOffsetReset"},{"p":"org.apache.kafka.common.metrics.stats","l":"Avg"},{"p":"org.apache.kafka.common","l":"KafkaFuture.BaseFunction"},{"p":"org.apache.kafka.streams.processor","l":"BatchingStateRestoreCallback"},{"p":"org.apache.kafka.common","l":"KafkaFuture.BiConsumer"},{"p":"org.apache.kafka.common.metrics.stats","l":"Histogram.BinScheme"},{"p":"org.apache.kafka.common.serialization","l":"BooleanDeserializer"},{"p":"org.apache.kafka.common.serialization","l":"Serdes.BooleanSerde"},{"p":"org.apache.kafka.common.serialization","l":"BooleanSerializer"},{"p":"org.apache.kafka.streams.kstream","l":"Branched"},{"p":"org.apache.kafka.streams.kstream","l":"BranchedKStream"},{"p":"org.apache.kafka.common.errors","l":"BrokerIdNotRegisteredException"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"BrokerJwtValidator"},{"p":"org.apache.kafka.common.errors","l":"BrokerNotAvailableException"},{"p":"org.apache.kafka.streams.errors","l":"BrokerNotFoundException"},{"p":"org.apache.kafka.common.metrics.stats","l":"Percentiles.BucketSizing"},{"p":"org.apache.kafka.streams.kstream","l":"Suppressed.BufferConfig"},{"p":"org.apache.kafka.clients.producer","l":"BufferExhaustedException"},{"p":"org.apache.kafka.streams.state","l":"BuiltInDslStoreSuppliers"},{"p":"org.apache.kafka.common.serialization","l":"ByteArrayDeserializer"},{"p":"org.apache.kafka.common.serialization","l":"Serdes.ByteArraySerde"},{"p":"org.apache.kafka.common.serialization","l":"ByteArraySerializer"},{"p":"org.apache.kafka.common.serialization","l":"ByteBufferDeserializer"},{"p":"org.apache.kafka.common.serialization","l":"Serdes.ByteBufferSerde"},{"p":"org.apache.kafka.common.serialization","l":"ByteBufferSerializer"},{"p":"org.apache.kafka.common.serialization","l":"BytesDeserializer"},{"p":"org.apache.kafka.common.serialization","l":"Serdes.BytesSerde"},{"p":"org.apache.kafka.common.serialization","l":"BytesSerializer"},{"p":"org.apache.kafka.clients.producer","l":"Callback"},{"p":"org.apache.kafka.streams.processor","l":"Cancellable"},{"p":"org.apache.kafka.streams.processor.api","l":"MockProcessorContext.CapturedForward"},{"p":"org.apache.kafka.streams.processor","l":"MockProcessorContext.CapturedForward"},{"p":"org.apache.kafka.streams.processor.api","l":"MockProcessorContext.CapturedPunctuator"},{"p":"org.apache.kafka.streams.processor","l":"MockProcessorContext.CapturedPunctuator"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.CaseInsensitiveValidString"},{"p":"org.apache.kafka.connect.mirror","l":"Checkpoint"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"BrokerJwtValidator.ClaimSupplier"},{"p":"org.apache.kafka.clients.admin","l":"ClassicGroupDescription"},{"p":"org.apache.kafka.common","l":"ClassicGroupState"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"ClientCredentialsJwtRetriever"},{"p":"org.apache.ka
fka.streams","l":"ClientInstanceIds"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"ClientJwtValidator"},{"p":"org.apache.kafka.clients.admin","l":"ClientMetricsResourceListing"},{"p":"org.apache.kafka.common.quota","l":"ClientQuotaAlteration"},{"p":"org.apache.kafka.server.quota","l":"ClientQuotaCallback"},{"p":"org.apache.kafka.common.quota","l":"ClientQuotaEntity"},{"p":"org.apache.kafka.server.quota","l":"ClientQuotaEntity"},{"p":"org.apache.kafka.common.quota","l":"ClientQuotaFilter"},{"p":"org.apache.kafka.common.quota","l":"ClientQuotaFilterComponent"},{"p":"org.apache.kafka.server.quota","l":"ClientQuotaType"},{"p":"org.apache.kafka.server.telemetry","l":"ClientTelemetry"},{"p":"org.apache.kafka.server.telemetry","l":"ClientTelemetryPayload"},{"p":"org.apache.kafka.server.telemetry","l":"ClientTelemetryReceiver"},{"p":"org.apache.kafka.connect.connector.policy","l":"ConnectorClientConfigRequest.ClientType"},{"p":"org.apache.kafka.clients.consumer","l":"CloseOptions"},{"p":"org.apache.kafka.streams","l":"KafkaStreams.CloseOptions"},{"p":"org.apache.kafka.common","l":"Cluster"},{"p":"org.apache.kafka.common.errors","l":"ClusterAuthorizationException"},{"p":"org.apache.kafka.common","l":"ClusterResource"},{"p":"org.apache.kafka.common","l":"ClusterResourceListener"},{"p":"org.apache.kafka.streams.kstream","l":"CogroupedKStream"},{"p":"org.apache.kafka.streams.processor","l":"CommitCallback"},{"p":"org.apache.kafka.clients.consumer","l":"CommitFailedException"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.CompositeValidator"},{"p":"org.apache.kafka.common.metrics","l":"CompoundStat"},{"p":"org.apache.kafka.common.errors","l":"ConcurrentTransactionsException"},{"p":"org.apache.kafka.clients.admin","l":"Config"},{"p":"org.apache.kafka.common.config","l":"Config"},{"p":"org.apache.kafka.common.config","l":"ConfigChangeCallback"},{"p":"org.apache.kafka.common.config","l":"ConfigData"},{"p":"org.apache.kafka.common.config","l":"ConfigDef"},{"p":"org.apache.kafka.server.quota","l":"ClientQuotaEntity.ConfigEntity"},{"p":"org.apache.kafka.server.quota","l":"ClientQuotaEntity.ConfigEntityType"},{"p":"org.apache.kafka.clients.admin","l":"ConfigEntry"},{"p":"org.apache.kafka.common.config","l":"ConfigException"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.ConfigKey"},{"p":"org.apache.kafka.common.config.provider","l":"ConfigProvider"},{"p":"org.apache.kafka.common.config","l":"ConfigResource"},{"p":"org.apache.kafka.clients.admin","l":"ConfigEntry.ConfigSource"},{"p":"org.apache.kafka.clients.admin","l":"ConfigEntry.ConfigSynonym"},{"p":"org.apache.kafka.common.config","l":"ConfigTransformer"},{"p":"org.apache.kafka.common.config","l":"ConfigTransformerResult"},{"p":"org.apache.kafka.clients.admin","l":"ConfigEntry.ConfigType"},{"p":"org.apache.kafka.common","l":"Configurable"},{"p":"org.apache.kafka.common.config","l":"ConfigValue"},{"p":"org.apache.kafka.connect.health","l":"ConnectClusterDetails"},{"p":"org.apache.kafka.connect.health","l":"ConnectClusterState"},{"p":"org.apache.kafka.streams.processor","l":"ConnectedStoreProvider"},{"p":"org.apache.kafka.connect.errors","l":"ConnectException"},{"p":"org.apache.kafka.connect.header","l":"ConnectHeaders"},{"p":"org.apache.kafka.connect.connector","l":"Connector"},{"p":"org.apache.kafka.connect.connector.policy","l":"ConnectorClientConfigOverridePolicy"},{"p":"org.apache.kafka.connect.connector.policy","l":"ConnectorClientConfigRequest"},{"p":"org.apache.kafka.connect.connector","l":"ConnectorContext"},{"p":"org.apache
.kafka.connect.health","l":"ConnectorHealth"},{"p":"org.apache.kafka.connect.health","l":"ConnectorState"},{"p":"org.apache.kafka.connect.source","l":"ConnectorTransactionBoundaries"},{"p":"org.apache.kafka.connect.health","l":"ConnectorType"},{"p":"org.apache.kafka.connect.util","l":"ConnectorUtils"},{"p":"org.apache.kafka.connect.connector","l":"ConnectRecord"},{"p":"org.apache.kafka.connect.rest","l":"ConnectRestExtension"},{"p":"org.apache.kafka.connect.rest","l":"ConnectRestExtensionContext"},{"p":"org.apache.kafka.connect.data","l":"ConnectSchema"},{"p":"org.apache.kafka.common.metrics.stats","l":"Histogram.ConstantBinScheme"},{"p":"org.apache.kafka.streams.kstream","l":"Consumed"},{"p":"org.apache.kafka.clients.consumer","l":"Consumer"},{"p":"org.apache.kafka.clients.consumer","l":"ConsumerConfig"},{"p":"org.apache.kafka.clients.admin","l":"ConsumerGroupDescription"},{"p":"org.apache.kafka.clients.admin","l":"ConsumerGroupListing"},{"p":"org.apache.kafka.clients.consumer","l":"ConsumerGroupMetadata"},{"p":"org.apache.kafka.coordinator.group.api.assignor","l":"ConsumerGroupPartitionAssignor"},{"p":"org.apache.kafka.common","l":"ConsumerGroupState"},{"p":"org.apache.kafka.clients.consumer","l":"ConsumerInterceptor"},{"p":"org.apache.kafka.clients.consumer","l":"ConsumerPartitionAssignor"},{"p":"org.apache.kafka.clients.consumer","l":"ConsumerRebalanceListener"},{"p":"org.apache.kafka.clients.consumer","l":"ConsumerRecord"},{"p":"org.apache.kafka.clients.consumer","l":"ConsumerRecords"},{"p":"org.apache.kafka.streams.processor.api","l":"ContextualFixedKeyProcessor"},{"p":"org.apache.kafka.streams.processor.api","l":"ContextualProcessor"},{"p":"org.apache.kafka.common.errors","l":"ControllerMovedException"},{"p":"org.apache.kafka.connect.storage","l":"Converter"},{"p":"org.apache.kafka.connect.storage","l":"ConverterConfig"},{"p":"org.apache.kafka.connect.storage","l":"ConverterType"},{"p":"org.apache.kafka.clients.consumer","l":"CooperativeStickyAssignor"},{"p":"org.apache.kafka.common.errors","l":"CoordinatorLoadInProgressException"},{"p":"org.apache.kafka.common.errors","l":"CoordinatorNotAvailableException"},{"p":"org.apache.kafka.common.errors","l":"CorruptRecordException"},{"p":"org.apache.kafka.clients.admin","l":"CreateAclsOptions"},{"p":"org.apache.kafka.clients.admin","l":"CreateAclsResult"},{"p":"org.apache.kafka.clients.admin","l":"CreateDelegationTokenOptions"},{"p":"org.apache.kafka.clients.admin","l":"CreateDelegationTokenResult"},{"p":"org.apache.kafka.clients.admin","l":"CreatePartitionsOptions"},{"p":"org.apache.kafka.clients.admin","l":"CreatePartitionsResult"},{"p":"org.apache.kafka.server.policy","l":"CreateTopicPolicy"},{"p":"org.apache.kafka.clients.admin","l":"CreateTopicsOptions"},{"p":"org.apache.kafka.clients.admin","l":"CreateTopicsResult"},{"p":"org.apache.kafka.common.metrics.stats","l":"CumulativeCount"},{"p":"org.apache.kafka.common.metrics.stats","l":"CumulativeSum"},{"p":"org.apache.kafka.server.log.remote.storage","l":"RemoteLogSegmentMetadata.CustomMetadata"},{"p":"org.apache.kafka.connect.errors","l":"DataException"},{"p":"org.apache.kafka.connect.data","l":"Date"},{"p":"org.apache.kafka.connect.data","l":"Decimal"},{"p":"org.apache.kafka.tools.api","l":"Decoder"},{"p":"org.apache.kafka.tools.api","l":"DefaultDecoder"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"DefaultJwtRetriever"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"DefaultJwtValidator"},{"p":"org.apache.kafka.streams.errors","l":"DefaultProductionExceptionHandler"
},{"p":"org.apache.kafka.connect.mirror","l":"DefaultReplicationPolicy"},{"p":"org.apache.kafka.common.security.token.delegation","l":"DelegationToken"},{"p":"org.apache.kafka.common.errors","l":"DelegationTokenAuthorizationException"},{"p":"org.apache.kafka.common.errors","l":"DelegationTokenDisabledException"},{"p":"org.apache.kafka.common.errors","l":"DelegationTokenExpiredException"},{"p":"org.apache.kafka.common.errors","l":"DelegationTokenNotFoundException"},{"p":"org.apache.kafka.common.errors","l":"DelegationTokenOwnerMismatchException"},{"p":"org.apache.kafka.clients.admin","l":"DeleteAclsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DeleteAclsResult"},{"p":"org.apache.kafka.clients.admin","l":"DeleteConsumerGroupOffsetsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DeleteConsumerGroupOffsetsResult"},{"p":"org.apache.kafka.clients.admin","l":"DeleteConsumerGroupsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DeleteConsumerGroupsResult"},{"p":"org.apache.kafka.clients.admin","l":"DeletedRecords"},{"p":"org.apache.kafka.clients.admin","l":"DeleteRecordsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DeleteRecordsResult"},{"p":"org.apache.kafka.clients.admin","l":"DeleteShareGroupOffsetsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DeleteShareGroupOffsetsResult"},{"p":"org.apache.kafka.clients.admin","l":"DeleteShareGroupsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DeleteShareGroupsResult"},{"p":"org.apache.kafka.clients.admin","l":"DeleteStreamsGroupOffsetsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DeleteStreamsGroupOffsetsResult"},{"p":"org.apache.kafka.clients.admin","l":"DeleteStreamsGroupsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DeleteStreamsGroupsResult"},{"p":"org.apache.kafka.clients.admin","l":"DeleteTopicsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DeleteTopicsResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeAclsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeAclsResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeClassicGroupsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeClassicGroupsResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeClientQuotasOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeClientQuotasResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeClusterOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeClusterResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeConfigsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeConfigsResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeConsumerGroupsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeConsumerGroupsResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeDelegationTokenOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeDelegationTokenResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeFeaturesOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeFeaturesResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeLogDirsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeLogDirsResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeMetadataQuorumOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeMetadataQuorumResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeProducersOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeProducersResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeReplicaLogDirsOptions"},{"p":"org.apache.kafka.clients.admin","l":
"DescribeReplicaLogDirsResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeShareGroupsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeShareGroupsResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeStreamsGroupsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeStreamsGroupsResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeTopicsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeTopicsResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeTransactionsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeTransactionsResult"},{"p":"org.apache.kafka.clients.admin","l":"DescribeUserScramCredentialsOptions"},{"p":"org.apache.kafka.clients.admin","l":"DescribeUserScramCredentialsResult"},{"p":"org.apache.kafka.streams.errors","l":"DeserializationExceptionHandler"},{"p":"org.apache.kafka.common.errors","l":"RecordDeserializationException.DeserializationExceptionOrigin"},{"p":"org.apache.kafka.streams.errors","l":"DeserializationExceptionHandler.DeserializationHandlerResponse"},{"p":"org.apache.kafka.common.serialization","l":"Deserializer"},{"p":"org.apache.kafka.common.config.provider","l":"DirectoryConfigProvider"},{"p":"org.apache.kafka.common.errors","l":"DisconnectException"},{"p":"org.apache.kafka.common.serialization","l":"DoubleDeserializer"},{"p":"org.apache.kafka.common.serialization","l":"Serdes.DoubleSerde"},{"p":"org.apache.kafka.common.serialization","l":"DoubleSerializer"},{"p":"org.apache.kafka.streams.state","l":"DslKeyValueParams"},{"p":"org.apache.kafka.streams.state","l":"DslSessionParams"},{"p":"org.apache.kafka.streams.state","l":"DslStoreSuppliers"},{"p":"org.apache.kafka.streams.state","l":"DslWindowParams"},{"p":"org.apache.kafka.common.errors","l":"DuplicateBrokerRegistrationException"},{"p":"org.apache.kafka.common.errors","l":"DuplicateResourceException"},{"p":"org.apache.kafka.common.errors","l":"DuplicateSequenceException"},{"p":"org.apache.kafka.common.errors","l":"DuplicateVoterException"},{"p":"org.apache.kafka.streams.kstream","l":"Suppressed.EagerBufferConfig"},{"p":"org.apache.kafka.clients.admin","l":"OffsetSpec.EarliestLocalSpec"},{"p":"org.apache.kafka.clients.admin","l":"OffsetSpec.EarliestSpec"},{"p":"org.apache.kafka.common.errors","l":"ElectionNotNeededException"},{"p":"org.apache.kafka.common","l":"ElectionType"},{"p":"org.apache.kafka.clients.admin","l":"ElectLeadersOptions"},{"p":"org.apache.kafka.clients.admin","l":"ElectLeadersResult"},{"p":"org.apache.kafka.common.errors","l":"EligibleLeadersNotAvailableException"},{"p":"org.apache.kafka.streams.kstream","l":"EmitStrategy"},{"p":"org.apache.kafka.common","l":"Endpoint"},{"p":"org.apache.kafka.clients.admin","l":"StreamsGroupMemberDescription.Endpoint"},{"p":"org.apache.kafka.clients.admin","l":"EndpointType"},{"p":"org.apache.kafka.common.config.provider","l":"EnvVarConfigProvider"},{"p":"org.apache.kafka.connect.sink","l":"ErrantRecordReporter"},{"p":"org.apache.kafka.streams.errors","l":"ErrorHandlerContext"},{"p":"org.apache.kafka.common.annotation","l":"InterfaceStability.Evolving"},{"p":"org.apache.kafka.connect.source","l":"ExactlyOnceSupport"},{"p":"org.apache.kafka.clients.admin","l":"ExpireDelegationTokenOptions"},{"p":"org.apache.kafka.clients.admin","l":"ExpireDelegationTokenResult"},{"p":"org.apache.kafka.streams.processor","l":"FailOnInvalidTimestamp"},{"p":"org.apache.kafka.streams.query","l":"FailureReason"},{"p":"org.apache.kafka.clients.admin","l":"FeatureMetadata"},{"p":"org.apache.kafka.clients.admin","l":"Featu
reUpdate"},{"p":"org.apache.kafka.common.errors","l":"FeatureUpdateFailedException"},{"p":"org.apache.kafka.common.errors","l":"FencedInstanceIdException"},{"p":"org.apache.kafka.common.errors","l":"FencedLeaderEpochException"},{"p":"org.apache.kafka.common.errors","l":"FencedMemberEpochException"},{"p":"org.apache.kafka.common.errors","l":"FencedStateEpochException"},{"p":"org.apache.kafka.clients.admin","l":"FenceProducersOptions"},{"p":"org.apache.kafka.clients.admin","l":"FenceProducersResult"},{"p":"org.apache.kafka.common.errors","l":"FetchSessionIdNotFoundException"},{"p":"org.apache.kafka.common.errors","l":"FetchSessionTopicIdException"},{"p":"org.apache.kafka.connect.data","l":"Field"},{"p":"org.apache.kafka.common.config.provider","l":"FileConfigProvider"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"FileJwtRetriever"},{"p":"org.apache.kafka.clients.admin","l":"DeleteAclsResult.FilterResult"},{"p":"org.apache.kafka.clients.admin","l":"DeleteAclsResult.FilterResults"},{"p":"org.apache.kafka.clients.admin","l":"FinalizedVersionRange"},{"p":"org.apache.kafka.streams.processor.api","l":"FixedKeyProcessor"},{"p":"org.apache.kafka.streams.processor.api","l":"FixedKeyProcessorContext"},{"p":"org.apache.kafka.streams.processor.api","l":"FixedKeyProcessorSupplier"},{"p":"org.apache.kafka.streams.processor.api","l":"FixedKeyRecord"},{"p":"org.apache.kafka.common.serialization","l":"FloatDeserializer"},{"p":"org.apache.kafka.common.serialization","l":"Serdes.FloatSerde"},{"p":"org.apache.kafka.common.serialization","l":"FloatSerializer"},{"p":"org.apache.kafka.streams.kstream","l":"ForeachAction"},{"p":"org.apache.kafka.streams.kstream","l":"ForeachProcessor"},{"p":"org.apache.kafka.clients.admin","l":"ForwardingAdmin"},{"p":"org.apache.kafka.common.metrics.stats","l":"Frequencies"},{"p":"org.apache.kafka.common.metrics.stats","l":"Frequency"},{"p":"org.apache.kafka.common.metrics","l":"Gauge"},{"p":"org.apache.kafka.streams.kstream","l":"GlobalKTable"},{"p":"org.apache.kafka.streams","l":"TopologyDescription.GlobalStore"},{"p":"org.apache.kafka.clients.consumer","l":"ConsumerPartitionAssignor.GroupAssignment"},{"p":"org.apache.kafka.coordinator.group.api.assignor","l":"GroupAssignment"},{"p":"org.apache.kafka.common.errors","l":"GroupAuthorizationException"},{"p":"org.apache.kafka.streams.kstream","l":"Grouped"},{"p":"org.apache.kafka.common.errors","l":"GroupIdNotFoundException"},{"p":"org.apache.kafka.clients.admin","l":"GroupListing"},{"p":"org.apache.kafka.common.errors","l":"GroupMaxSizeReachedException"},{"p":"org.apache.kafka.clients.consumer","l":"CloseOptions.GroupMembershipOperation"},{"p":"org.apache.kafka.common.errors","l":"GroupNotEmptyException"},{"p":"org.apache.kafka.clients.consumer","l":"GroupProtocol"},{"p":"org.apache.kafka.streams","l":"GroupProtocol"},{"p":"org.apache.kafka.coordinator.group.api.assignor","l":"GroupSpec"},{"p":"org.apache.kafka.common","l":"GroupState"},{"p":"org.apache.kafka.common.errors","l":"GroupSubscribedToTopicException"},{"p":"org.apache.kafka.clients.consumer","l":"ConsumerPartitionAssignor.GroupSubscription"},{"p":"org.apache.kafka.common","l":"GroupType"},{"p":"org.apache.kafka.common.header","l":"Header"},{"p":"org.apache.kafka.connect.header","l":"Header"},{"p":"org.apache.kafka.connect.storage","l":"HeaderConverter"},{"p":"org.apache.kafka.common.header","l":"Headers"},{"p":"org.apache.kafka.connect.header","l":"Headers"},{"p":"org.apache.kafka.connect.header","l":"Headers.HeaderTransform"},{"p":"org.apache.kafka.connect.mir
ror","l":"Heartbeat"},{"p":"org.apache.kafka.common.metrics.stats","l":"Histogram"},{"p":"org.apache.kafka.streams.state","l":"HostInfo"},{"p":"org.apache.kafka.connect.mirror","l":"IdentityReplicationPolicy"},{"p":"org.apache.kafka.common.errors","l":"IllegalGenerationException"},{"p":"org.apache.kafka.common.errors","l":"IllegalSaslStateException"},{"p":"org.apache.kafka.connect.errors","l":"IllegalWorkerStateException"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.Importance"},{"p":"org.apache.kafka.common.errors","l":"InconsistentClusterIdException"},{"p":"org.apache.kafka.common.errors","l":"InconsistentGroupProtocolException"},{"p":"org.apache.kafka.common.errors","l":"InconsistentTopicIdException"},{"p":"org.apache.kafka.common.errors","l":"InconsistentVoterSetException"},{"p":"org.apache.kafka.server.log.remote.storage","l":"RemoteStorageManager.IndexType"},{"p":"org.apache.kafka.common.errors","l":"IneligibleReplicaException"},{"p":"org.apache.kafka.streams.kstream","l":"Initializer"},{"p":"org.apache.kafka.streams.state","l":"BuiltInDslStoreSuppliers.InMemoryDslStoreSuppliers"},{"p":"org.apache.kafka.streams.query","l":"StateQueryRequest.InStore"},{"p":"org.apache.kafka.tools.api","l":"IntegerDecoder"},{"p":"org.apache.kafka.common.serialization","l":"IntegerDeserializer"},{"p":"org.apache.kafka.common.serialization","l":"Serdes.IntegerSerde"},{"p":"org.apache.kafka.common.serialization","l":"IntegerSerializer"},{"p":"org.apache.kafka.common.annotation","l":"InterfaceStability"},{"p":"org.apache.kafka.streams","l":"StreamsConfig.InternalConfig"},{"p":"org.apache.kafka.streams.processor.api","l":"InternalFixedKeyRecordFactory"},{"p":"org.apache.kafka.common.errors","l":"InterruptException"},{"p":"org.apache.kafka.common.errors","l":"InvalidCommitOffsetSizeException"},{"p":"org.apache.kafka.common.errors","l":"InvalidConfigurationException"},{"p":"org.apache.kafka.common.errors","l":"InvalidFetchSessionEpochException"},{"p":"org.apache.kafka.common.errors","l":"InvalidFetchSizeException"},{"p":"org.apache.kafka.common.errors","l":"InvalidGroupIdException"},{"p":"org.apache.kafka.common.errors","l":"InvalidMetadataException"},{"p":"org.apache.kafka.clients.consumer","l":"InvalidOffsetException"},{"p":"org.apache.kafka.common.errors","l":"InvalidOffsetException"},{"p":"org.apache.kafka.common.errors","l":"InvalidPartitionsException"},{"p":"org.apache.kafka.common.errors","l":"InvalidPidMappingException"},{"p":"org.apache.kafka.common.errors","l":"InvalidPrincipalTypeException"},{"p":"org.apache.kafka.common.errors","l":"InvalidProducerEpochException"},{"p":"org.apache.kafka.common","l":"InvalidRecordException"},{"p":"org.apache.kafka.common.errors","l":"InvalidRecordStateException"},{"p":"org.apache.kafka.common.errors","l":"InvalidRegistrationException"},{"p":"org.apache.kafka.common.errors","l":"InvalidRegularExpression"},{"p":"org.apache.kafka.common.errors","l":"InvalidReplicaAssignmentException"},{"p":"org.apache.kafka.common.errors","l":"InvalidReplicationFactorException"},{"p":"org.apache.kafka.common.errors","l":"InvalidRequestException"},{"p":"org.apache.kafka.common.errors","l":"InvalidRequiredAcksException"},{"p":"org.apache.kafka.common.errors","l":"InvalidSessionTimeoutException"},{"p":"org.apache.kafka.common.errors","l":"InvalidShareSessionEpochException"},{"p":"org.apache.kafka.streams.errors","l":"InvalidStateStoreException"},{"p":"org.apache.kafka.streams.errors","l":"InvalidStateStorePartitionException"},{"p":"org.apache.kafka.common.errors","l":"InvalidTime
stampException"},{"p":"org.apache.kafka.common.errors","l":"InvalidTopicException"},{"p":"org.apache.kafka.common.errors","l":"InvalidTxnStateException"},{"p":"org.apache.kafka.common.errors","l":"InvalidTxnTimeoutException"},{"p":"org.apache.kafka.common.errors","l":"InvalidUpdateVersionException"},{"p":"org.apache.kafka.common.errors","l":"InvalidVoterKeyException"},{"p":"org.apache.kafka.common","l":"IsolationLevel"},{"p":"org.apache.kafka.common.metrics","l":"JmxReporter"},{"p":"org.apache.kafka.streams.kstream","l":"Joined"},{"p":"org.apache.kafka.streams.kstream","l":"JoinWindows"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"JwtBearerJwtRetriever"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"JwtRetriever"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"JwtRetrieverException"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"JwtValidator"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"JwtValidatorException"},{"p":"org.apache.kafka.clients.admin","l":"KafkaAdminClient"},{"p":"org.apache.kafka.streams","l":"KafkaClientSupplier"},{"p":"org.apache.kafka.clients.consumer","l":"KafkaConsumer"},{"p":"org.apache.kafka.common","l":"KafkaException"},{"p":"org.apache.kafka.common","l":"KafkaFuture"},{"p":"org.apache.kafka.common.metrics","l":"KafkaMetric"},{"p":"org.apache.kafka.common.metrics","l":"KafkaMetricsContext"},{"p":"org.apache.kafka.common.security.auth","l":"KafkaPrincipal"},{"p":"org.apache.kafka.common.security.auth","l":"KafkaPrincipalBuilder"},{"p":"org.apache.kafka.common.security.auth","l":"KafkaPrincipalSerde"},{"p":"org.apache.kafka.clients.producer","l":"KafkaProducer"},{"p":"org.apache.kafka.clients.consumer","l":"KafkaShareConsumer"},{"p":"org.apache.kafka.common.errors","l":"KafkaStorageException"},{"p":"org.apache.kafka.streams","l":"KafkaStreams"},{"p":"org.apache.kafka.streams.processor.assignment","l":"KafkaStreamsAssignment"},{"p":"org.apache.kafka.streams.processor.assignment","l":"KafkaStreamsState"},{"p":"org.apache.kafka.streams.query","l":"KeyQuery"},{"p":"org.apache.kafka.streams","l":"KeyQueryMetadata"},{"p":"org.apache.kafka.streams","l":"KeyValue"},{"p":"org.apache.kafka.streams.state","l":"KeyValueBytesStoreSupplier"},{"p":"org.apache.kafka.streams.state","l":"KeyValueIterator"},{"p":"org.apache.kafka.streams.kstream","l":"KeyValueMapper"},{"p":"org.apache.kafka.streams.state","l":"KeyValueStore"},{"p":"org.apache.kafka.streams.state","l":"QueryableStoreTypes.KeyValueStoreType"},{"p":"org.apache.kafka.streams.kstream","l":"KGroupedStream"},{"p":"org.apache.kafka.streams.kstream","l":"KGroupedTable"},{"p":"org.apache.kafka.streams.kstream","l":"KStream"},{"p":"org.apache.kafka.streams.kstream","l":"KTable"},{"p":"org.apache.kafka.streams","l":"LagInfo"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.LambdaValidator"},{"p":"org.apache.kafka.clients.admin","l":"OffsetSpec.LatestSpec"},{"p":"org.apache.kafka.clients.admin","l":"OffsetSpec.LatestTieredSpec"},{"p":"org.apache.kafka.common.errors","l":"LeaderNotAvailableException"},{"p":"org.apache.kafka.common.metrics.stats","l":"Histogram.LinearBinScheme"},{"p":"org.apache.kafka.clients.admin","l":"ListClientMetricsResourcesOptions"},{"p":"org.apache.kafka.clients.admin","l":"ListClientMetricsResourcesResult"},{"p":"org.apache.kafka.clients.admin","l":"ListConfigResourcesOptions"},{"p":"org.apache.kafka.clients.admin","l":"ListConfigResourcesResult"},{"p":"org.apache.kafka.clients.admin","l":"ListConsumerGroupOffsetsOptions"},{"p":"org.apache.kafka.clie
nts.admin","l":"ListConsumerGroupOffsetsResult"},{"p":"org.apache.kafka.clients.admin","l":"ListConsumerGroupOffsetsSpec"},{"p":"org.apache.kafka.clients.admin","l":"ListConsumerGroupsOptions"},{"p":"org.apache.kafka.clients.admin","l":"ListConsumerGroupsResult"},{"p":"org.apache.kafka.common.serialization","l":"ListDeserializer"},{"p":"org.apache.kafka.common.errors","l":"ListenerNotFoundException"},{"p":"org.apache.kafka.clients.admin","l":"ListGroupsOptions"},{"p":"org.apache.kafka.clients.admin","l":"ListGroupsResult"},{"p":"org.apache.kafka.clients.admin","l":"ListOffsetsOptions"},{"p":"org.apache.kafka.clients.admin","l":"ListOffsetsResult"},{"p":"org.apache.kafka.clients.admin","l":"ListOffsetsResult.ListOffsetsResultInfo"},{"p":"org.apache.kafka.clients.admin","l":"ListPartitionReassignmentsOptions"},{"p":"org.apache.kafka.clients.admin","l":"ListPartitionReassignmentsResult"},{"p":"org.apache.kafka.common.serialization","l":"Serdes.ListSerde"},{"p":"org.apache.kafka.common.serialization","l":"ListSerializer"},{"p":"org.apache.kafka.clients.admin","l":"ListShareGroupOffsetsOptions"},{"p":"org.apache.kafka.clients.admin","l":"ListShareGroupOffsetsResult"},{"p":"org.apache.kafka.clients.admin","l":"ListShareGroupOffsetsSpec"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.ListSize"},{"p":"org.apache.kafka.clients.admin","l":"ListStreamsGroupOffsetsOptions"},{"p":"org.apache.kafka.clients.admin","l":"ListStreamsGroupOffsetsResult"},{"p":"org.apache.kafka.clients.admin","l":"ListStreamsGroupOffsetsSpec"},{"p":"org.apache.kafka.clients.admin","l":"ListTopicsOptions"},{"p":"org.apache.kafka.clients.admin","l":"ListTopicsResult"},{"p":"org.apache.kafka.clients.admin","l":"ListTransactionsOptions"},{"p":"org.apache.kafka.clients.admin","l":"ListTransactionsResult"},{"p":"org.apache.kafka.streams.errors","l":"LockException"},{"p":"org.apache.kafka.streams.errors","l":"LogAndContinueExceptionHandler"},{"p":"org.apache.kafka.streams.errors","l":"LogAndContinueProcessingExceptionHandler"},{"p":"org.apache.kafka.streams.errors","l":"LogAndFailExceptionHandler"},{"p":"org.apache.kafka.streams.errors","l":"LogAndFailProcessingExceptionHandler"},{"p":"org.apache.kafka.streams.processor","l":"LogAndSkipOnInvalidTimestamp"},{"p":"org.apache.kafka.clients.admin","l":"LogDirDescription"},{"p":"org.apache.kafka.common.errors","l":"LogDirNotFoundException"},{"p":"org.apache.kafka.common.security.auth","l":"Login"},{"p":"org.apache.kafka.common.config","l":"LogLevelConfig"},{"p":"org.apache.kafka.server.log.remote.storage","l":"LogSegmentData"},{"p":"org.apache.kafka.clients.consumer","l":"LogTruncationException"},{"p":"org.apache.kafka.tools.api","l":"LongDecoder"},{"p":"org.apache.kafka.common.serialization","l":"LongDeserializer"},{"p":"org.apache.kafka.common.serialization","l":"Serdes.LongSerde"},{"p":"org.apache.kafka.common.serialization","l":"LongSerializer"},{"p":"org.apache.kafka.streams.kstream","l":"Materialized"},{"p":"org.apache.kafka.common.metrics.stats","l":"Max"},{"p":"org.apache.kafka.clients.admin","l":"OffsetSpec.MaxTimestampSpec"},{"p":"org.apache.kafka.common.metrics","l":"Measurable"},{"p":"org.apache.kafka.common.metrics","l":"MeasurableStat"},{"p":"org.apache.kafka.clients.admin","l":"MemberAssignment"},{"p":"org.apache.kafka.coordinator.group.api.assignor","l":"MemberAssignment"},{"p":"org.apache.kafka.clients.admin","l":"MemberDescription"},{"p":"org.apache.kafka.common.errors","l":"MemberIdRequiredException"},{"p":"org.apache.kafka.coordinator.group.api.assignor","l":"Me
mberSubscription"},{"p":"org.apache.kafka.clients.admin","l":"MemberToRemove"},{"p":"org.apache.kafka.streams.kstream","l":"Merger"},{"p":"org.apache.kafka.common","l":"MessageFormatter"},{"p":"org.apache.kafka.common.metrics.stats","l":"Meter"},{"p":"org.apache.kafka.common","l":"Metric"},{"p":"org.apache.kafka.common.metrics","l":"MetricConfig"},{"p":"org.apache.kafka.common","l":"MetricName"},{"p":"org.apache.kafka.common","l":"MetricNameTemplate"},{"p":"org.apache.kafka.common.metrics","l":"Metrics"},{"p":"org.apache.kafka.common.metrics","l":"MetricsContext"},{"p":"org.apache.kafka.common.metrics","l":"MetricsReporter"},{"p":"org.apache.kafka.common.metrics","l":"MetricValueProvider"},{"p":"org.apache.kafka.common.metrics.stats","l":"Min"},{"p":"org.apache.kafka.connect.mirror","l":"MirrorClient"},{"p":"org.apache.kafka.connect.mirror","l":"MirrorClientConfig"},{"p":"org.apache.kafka.common.errors","l":"MismatchedEndpointTypeException"},{"p":"org.apache.kafka.streams.errors","l":"MissingSourceTopicException"},{"p":"org.apache.kafka.connect.tools","l":"MockConnector"},{"p":"org.apache.kafka.clients.consumer","l":"MockConsumer"},{"p":"org.apache.kafka.streams.processor.api","l":"MockProcessorContext"},{"p":"org.apache.kafka.streams.processor","l":"MockProcessorContext"},{"p":"org.apache.kafka.clients.producer","l":"MockProducer"},{"p":"org.apache.kafka.clients.consumer","l":"MockShareConsumer"},{"p":"org.apache.kafka.connect.tools","l":"MockSinkConnector"},{"p":"org.apache.kafka.connect.tools","l":"MockSinkTask"},{"p":"org.apache.kafka.connect.tools","l":"MockSourceConnector"},{"p":"org.apache.kafka.connect.tools","l":"MockSourceTask"},{"p":"org.apache.kafka.common.metrics","l":"Monitorable"},{"p":"org.apache.kafka.streams.processor.assignment","l":"TaskAssignmentUtils.MoveStandbyTaskPredicate"},{"p":"org.apache.kafka.streams.query","l":"MultiVersionedKeyQuery"},{"p":"org.apache.kafka.streams.kstream","l":"Named"},{"p":"org.apache.kafka.common.metrics","l":"CompoundStat.NamedMeasurable"},{"p":"org.apache.kafka.common.errors","l":"NetworkException"},{"p":"org.apache.kafka.common.errors","l":"NewLeaderElectedException"},{"p":"org.apache.kafka.clients.admin","l":"NewPartitionReassignment"},{"p":"org.apache.kafka.clients.admin","l":"NewPartitions"},{"p":"org.apache.kafka.clients.admin","l":"NewTopic"},{"p":"org.apache.kafka.common","l":"Node"},{"p":"org.apache.kafka.clients.admin","l":"QuorumInfo.Node"},{"p":"org.apache.kafka.streams","l":"TopologyDescription.Node"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.NonEmptyString"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.NonEmptyStringWithoutControlChars"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.NonNullValidator"},{"p":"org.apache.kafka.clients.consumer","l":"NoOffsetForPartitionException"},{"p":"org.apache.kafka.common.errors","l":"NoReassignmentInProgressException"},{"p":"org.apache.kafka.common.errors","l":"NotControllerException"},{"p":"org.apache.kafka.common.errors","l":"NotCoordinatorException"},{"p":"org.apache.kafka.common.errors","l":"NotEnoughReplicasAfterAppendException"},{"p":"org.apache.kafka.common.errors","l":"NotEnoughReplicasException"},{"p":"org.apache.kafka.connect.errors","l":"NotFoundException"},{"p":"org.apache.kafka.common.errors","l":"NotLeaderOrFollowerException"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"OAuthBearerExtensionsValidatorCallback"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"OAuthBearerLoginCallbackHandler"},{"p":"org.apache.kafka.common.security
.oauthbearer","l":"OAuthBearerLoginModule"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"OAuthBearerToken"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"OAuthBearerTokenCallback"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"OAuthBearerValidatorCallback"},{"p":"org.apache.kafka.common.security.oauthbearer","l":"OAuthBearerValidatorCallbackHandler"},{"p":"org.apache.kafka.clients.consumer","l":"OffsetAndMetadata"},{"p":"org.apache.kafka.clients.consumer","l":"OffsetAndTimestamp"},{"p":"org.apache.kafka.clients.consumer","l":"OffsetCommitCallback"},{"p":"org.apache.kafka.common.errors","l":"OffsetMetadataTooLarge"},{"p":"org.apache.kafka.common.errors","l":"OffsetMovedToTieredStorageException"},{"p":"org.apache.kafka.common.errors","l":"OffsetNotAvailableException"},{"p":"org.apache.kafka.clients.consumer","l":"OffsetOutOfRangeException"},{"p":"org.apache.kafka.common.errors","l":"OffsetOutOfRangeException"},{"p":"org.apache.kafka.clients.consumer","l":"OffsetResetStrategy"},{"p":"org.apache.kafka.clients.admin","l":"OffsetSpec"},{"p":"org.apache.kafka.connect.storage","l":"OffsetStorageReader"},{"p":"org.apache.kafka.common.quota","l":"ClientQuotaAlteration.Op"},{"p":"org.apache.kafka.common.errors","l":"OperationNotAttemptedException"},{"p":"org.apache.kafka.clients.admin","l":"AlterConfigOp.OpType"},{"p":"org.apache.kafka.common.errors","l":"OutOfOrderSequenceException"},{"p":"org.apache.kafka.coordinator.group.api.assignor","l":"PartitionAssignor"},{"p":"org.apache.kafka.coordinator.group.api.assignor","l":"PartitionAssignorException"},{"p":"org.apache.kafka.clients.producer","l":"Partitioner"},{"p":"org.apache.kafka.common","l":"PartitionInfo"},{"p":"org.apache.kafka.clients.admin","l":"DescribeProducersResult.PartitionProducerState"},{"p":"org.apache.kafka.clients.admin","l":"PartitionReassignment"},{"p":"org.apache.kafka.common.resource","l":"PatternType"},{"p":"org.apache.kafka.common.metrics.stats","l":"Percentile"},{"p":"org.apache.kafka.common.metrics.stats","l":"Percentiles"},{"p":"org.apache.kafka.common.security.plain","l":"PlainAuthenticateCallback"},{"p":"org.apache.kafka.common.security.plain","l":"PlainLoginModule"},{"p":"org.apache.kafka.common.security.auth","l":"PlaintextAuthenticationContext"},{"p":"org.apache.kafka.common.metrics","l":"PluginMetrics"},{"p":"org.apache.kafka.common.errors","l":"PolicyViolationException"},{"p":"org.apache.kafka.streams.query","l":"Position"},{"p":"org.apache.kafka.streams.query","l":"PositionBound"},{"p":"org.apache.kafka.common.errors","l":"PositionOutOfRangeException"},{"p":"org.apache.kafka.connect.transforms.predicates","l":"Predicate"},{"p":"org.apache.kafka.streams.kstream","l":"Predicate"},{"p":"org.apache.kafka.common.errors","l":"PreferredLeaderNotAvailableException"},{"p":"org.apache.kafka.clients.producer","l":"PreparedTxnState"},{"p":"org.apache.kafka.common.errors","l":"PrincipalDeserializationException"},{"p":"org.apache.kafka.streams.kstream","l":"Printed"},{"p":"org.apache.kafka.streams.processor.assignment","l":"ProcessId"},{"p":"org.apache.kafka.streams.processor.api","l":"ProcessingContext"},{"p":"org.apache.kafka.streams.errors","l":"ProcessingExceptionHandler"},{"p":"org.apache.kafka.streams.errors","l":"ProcessingExceptionHandler.ProcessingHandlerResponse"},{"p":"org.apache.kafka.streams.processor.api","l":"Processor"},{"p":"org.apache.kafka.streams","l":"TopologyDescription.Processor"},{"p":"org.apache.kafka.streams.processor.api","l":"ProcessorContext"},{"p":"org.apache.kafka.str
eams.processor","l":"ProcessorContext"},{"p":"org.apache.kafka.streams.errors","l":"ProcessorStateException"},{"p":"org.apache.kafka.streams.processor.api","l":"ProcessorSupplier"},{"p":"org.apache.kafka.streams.processor.api","l":"ProcessorWrapper"},{"p":"org.apache.kafka.streams.kstream","l":"Produced"},{"p":"org.apache.kafka.clients.producer","l":"Producer"},{"p":"org.apache.kafka.clients.producer","l":"ProducerConfig"},{"p":"org.apache.kafka.common.errors","l":"ProducerFencedException"},{"p":"org.apache.kafka.clients.producer","l":"ProducerInterceptor"},{"p":"org.apache.kafka.clients.producer","l":"ProducerRecord"},{"p":"org.apache.kafka.clients.admin","l":"ProducerState"},{"p":"org.apache.kafka.streams.errors","l":"ProductionExceptionHandler"},{"p":"org.apache.kafka.streams.errors","l":"ProductionExceptionHandler.ProductionExceptionHandlerResponse"},{"p":"org.apache.kafka.streams.processor","l":"PunctuationType"},{"p":"org.apache.kafka.streams.processor","l":"Punctuator"},{"p":"org.apache.kafka.streams.query","l":"Query"},{"p":"org.apache.kafka.streams.state","l":"QueryableStoreType"},{"p":"org.apache.kafka.streams.state","l":"QueryableStoreTypes"},{"p":"org.apache.kafka.streams.query","l":"QueryConfig"},{"p":"org.apache.kafka.streams.query","l":"QueryResult"},{"p":"org.apache.kafka.clients.admin","l":"QuorumInfo"},{"p":"org.apache.kafka.common.metrics","l":"Quota"},{"p":"org.apache.kafka.common.metrics","l":"QuotaViolationException"},{"p":"org.apache.kafka.streams.processor.assignment","l":"TaskAssignmentUtils.RackAwareOptimizationParams"},{"p":"org.apache.kafka.clients.admin","l":"RaftVoterEndpoint"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.Range"},{"p":"org.apache.kafka.clients.consumer","l":"RangeAssignor"},{"p":"org.apache.kafka.streams.query","l":"RangeQuery"},{"p":"org.apache.kafka.common.metrics.stats","l":"Rate"},{"p":"org.apache.kafka.streams.state","l":"ReadOnlyKeyValueStore"},{"p":"org.apache.kafka.streams.state","l":"ReadOnlySessionStore"},{"p":"org.apache.kafka.streams.state","l":"ReadOnlyWindowStore"},{"p":"org.apache.kafka.common.errors","l":"ReassignmentInProgressException"},{"p":"org.apache.kafka.common.errors","l":"RebalanceInProgressException"},{"p":"org.apache.kafka.clients.consumer","l":"ConsumerPartitionAssignor.RebalanceProtocol"},{"p":"org.apache.kafka.common.errors","l":"RebootstrapRequiredException"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.Recommender"},{"p":"org.apache.kafka.common","l":"Reconfigurable"},{"p":"org.apache.kafka.streams.processor.api","l":"Record"},{"p":"org.apache.kafka.common.errors","l":"RecordBatchTooLargeException"},{"p":"org.apache.kafka.streams.processor","l":"RecordContext"},{"p":"org.apache.kafka.common.errors","l":"RecordDeserializationException"},{"p":"org.apache.kafka.common.metrics","l":"Sensor.RecordingLevel"},{"p":"org.apache.kafka.clients.producer","l":"RecordMetadata"},{"p":"org.apache.kafka.streams.processor.api","l":"RecordMetadata"},{"p":"org.apache.kafka.tools.api","l":"RecordReader"},{"p":"org.apache.kafka.clients.admin","l":"RecordsToDelete"},{"p":"org.apache.kafka.common.errors","l":"RecordTooLargeException"},{"p":"org.apache.kafka.streams.kstream","l":"Reducer"},{"p":"org.apache.kafka.common.errors","l":"RefreshRetriableException"},{"p":"org.apache.kafka.connect.mirror","l":"RemoteClusterUtils"},{"p":"org.apache.kafka.server.log.remote.storage","l":"RemoteLogMetadata"},{"p":"org.apache.kafka.server.log.remote.storage","l":"RemoteLogMetadataManager"},{"p":"org.apache.kafka.server.log.remote.stor
age","l":"RemoteLogSegmentId"},{"p":"org.apache.kafka.server.log.remote.storage","l":"RemoteLogSegmentMetadata"},{"p":"org.apache.kafka.server.log.remote.storage","l":"RemoteLogSegmentMetadataUpdate"},{"p":"org.apache.kafka.server.log.remote.storage","l":"RemoteLogSegmentState"},{"p":"org.apache.kafka.server.log.remote.storage","l":"RemotePartitionDeleteMetadata"},{"p":"org.apache.kafka.server.log.remote.storage","l":"RemotePartitionDeleteState"},{"p":"org.apache.kafka.server.log.remote.storage","l":"RemoteResourceNotFoundException"},{"p":"org.apache.kafka.server.log.remote.storage","l":"RemoteStorageException"},{"p":"org.apache.kafka.server.log.remote.storage","l":"RemoteStorageManager"},{"p":"org.apache.kafka.server.log.remote.storage","l":"RemoteStorageMetrics"},{"p":"org.apache.kafka.clients.admin","l":"RemoveMembersFromConsumerGroupOptions"},{"p":"org.apache.kafka.clients.admin","l":"RemoveMembersFromConsumerGroupResult"},{"p":"org.apache.kafka.clients.admin","l":"RemoveRaftVoterOptions"},{"p":"org.apache.kafka.clients.admin","l":"RemoveRaftVoterResult"},{"p":"org.apache.kafka.clients.admin","l":"RenewDelegationTokenOptions"},{"p":"org.apache.kafka.clients.admin","l":"RenewDelegationTokenResult"},{"p":"org.apache.kafka.streams.kstream","l":"Repartitioned"},{"p":"org.apache.kafka.clients.admin","l":"ReplicaInfo"},{"p":"org.apache.kafka.clients.admin","l":"DescribeReplicaLogDirsResult.ReplicaLogDirInfo"},{"p":"org.apache.kafka.common.errors","l":"ReplicaNotAvailableException"},{"p":"org.apache.kafka.clients.admin","l":"QuorumInfo.ReplicaState"},{"p":"org.apache.kafka.connect.mirror","l":"ReplicationPolicy"},{"p":"org.apache.kafka.server.policy","l":"AlterConfigPolicy.RequestMetadata"},{"p":"org.apache.kafka.server.policy","l":"CreateTopicPolicy.RequestMetadata"},{"p":"org.apache.kafka.common.resource","l":"Resource"},{"p":"org.apache.kafka.common.errors","l":"ResourceNotFoundException"},{"p":"org.apache.kafka.common.resource","l":"ResourcePattern"},{"p":"org.apache.kafka.common.resource","l":"ResourcePatternFilter"},{"p":"org.apache.kafka.common.resource","l":"ResourceType"},{"p":"org.apache.kafka.streams.query","l":"ResultOrder"},{"p":"org.apache.kafka.clients.consumer","l":"RetriableCommitFailedException"},{"p":"org.apache.kafka.common.errors","l":"RetriableException"},{"p":"org.apache.kafka.connect.errors","l":"RetriableException"},{"p":"org.apache.kafka.streams.state","l":"RocksDBConfigSetter"},{"p":"org.apache.kafka.streams.state","l":"BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers"},{"p":"org.apache.kafka.clients.consumer","l":"RoundRobinAssignor"},{"p":"org.apache.kafka.clients.producer","l":"RoundRobinPartitioner"},{"p":"org.apache.kafka.common.metrics.stats","l":"SampledStat"},{"p":"org.apache.kafka.common.security.auth","l":"SaslAuthenticationContext"},{"p":"org.apache.kafka.common.errors","l":"SaslAuthenticationException"},{"p":"org.apache.kafka.common.config","l":"SaslConfigs"},{"p":"org.apache.kafka.common.security.auth","l":"SaslExtensions"},{"p":"org.apache.kafka.common.security.auth","l":"SaslExtensionsCallback"},{"p":"org.apache.kafka.connect.data","l":"Schema"},{"p":"org.apache.kafka.connect.data","l":"SchemaAndValue"},{"p":"org.apache.kafka.connect.data","l":"SchemaBuilder"},{"p":"org.apache.kafka.connect.errors","l":"SchemaBuilderException"},{"p":"org.apache.kafka.connect.data","l":"SchemaProjector"},{"p":"org.apache.kafka.connect.errors","l":"SchemaProjectorException"},{"p":"org.apache.kafka.connect.tools","l":"SchemaSourceConnector"},{"p":"org.apache.kafka.connec
t.tools","l":"SchemaSourceTask"},{"p":"org.apache.kafka.common.security.scram","l":"ScramCredential"},{"p":"org.apache.kafka.common.security.scram","l":"ScramCredentialCallback"},{"p":"org.apache.kafka.clients.admin","l":"ScramCredentialInfo"},{"p":"org.apache.kafka.common.security.scram","l":"ScramExtensionsCallback"},{"p":"org.apache.kafka.common.security.scram","l":"ScramLoginModule"},{"p":"org.apache.kafka.clients.admin","l":"ScramMechanism"},{"p":"org.apache.kafka.common.config","l":"SecurityConfig"},{"p":"org.apache.kafka.common.errors","l":"SecurityDisabledException"},{"p":"org.apache.kafka.common.security.auth","l":"SecurityProtocol"},{"p":"org.apache.kafka.common.security.auth","l":"SecurityProviderCreator"},{"p":"org.apache.kafka.common.metrics","l":"Sensor"},{"p":"org.apache.kafka.common.serialization","l":"Serde"},{"p":"org.apache.kafka.common.serialization","l":"Serdes"},{"p":"org.apache.kafka.common.errors","l":"SerializationException"},{"p":"org.apache.kafka.streams.errors","l":"ProductionExceptionHandler.SerializationExceptionOrigin"},{"p":"org.apache.kafka.common.serialization","l":"Serializer"},{"p":"org.apache.kafka.streams.state","l":"SessionBytesStoreSupplier"},{"p":"org.apache.kafka.streams.state","l":"SessionStore"},{"p":"org.apache.kafka.streams.state","l":"QueryableStoreTypes.SessionStoreType"},{"p":"org.apache.kafka.streams.kstream","l":"SessionWindowedCogroupedKStream"},{"p":"org.apache.kafka.streams.kstream","l":"SessionWindowedDeserializer"},{"p":"org.apache.kafka.streams.kstream","l":"SessionWindowedKStream"},{"p":"org.apache.kafka.streams.kstream","l":"WindowedSerdes.SessionWindowedSerde"},{"p":"org.apache.kafka.streams.kstream","l":"SessionWindowedSerializer"},{"p":"org.apache.kafka.streams.kstream","l":"SessionWindows"},{"p":"org.apache.kafka.clients.consumer","l":"ShareConsumer"},{"p":"org.apache.kafka.clients.admin","l":"ShareGroupDescription"},{"p":"org.apache.kafka.coordinator.group.api.assignor","l":"ShareGroupPartitionAssignor"},{"p":"org.apache.kafka.clients.admin","l":"ShareMemberAssignment"},{"p":"org.apache.kafka.clients.admin","l":"ShareMemberDescription"},{"p":"org.apache.kafka.common.errors","l":"ShareSessionLimitReachedException"},{"p":"org.apache.kafka.common.errors","l":"ShareSessionNotFoundException"},{"p":"org.apache.kafka.common.serialization","l":"ShortDeserializer"},{"p":"org.apache.kafka.common.serialization","l":"Serdes.ShortSerde"},{"p":"org.apache.kafka.common.serialization","l":"ShortSerializer"},{"p":"org.apache.kafka.connect.storage","l":"SimpleHeaderConverter"},{"p":"org.apache.kafka.common.metrics.stats","l":"SimpleRate"},{"p":"org.apache.kafka.streams","l":"TopologyDescription.Sink"},{"p":"org.apache.kafka.connect.sink","l":"SinkConnector"},{"p":"org.apache.kafka.connect.sink","l":"SinkConnectorContext"},{"p":"org.apache.kafka.connect.sink","l":"SinkRecord"},{"p":"org.apache.kafka.connect.sink","l":"SinkTask"},{"p":"org.apache.kafka.connect.sink","l":"SinkTaskContext"},{"p":"org.apache.kafka.streams.kstream","l":"SlidingWindows"},{"p":"org.apache.kafka.common.errors","l":"SnapshotNotFoundException"},{"p":"org.apache.kafka.streams","l":"TopologyDescription.Source"},{"p":"org.apache.kafka.connect.mirror","l":"SourceAndTarget"},{"p":"org.apache.kafka.connect.source","l":"SourceConnector"},{"p":"org.apache.kafka.connect.source","l":"SourceConnectorContext"},{"p":"org.apache.kafka.connect.source","l":"SourceRecord"},{"p":"org.apache.kafka.connect.source","l":"SourceTask"},{"p":"org.apache.kafka.connect.source","l":"SourceTaskContext
"},{"p":"org.apache.kafka.common.security.auth","l":"SslAuthenticationContext"},{"p":"org.apache.kafka.common.errors","l":"SslAuthenticationException"},{"p":"org.apache.kafka.common.config","l":"SslClientAuth"},{"p":"org.apache.kafka.common.config","l":"SslConfigs"},{"p":"org.apache.kafka.common.security.auth","l":"SslEngineFactory"},{"p":"org.apache.kafka.common.annotation","l":"InterfaceStability.Stable"},{"p":"org.apache.kafka.common.errors","l":"StaleBrokerEpochException"},{"p":"org.apache.kafka.common.errors","l":"StaleMemberEpochException"},{"p":"org.apache.kafka.streams.processor","l":"StandbyUpdateListener"},{"p":"org.apache.kafka.common.metrics","l":"Stat"},{"p":"org.apache.kafka.streams","l":"KafkaStreams.State"},{"p":"org.apache.kafka.streams","l":"KafkaStreams.StateListener"},{"p":"org.apache.kafka.streams.query","l":"StateQueryRequest"},{"p":"org.apache.kafka.streams.query","l":"StateQueryResult"},{"p":"org.apache.kafka.streams.processor","l":"StateRestoreCallback"},{"p":"org.apache.kafka.streams.processor","l":"StateRestoreListener"},{"p":"org.apache.kafka.streams.state","l":"StateSerdes"},{"p":"org.apache.kafka.streams.processor","l":"StateStore"},{"p":"org.apache.kafka.streams.processor","l":"StateStoreContext"},{"p":"org.apache.kafka.streams.errors","l":"StateStoreMigratedException"},{"p":"org.apache.kafka.streams.errors","l":"StateStoreNotAvailableException"},{"p":"org.apache.kafka.clients.consumer","l":"StickyAssignor"},{"p":"org.apache.kafka.streams.processor.assignment.assignors","l":"StickyTaskAssignor"},{"p":"org.apache.kafka.streams.state","l":"StoreBuilder"},{"p":"org.apache.kafka.streams","l":"StoreQueryParameters"},{"p":"org.apache.kafka.streams.state","l":"Stores"},{"p":"org.apache.kafka.streams.state","l":"StoreSupplier"},{"p":"org.apache.kafka.streams.kstream","l":"Materialized.StoreType"},{"p":"org.apache.kafka.streams.kstream","l":"EmitStrategy.StrategyType"},{"p":"org.apache.kafka.streams.kstream","l":"StreamJoined"},{"p":"org.apache.kafka.streams.processor","l":"StreamPartitioner"},{"p":"org.apache.kafka.streams","l":"StreamsBuilder"},{"p":"org.apache.kafka.streams","l":"StreamsConfig"},{"p":"org.apache.kafka.streams.errors","l":"StreamsException"},{"p":"org.apache.kafka.clients.admin","l":"StreamsGroupDescription"},{"p":"org.apache.kafka.clients.admin","l":"StreamsGroupMemberAssignment"},{"p":"org.apache.kafka.clients.admin","l":"StreamsGroupMemberDescription"},{"p":"org.apache.kafka.clients.admin","l":"StreamsGroupSubtopologyDescription"},{"p":"org.apache.kafka.common.errors","l":"StreamsInvalidTopologyEpochException"},{"p":"org.apache.kafka.common.errors","l":"StreamsInvalidTopologyException"},{"p":"org.apache.kafka.streams","l":"StreamsMetadata"},{"p":"org.apache.kafka.streams","l":"StreamsMetrics"},{"p":"org.apache.kafka.streams.errors","l":"StreamsNotStartedException"},{"p":"org.apache.kafka.streams.errors","l":"StreamsRebalancingException"},{"p":"org.apache.kafka.streams.errors","l":"StreamsStoppedException"},{"p":"org.apache.kafka.common.errors","l":"StreamsTopologyFencedException"},{"p":"org.apache.kafka.streams.errors","l":"StreamsUncaughtExceptionHandler"},{"p":"org.apache.kafka.streams.errors","l":"StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse"},{"p":"org.apache.kafka.streams.kstream","l":"Suppressed.StrictBufferConfig"},{"p":"org.apache.kafka.connect.storage","l":"StringConverter"},{"p":"org.apache.kafka.connect.storage","l":"StringConverterConfig"},{"p":"org.apache.kafka.tools.api","l":"StringDecoder"},{"p":"org.apache.kafka.co
mmon.serialization","l":"StringDeserializer"},{"p":"org.apache.kafka.common.serialization","l":"Serdes.StringSerde"},{"p":"org.apache.kafka.common.serialization","l":"StringSerializer"},{"p":"org.apache.kafka.connect.data","l":"Struct"},{"p":"org.apache.kafka.coordinator.group.api.assignor","l":"SubscribedTopicDescriber"},{"p":"org.apache.kafka.clients.consumer","l":"ConsumerPartitionAssignor.Subscription"},{"p":"org.apache.kafka.clients.consumer","l":"SubscriptionPattern"},{"p":"org.apache.kafka.coordinator.group.api.assignor","l":"SubscriptionType"},{"p":"org.apache.kafka.streams","l":"TopologyDescription.Subtopology"},{"p":"org.apache.kafka.clients.admin","l":"SupportedVersionRange"},{"p":"org.apache.kafka.streams.kstream","l":"Suppressed"},{"p":"org.apache.kafka.streams.processor","l":"StandbyUpdateListener.SuspendReason"},{"p":"org.apache.kafka.streams.kstream","l":"TableJoined"},{"p":"org.apache.kafka.connect.connector","l":"Task"},{"p":"org.apache.kafka.streams.processor.assignment","l":"TaskAssignor.TaskAssignment"},{"p":"org.apache.kafka.streams.errors","l":"TaskAssignmentException"},{"p":"org.apache.kafka.streams.processor.assignment","l":"TaskAssignmentUtils"},{"p":"org.apache.kafka.streams.processor.assignment","l":"TaskAssignor"},{"p":"org.apache.kafka.streams","l":"TopologyConfig.TaskConfig"},{"p":"org.apache.kafka.streams.errors","l":"TaskCorruptedException"},{"p":"org.apache.kafka.streams.processor","l":"TaskId"},{"p":"org.apache.kafka.streams.errors","l":"TaskIdFormatException"},{"p":"org.apache.kafka.clients.admin","l":"StreamsGroupMemberAssignment.TaskIds"},{"p":"org.apache.kafka.streams.processor.assignment","l":"TaskInfo"},{"p":"org.apache.kafka.streams","l":"TaskMetadata"},{"p":"org.apache.kafka.streams.errors","l":"TaskMigratedException"},{"p":"org.apache.kafka.clients.admin","l":"StreamsGroupMemberDescription.TaskOffset"},{"p":"org.apache.kafka.connect.health","l":"TaskState"},{"p":"org.apache.kafka.streams.processor.assignment","l":"TaskTopicPartition"},{"p":"org.apache.kafka.common.errors","l":"TelemetryTooLargeException"},{"p":"org.apache.kafka.clients.admin","l":"TerminateTransactionOptions"},{"p":"org.apache.kafka.clients.admin","l":"TerminateTransactionResult"},{"p":"org.apache.kafka.streams","l":"TestInputTopic"},{"p":"org.apache.kafka.streams","l":"TestOutputTopic"},{"p":"org.apache.kafka.streams.test","l":"TestRecord"},{"p":"org.apache.kafka.streams","l":"ThreadMetadata"},{"p":"org.apache.kafka.common.errors","l":"ThrottlingQuotaExceededException"},{"p":"org.apache.kafka.connect.data","l":"Time"},{"p":"org.apache.kafka.common.errors","l":"TimeoutException"},{"p":"org.apache.kafka.connect.data","l":"Timestamp"},{"p":"org.apache.kafka.streams.state","l":"TimestampedBytesStore"},{"p":"org.apache.kafka.streams.query","l":"TimestampedKeyQuery"},{"p":"org.apache.kafka.streams.state","l":"TimestampedKeyValueStore"},{"p":"org.apache.kafka.streams.query","l":"TimestampedRangeQuery"},{"p":"org.apache.kafka.streams.state","l":"TimestampedWindowStore"},{"p":"org.apache.kafka.streams.processor","l":"TimestampExtractor"},{"p":"org.apache.kafka.clients.admin","l":"OffsetSpec.TimestampSpec"},{"p":"org.apache.kafka.streams.kstream","l":"TimeWindowedCogroupedKStream"},{"p":"org.apache.kafka.streams.kstream","l":"TimeWindowedDeserializer"},{"p":"org.apache.kafka.streams.kstream","l":"TimeWindowedKStream"},{"p":"org.apache.kafka.streams.kstream","l":"WindowedSerdes.TimeWindowedSerde"},{"p":"org.apache.kafka.streams.kstream","l":"TimeWindowedSerializer"},{"p":"org.apache.kafka.s
treams.kstream","l":"TimeWindows"},{"p":"org.apache.kafka.streams.processor","l":"To"},{"p":"org.apache.kafka.common.metrics.stats","l":"TokenBucket"},{"p":"org.apache.kafka.common.security.token.delegation","l":"TokenInformation"},{"p":"org.apache.kafka.common.errors","l":"TopicAuthorizationException"},{"p":"org.apache.kafka.common","l":"TopicCollection"},{"p":"org.apache.kafka.common.config","l":"TopicConfig"},{"p":"org.apache.kafka.common.errors","l":"TopicDeletionDisabledException"},{"p":"org.apache.kafka.clients.admin","l":"TopicDescription"},{"p":"org.apache.kafka.common.errors","l":"TopicExistsException"},{"p":"org.apache.kafka.common","l":"TopicCollection.TopicIdCollection"},{"p":"org.apache.kafka.common","l":"TopicIdPartition"},{"p":"org.apache.kafka.clients.admin","l":"StreamsGroupSubtopologyDescription.TopicInfo"},{"p":"org.apache.kafka.clients.admin","l":"TopicListing"},{"p":"org.apache.kafka.clients.admin","l":"CreateTopicsResult.TopicMetadataAndConfig"},{"p":"org.apache.kafka.common","l":"TopicCollection.TopicNameCollection"},{"p":"org.apache.kafka.streams.processor","l":"TopicNameExtractor"},{"p":"org.apache.kafka.common","l":"TopicPartition"},{"p":"org.apache.kafka.common","l":"TopicPartitionInfo"},{"p":"org.apache.kafka.common","l":"TopicPartitionReplica"},{"p":"org.apache.kafka.streams","l":"Topology"},{"p":"org.apache.kafka.streams","l":"TopologyConfig"},{"p":"org.apache.kafka.streams","l":"TopologyDescription"},{"p":"org.apache.kafka.streams.errors","l":"TopologyException"},{"p":"org.apache.kafka.streams","l":"TopologyTestDriver"},{"p":"org.apache.kafka.common.errors","l":"TransactionAbortableException"},{"p":"org.apache.kafka.common.errors","l":"TransactionAbortedException"},{"p":"org.apache.kafka.common.errors","l":"TransactionalIdAuthorizationException"},{"p":"org.apache.kafka.common.errors","l":"TransactionalIdNotFoundException"},{"p":"org.apache.kafka.connect.source","l":"SourceTask.TransactionBoundary"},{"p":"org.apache.kafka.connect.source","l":"TransactionContext"},{"p":"org.apache.kafka.common.errors","l":"TransactionCoordinatorFencedException"},{"p":"org.apache.kafka.clients.admin","l":"TransactionDescription"},{"p":"org.apache.kafka.clients.admin","l":"TransactionListing"},{"p":"org.apache.kafka.clients.admin","l":"TransactionState"},{"p":"org.apache.kafka.connect.transforms","l":"Transformation"},{"p":"org.apache.kafka.streams.kstream","l":"Transformer"},{"p":"org.apache.kafka.streams.kstream","l":"TransformerSupplier"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.Type"},{"p":"org.apache.kafka.common.config","l":"ConfigResource.Type"},{"p":"org.apache.kafka.streams.processor.assignment","l":"KafkaStreamsAssignment.AssignedTask.Type"},{"p":"org.apache.kafka.connect.data","l":"Schema.Type"},{"p":"org.apache.kafka.common.errors","l":"UnacceptableCredentialException"},{"p":"org.apache.kafka.common.errors","l":"UnknownControllerIdException"},{"p":"org.apache.kafka.common.errors","l":"UnknownLeaderEpochException"},{"p":"org.apache.kafka.common.errors","l":"UnknownMemberIdException"},{"p":"org.apache.kafka.common.errors","l":"UnknownProducerIdException"},{"p":"org.apache.kafka.common.errors","l":"UnknownServerException"},{"p":"org.apache.kafka.streams.errors","l":"UnknownStateStoreException"},{"p":"org.apache.kafka.common.errors","l":"UnknownSubscriptionIdException"},{"p":"org.apache.kafka.common.errors","l":"UnknownTopicIdException"},{"p":"org.apache.kafka.common.errors","l":"UnknownTopicOrPartitionException"},{"p":"org.apache.kafka.streams.errors","l":"Unk
nownTopologyException"},{"p":"org.apache.kafka.streams.kstream","l":"UnlimitedWindows"},{"p":"org.apache.kafka.clients.admin","l":"UnregisterBrokerOptions"},{"p":"org.apache.kafka.clients.admin","l":"UnregisterBrokerResult"},{"p":"org.apache.kafka.common.errors","l":"UnreleasedInstanceIdException"},{"p":"org.apache.kafka.common.annotation","l":"InterfaceStability.Unstable"},{"p":"org.apache.kafka.common.errors","l":"UnstableOffsetCommitException"},{"p":"org.apache.kafka.common.errors","l":"UnsupportedAssignorException"},{"p":"org.apache.kafka.common.errors","l":"UnsupportedByAuthenticationException"},{"p":"org.apache.kafka.common.errors","l":"UnsupportedCompressionTypeException"},{"p":"org.apache.kafka.common.errors","l":"UnsupportedEndpointTypeException"},{"p":"org.apache.kafka.common.errors","l":"UnsupportedForMessageFormatException"},{"p":"org.apache.kafka.common.errors","l":"UnsupportedSaslMechanismException"},{"p":"org.apache.kafka.common.errors","l":"UnsupportedVersionException"},{"p":"org.apache.kafka.clients.admin","l":"UpdateFeaturesOptions"},{"p":"org.apache.kafka.clients.admin","l":"UpdateFeaturesResult"},{"p":"org.apache.kafka.clients.admin","l":"FeatureUpdate.UpgradeType"},{"p":"org.apache.kafka.streams.processor","l":"UsePartitionTimeOnInvalidTimestamp"},{"p":"org.apache.kafka.clients.admin","l":"UserScramCredentialAlteration"},{"p":"org.apache.kafka.clients.admin","l":"UserScramCredentialDeletion"},{"p":"org.apache.kafka.clients.admin","l":"UserScramCredentialsDescription"},{"p":"org.apache.kafka.clients.admin","l":"UserScramCredentialUpsertion"},{"p":"org.apache.kafka.common","l":"Uuid"},{"p":"org.apache.kafka.common.serialization","l":"UUIDDeserializer"},{"p":"org.apache.kafka.common.serialization","l":"Serdes.UUIDSerde"},{"p":"org.apache.kafka.common.serialization","l":"UUIDSerializer"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.Validator"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.ValidList"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.ValidString"},{"p":"org.apache.kafka.common.metrics.stats","l":"Value"},{"p":"org.apache.kafka.streams.state","l":"ValueAndTimestamp"},{"p":"org.apache.kafka.streams.kstream","l":"ValueJoiner"},{"p":"org.apache.kafka.streams.kstream","l":"ValueJoinerWithKey"},{"p":"org.apache.kafka.streams.kstream","l":"ValueMapper"},{"p":"org.apache.kafka.streams.kstream","l":"ValueMapperWithKey"},{"p":"org.apache.kafka.connect.data","l":"Values"},{"p":"org.apache.kafka.streams.kstream","l":"ValueTransformer"},{"p":"org.apache.kafka.streams.kstream","l":"ValueTransformerSupplier"},{"p":"org.apache.kafka.streams.kstream","l":"ValueTransformerWithKey"},{"p":"org.apache.kafka.streams.kstream","l":"ValueTransformerWithKeySupplier"},{"p":"org.apache.kafka.connect.tools","l":"VerifiableSinkConnector"},{"p":"org.apache.kafka.connect.tools","l":"VerifiableSinkTask"},{"p":"org.apache.kafka.connect.tools","l":"VerifiableSourceConnector"},{"p":"org.apache.kafka.connect.tools","l":"VerifiableSourceTask"},{"p":"org.apache.kafka.connect.components","l":"Versioned"},{"p":"org.apache.kafka.streams.state","l":"VersionedBytesStore"},{"p":"org.apache.kafka.streams.state","l":"VersionedBytesStoreSupplier"},{"p":"org.apache.kafka.streams.query","l":"VersionedKeyQuery"},{"p":"org.apache.kafka.streams.state","l":"VersionedKeyValueStore"},{"p":"org.apache.kafka.streams.state","l":"VersionedRecord"},{"p":"org.apache.kafka.streams.state","l":"VersionedRecordIterator"},{"p":"org.apache.kafka.common.serialization","l":"VoidDeserializer"},{"p":"org.apache
.kafka.common.serialization","l":"Serdes.VoidSerde"},{"p":"org.apache.kafka.common.serialization","l":"VoidSerializer"},{"p":"org.apache.kafka.common.errors","l":"VoterNotFoundException"},{"p":"org.apache.kafka.common.errors","l":"WakeupException"},{"p":"org.apache.kafka.streams.processor","l":"WallclockTimestampExtractor"},{"p":"org.apache.kafka.common.config","l":"ConfigDef.Width"},{"p":"org.apache.kafka.streams.kstream","l":"Window"},{"p":"org.apache.kafka.streams.state","l":"WindowBytesStoreSupplier"},{"p":"org.apache.kafka.streams.kstream","l":"Windowed"},{"p":"org.apache.kafka.common.metrics.stats","l":"WindowedCount"},{"p":"org.apache.kafka.streams.kstream","l":"WindowedSerdes"},{"p":"org.apache.kafka.common.metrics.stats","l":"WindowedSum"},{"p":"org.apache.kafka.streams.query","l":"WindowKeyQuery"},{"p":"org.apache.kafka.streams.query","l":"WindowRangeQuery"},{"p":"org.apache.kafka.streams.kstream","l":"Windows"},{"p":"org.apache.kafka.streams.state","l":"WindowStore"},{"p":"org.apache.kafka.streams.state","l":"WindowStoreIterator"},{"p":"org.apache.kafka.streams.state","l":"QueryableStoreTypes.WindowStoreType"},{"p":"org.apache.kafka.streams.processor.api","l":"WrappedFixedKeyProcessorSupplier"},{"p":"org.apache.kafka.streams.processor.api","l":"WrappedProcessorSupplier"},{"p":"org.apache.kafka.common.serialization","l":"Serdes.WrapperSerde"}];updateSearchResults(); \ No newline at end of file diff --git a/static/images/apache.png b/static/images/apache.png new file mode 100644 index 000000000..01f9cdb80 Binary files /dev/null and b/static/images/apache.png differ